Merge "Fix: Flac Encoder timestamp for EOS" into sc-mainline-prod am: 09ed0a5010
Original change: https://googleplex-android-review.googlesource.com/c/platform/frameworks/av/+/16155274
Change-Id: I7c7ee8100d352cbbcd37b15896b90f3fec2bebff
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..a7614d2
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,13 @@
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Left
+TabWidth: 4
+UseTab: Never
diff --git a/apex/OWNERS b/apex/OWNERS
index 5587f5f..54802d4 100644
--- a/apex/OWNERS
+++ b/apex/OWNERS
@@ -1,6 +1,7 @@
-chz@google.com
-dwkang@google.com
+essick@google.com
jiyong@google.com
lajos@google.com
-marcone@google.com
-wjia@google.com
+nchalko@google.com
+
+include platform/packages/modules/common:/MODULES_OWNERS
+
diff --git a/apex/mediaswcodec.32rc b/apex/mediaswcodec.32rc
new file mode 100644
index 0000000..79aef36
--- /dev/null
+++ b/apex/mediaswcodec.32rc
@@ -0,0 +1,6 @@
+service media.swcodec /apex/com.android.media.swcodec/bin/mediaswcodec
+ class main
+ user mediacodec
+ group camera drmrpc mediadrm
+ ioprio rt 4
+ task_profiles ProcessCapacityHigh
diff --git a/apex/mediatranscoding.32rc b/apex/mediatranscoding.32rc
new file mode 100644
index 0000000..5169462
--- /dev/null
+++ b/apex/mediatranscoding.32rc
@@ -0,0 +1,12 @@
+# media.transcoding service is defined on com.android.media apex which goes back
+# to API29, but we only want it started on API31+ devices. So we declare it as
+# "disabled" and start it explicitly on boot.
+service media.transcoding /apex/com.android.media/bin/mediatranscoding
+ class main
+ user media
+ group media
+ ioprio rt 4
+ # Restrict to little cores only with system-background cpuset.
+ task_profiles ServiceCapacityLow
+ interface aidl media.transcoding
+ disabled
diff --git a/camera/Android.bp b/camera/Android.bp
index 6878c20..4ed3269 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -43,6 +43,10 @@
],
}
+cc_library_headers {
+ name: "camera_headers",
+ export_include_dirs: ["include"],
+}
cc_library_shared {
name: "libcamera_client",
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 03439fd..24c9108 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -68,6 +68,9 @@
unavailablePhysicalIds16.push_back(String16(id8));
}
res = parcel->writeString16Vector(unavailablePhysicalIds16);
+ if (res != OK) return res;
+
+ res = parcel->writeString16(String16(clientPackage));
return res;
}
@@ -86,6 +89,12 @@
for (auto& id16 : unavailablePhysicalIds16) {
unavailablePhysicalIds.push_back(String8(id16));
}
+
+ String16 tempClientPackage;
+ res = parcel->readString16(&tempClientPackage);
+ if (res != OK) return res;
+ clientPackage = String8(tempClientPackage);
+
return res;
}
diff --git a/camera/OWNERS b/camera/OWNERS
index d6b95da..385c163 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -1,8 +1,7 @@
-epeev@google.com
+# Bug component: 41727
etalvala@google.com
+arakesh@google.com
+epeev@google.com
jchowdhary@google.com
shuzhenwang@google.com
-yinchiayeh@google.com
-# backup owner
-cychen@google.com
-zhijunhe@google.com
+ruchamk@google.com
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index bbb0289..3d78aef 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -37,8 +37,11 @@
oneway void notifyCameraState(in CameraSessionStats cameraSessionStats);
/**
- * Reports whether the top activity needs a rotate and crop override.
+ * Returns the necessary rotate and crop override for the top activity which
+ * will be one of ({@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_NONE},
+ * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_90},
+ * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_180},
+ * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_270}).
*/
- boolean isRotateAndCropOverrideNeeded(String packageName, int sensorOrientation,
- int lensFacing);
+ int getRotateAndCropOverride(String packageName, int lensFacing);
}
diff --git a/camera/include/camera/CameraBase.h b/camera/include/camera/CameraBase.h
index e156994..8e53968 100644
--- a/camera/include/camera/CameraBase.h
+++ b/camera/include/camera/CameraBase.h
@@ -85,11 +85,17 @@
*/
std::vector<String8> unavailablePhysicalIds;
+ /**
+ * Client package name if camera is open, otherwise not applicable
+ */
+ String8 clientPackage;
+
virtual status_t writeToParcel(android::Parcel* parcel) const;
virtual status_t readFromParcel(const android::Parcel* parcel);
- CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds) :
- cameraId(id), status(s), unavailablePhysicalIds(unavailSubIds) {}
+ CameraStatus(String8 id, int32_t s, const std::vector<String8>& unavailSubIds,
+ const String8& clientPkg) : cameraId(id), status(s),
+ unavailablePhysicalIds(unavailSubIds), clientPackage(clientPkg) {}
CameraStatus() : status(ICameraServiceListener::STATUS_PRESENT) {}
};
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 95ef2b2..5892f1a 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -189,8 +189,12 @@
sp<CameraManagerGlobal> cm = mCameraManager.promote();
if (cm != nullptr) {
AutoMutex lock(cm->mLock);
+ std::vector<String8> cameraIdList;
for (auto& pair : cm->mDeviceStatusMap) {
- const String8 &cameraId = pair.first;
+ cameraIdList.push_back(pair.first);
+ }
+
+ for (String8 cameraId : cameraIdList) {
cm->onStatusChangedLocked(
CameraServiceListener::STATUS_NOT_PRESENT, cameraId);
}
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index ccbfaa9..da887a2 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -139,6 +139,8 @@
return !(*this == other);
}
bool operator < (const Callback& other) const {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wordered-compare-function-pointers"
if (*this == other) return false;
if (mContext != other.mContext) return mContext < other.mContext;
if (mPhysicalCamAvailable != other.mPhysicalCamAvailable) {
@@ -152,6 +154,7 @@
}
if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
return mUnavailable < other.mUnavailable;
+#pragma GCC diagnostic pop
}
bool operator > (const Callback& other) const {
return (*this != other && !(*this < other));
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index dab2fef..05124c0 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -182,7 +182,7 @@
int64_t format = entry.data.i64[i + STREAM_FORMAT_OFFSET];
int64_t width = entry.data.i64[i + STREAM_WIDTH_OFFSET];
int64_t height = entry.data.i64[i + STREAM_HEIGHT_OFFSET];
- int64_t duration = entry.data.i32[i + STREAM_DURATION_OFFSET];
+ int64_t duration = entry.data.i64[i + STREAM_DURATION_OFFSET];
// Leave the unfiltered format in so apps depending on previous wrong
// filter behavior continue to work
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 86781e5..0e9740a 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1090,6 +1090,15 @@
* (ACAMERA_LENS_OPTICAL_STABILIZATION_MODE), turning both modes on may
* produce undesirable interaction, so it is recommended not to enable
* both at the same time.</p>
+ * <p>If video stabilization is set to "PREVIEW_STABILIZATION",
+ * ACAMERA_LENS_OPTICAL_STABILIZATION_MODE is overridden. The camera sub-system may choose
+ * to turn on hardware based image stabilization in addition to software based stabilization
+ * if it deems that appropriate.
+ * This key may be a part of the available session keys, which camera clients may
+ * query via
+ * {@link ACameraManager_getCameraCharacteristics }.
+ * If this is the case, changing this key over the life-time of a capture session may
+ * cause delays / glitches.</p>
*
* @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
* @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
@@ -2144,6 +2153,51 @@
*/
ACAMERA_FLASH_INFO_AVAILABLE = // byte (acamera_metadata_enum_android_flash_info_available_t)
ACAMERA_FLASH_INFO_START,
+ /**
+ * <p>Maximum flashlight brightness level.</p>
+ *
+ * <p>Type: int32</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>If this value is greater than 1, then the device supports controlling the
+ * flashlight brightness level via
+ * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.
+ * If this value is equal to 1, flashlight brightness control is not supported.
+ * The value for this key will be null for devices with no flash unit.</p>
+ */
+ ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL = // int32
+ ACAMERA_FLASH_INFO_START + 2,
+ /**
+ * <p>Default flashlight brightness level to be set via
+ * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.</p>
+ *
+ * <p>Type: int32</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>If flash unit is available this will be greater than or equal to 1 and less
+ * than or equal to <code>ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL</code>.</p>
+ * <p>Setting flashlight brightness above the default level
+ * (i.e.<code>ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL</code>) may make the device more
+ * likely to reach thermal throttling conditions and slow down, or drain the
+ * battery quicker than normal. To minimize such issues, it is recommended to
+ * start the flashlight at this default brightness until a user explicitly requests
+ * a brighter level.
+ * Note that the value for this key will be null for devices with no flash unit.
+ * The default level should always be > 0.</p>
+ *
+ * @see ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL
+ * @see ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL
+ */
+ ACAMERA_FLASH_INFO_STRENGTH_DEFAULT_LEVEL = // int32
+ ACAMERA_FLASH_INFO_START + 3,
ACAMERA_FLASH_INFO_END,
/**
@@ -2526,12 +2580,18 @@
* <p>If a camera device supports both OIS and digital image stabilization
* (ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE), turning both modes on may produce undesirable
* interaction, so it is recommended not to enable both at the same time.</p>
+ * <p>If ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE is set to "PREVIEW_STABILIZATION",
+ * ACAMERA_LENS_OPTICAL_STABILIZATION_MODE is overridden. The camera sub-system may choose
+ * to turn on hardware based image stabilization in addition to software based stabilization
+ * if it deems that appropriate. This key's value in the capture result will reflect which
+ * OIS mode was chosen.</p>
* <p>Not all devices will support OIS; see
* ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION for
* available controls.</p>
*
* @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
* @see ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION
+ * @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
*/
ACAMERA_LENS_OPTICAL_STABILIZATION_MODE = // byte (acamera_metadata_enum_android_lens_optical_stabilization_mode_t)
ACAMERA_LENS_START + 4,
@@ -4578,6 +4638,25 @@
*
* <p>Also defines the direction of rolling shutter readout, which is from top to bottom in
* the sensor's coordinate system.</p>
+ * <p>Starting with Android API level 32, camera clients that query the orientation via
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#get">CameraCharacteristics#get</a> on foldable devices which
+ * include logical cameras can receive a value that can dynamically change depending on the
+ * device/fold state.
+ * Clients are advised to not cache or store the orientation value of such logical sensors.
+ * In case repeated queries to CameraCharacteristics are not preferred, then clients can
+ * also access the entire mapping from device state to sensor orientation in
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/params/DeviceStateSensorOrientationMap.html">DeviceStateSensorOrientationMap</a>.
+ * Do note that a dynamically changing sensor orientation value in camera characteristics
+ * will not be the best way to establish the orientation per frame. Clients that want to
+ * know the sensor orientation of a particular captured frame should query the
+ * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID from the corresponding capture result and
+ * check the respective physical camera orientation.</p>
+ * <p>Native camera clients must query ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS for the mapping
+ * between device state and camera sensor orientation. Dynamic updates to the sensor
+ * orientation are not supported in this code path.</p>
+ *
+ * @see ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS
+ * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
*/
ACAMERA_SENSOR_ORIENTATION = // int32
ACAMERA_SENSOR_START + 14,
@@ -6284,6 +6363,21 @@
*/
ACAMERA_INFO_VERSION = // byte
ACAMERA_INFO_START + 1,
+ /**
+ *
+ * <p>Type: int64[2*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>HAL must populate the array with
+ * (hardware::camera::provider::V2_5::DeviceState, sensorOrientation) pairs for each
+ * supported device state bitwise combination.</p>
+ */
+ ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS = // int64[2*n]
+ ACAMERA_INFO_START + 3,
ACAMERA_INFO_END,
/**
@@ -7935,6 +8029,17 @@
*/
ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_ON = 1,
+ /**
+ * <p>Preview stabilization, where the preview in addition to all other non-RAW streams are
+ * stabilized with the same quality of stabilization, is enabled. This mode aims to give
+ * clients a 'what you see is what you get' effect. In this mode, the FoV reduction will
+ * be a maximum of 20 % both horizontally and vertically
+ * (10% from left, right, top, bottom) for the given zoom ratio / crop region.
+ * The resultant FoV will also be the same across all processed streams
+ * (that have the same aspect ratio).</p>
+ */
+ ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION = 2,
+
} acamera_metadata_enum_android_control_video_stabilization_mode_t;
// ACAMERA_CONTROL_AE_STATE
diff --git a/camera/ndk/ndk_vendor/impl/ACameraManager.h b/camera/ndk/ndk_vendor/impl/ACameraManager.h
index 8359bb1..4663529 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraManager.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraManager.h
@@ -136,6 +136,8 @@
return !(*this == other);
}
bool operator < (const Callback& other) const {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wordered-compare-function-pointers"
if (*this == other) return false;
if (mContext != other.mContext) return mContext < other.mContext;
if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
@@ -146,6 +148,7 @@
if (mPhysicalCamUnavailable != other.mPhysicalCamUnavailable)
return mPhysicalCamUnavailable < other.mPhysicalCamUnavailable;
return mUnavailable < other.mUnavailable;
+#pragma GCC diagnostic pop
}
bool operator > (const Callback& other) const {
return (*this != other && !(*this < other));
diff --git a/cmds/OWNERS b/cmds/OWNERS
index 0d32aac..a48c37a 100644
--- a/cmds/OWNERS
+++ b/cmds/OWNERS
@@ -1,3 +1,3 @@
elaurent@google.com
+essick@google.com
lajos@google.com
-marcone@google.com
diff --git a/cmds/screenrecord/Android.bp b/cmds/screenrecord/Android.bp
index 359a835..d0b3ce0 100644
--- a/cmds/screenrecord/Android.bp
+++ b/cmds/screenrecord/Android.bp
@@ -55,12 +55,6 @@
"libGLESv2",
],
- include_dirs: [
- "frameworks/av/media/libstagefright",
- "frameworks/av/media/libstagefright/include",
- "frameworks/native/include/media/openmax",
- ],
-
cflags: [
"-Werror",
"-Wall",
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index e6e3473..2e0b678 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -701,7 +701,7 @@
printf("Display is %dx%d @%.2ffps (orientation=%s), layerStack=%u\n",
layerStackSpaceRect.getWidth(), layerStackSpaceRect.getHeight(),
displayMode.refreshRate, toCString(displayState.orientation),
- displayState.layerStack);
+ displayState.layerStack.id);
fflush(stdout);
}
@@ -1067,7 +1067,7 @@
std::optional<PhysicalDisplayId> displayId = SurfaceComposerClient::getInternalDisplayId();
if (!displayId) {
- fprintf(stderr, "Failed to get token for internal display\n");
+ fprintf(stderr, "Failed to get ID for internal display\n");
return 1;
}
@@ -1168,17 +1168,14 @@
}
break;
case 'd':
- gPhysicalDisplayId = PhysicalDisplayId(atoll(optarg));
- if (gPhysicalDisplayId.value == 0) {
- fprintf(stderr, "Please specify a valid physical display id\n");
- return 2;
- } else if (SurfaceComposerClient::
- getPhysicalDisplayToken(gPhysicalDisplayId) == nullptr) {
- fprintf(stderr, "Invalid physical display id: %s\n",
- to_string(gPhysicalDisplayId).c_str());
- return 2;
+ if (const auto id = android::DisplayId::fromValue<PhysicalDisplayId>(atoll(optarg));
+ id && SurfaceComposerClient::getPhysicalDisplayToken(*id)) {
+ gPhysicalDisplayId = *id;
+ break;
}
- break;
+
+ fprintf(stderr, "Invalid physical display ID\n");
+ return 2;
default:
if (ic != '?') {
fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/cmds/stagefright/Android.bp b/cmds/stagefright/Android.bp
new file mode 100644
index 0000000..c4783d3
--- /dev/null
+++ b/cmds/stagefright/Android.bp
@@ -0,0 +1,278 @@
+package {
+ default_applicable_licenses: ["frameworks_av_cmds_stagefright_license"],
+}
+
+// Added automatically by a large-scale-change
+// See: http://go/android-license-faq
+license {
+ name: "frameworks_av_cmds_stagefright_license",
+ visibility: [":__subpackages__"],
+ license_kinds: [
+ "SPDX-license-identifier-Apache-2.0",
+ ],
+ license_text: [
+ "NOTICE",
+ ],
+}
+
+cc_binary {
+ name: "stagefright",
+
+ srcs: [
+ "AudioPlayer.cpp",
+ "stagefright.cpp",
+ "jpeg.cpp",
+ "SineSource.cpp",
+ ],
+
+ header_libs: [
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "libmedia",
+ "libmedia_codeclist",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libjpeg",
+ "libui",
+ "libgui",
+ "libcutils",
+ "liblog",
+ "libhidlbase",
+ "libdatasource",
+ "libaudioclient",
+ "android.hardware.media.omx@1.0",
+ "framework-permission-aidl-cpp",
+ ],
+
+ static_libs: ["framework-permission-aidl-cpp"],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+
+ system_ext_specific: true,
+}
+
+cc_binary {
+ name: "record",
+
+ srcs: [
+ "AudioPlayer.cpp",
+ "SineSource.cpp",
+ "record.cpp",
+ ],
+
+ header_libs: [
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ "camera_headers",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "libmedia",
+ "liblog",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libdatasource",
+ "libaudioclient",
+ "framework-permission-aidl-cpp",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+}
+
+cc_binary {
+ name: "recordvideo",
+
+ srcs: [
+ "AudioPlayer.cpp",
+ "recordvideo.cpp",
+ ],
+
+ header_libs: [
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "libmedia",
+ "liblog",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libaudioclient",
+ "framework-permission-aidl-cpp",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+}
+
+cc_binary {
+ name: "audioloop",
+
+ srcs: [
+ "AudioPlayer.cpp",
+ "SineSource.cpp",
+ "audioloop.cpp",
+ ],
+
+ header_libs: [
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "libmedia",
+ "liblog",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libaudioclient",
+ "framework-permission-aidl-cpp",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+}
+
+cc_binary {
+ name: "stream",
+
+ srcs: ["stream.cpp"],
+
+ header_libs: [
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "liblog",
+ "libutils",
+ "libbinder",
+ "libui",
+ "libgui",
+ "libstagefright_foundation",
+ "libmedia",
+ "libcutils",
+ "libdatasource",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+}
+
+cc_binary {
+ name: "codec",
+
+ srcs: [
+ "codec.cpp",
+ "SimplePlayer.cpp",
+ ],
+
+ header_libs: [
+ "libmediadrm_headers",
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "liblog",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libmedia",
+ "libmedia_omx",
+ "libaudioclient",
+ "libui",
+ "libgui",
+ "libcutils",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+}
+
+cc_binary {
+ name: "mediafilter",
+
+ srcs: [
+ "filters/argbtorgba.rscript",
+ "filters/nightvision.rscript",
+ "filters/saturation.rscript",
+ "mediafilter.cpp",
+ ],
+
+ header_libs: [
+ "libmediadrm_headers",
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ "rs-headers",
+ ],
+
+ include_dirs: ["frameworks/av/media/libstagefright"],
+
+ shared_libs: [
+ "libstagefright",
+ "liblog",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libmedia_omx",
+ "libui",
+ "libgui",
+ "libRScpp",
+ ],
+
+ static_libs: ["libstagefright_mediafilter"],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+
+ sanitize: {
+ cfi: true,
+ },
+}
+
+cc_binary {
+ name: "muxer",
+
+ srcs: ["muxer.cpp"],
+
+ header_libs: [
+ "libmediametrics_headers",
+ "libstagefright_headers",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "liblog",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libcutils",
+ "libc",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ ],
+}
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
deleted file mode 100644
index 803c4a4..0000000
--- a/cmds/stagefright/Android.mk
+++ /dev/null
@@ -1,276 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioPlayer.cpp \
- stagefright.cpp \
- jpeg.cpp \
- SineSource.cpp
-
-LOCAL_HEADER_LIBRARIES := \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia libmedia_codeclist libutils libbinder \
- libstagefright_foundation libjpeg libui libgui libcutils liblog \
- libhidlbase libdatasource libaudioclient \
- android.hardware.media.omx@1.0 \
- framework-permission-aidl-cpp
-
-LOCAL_STATIC_LIBRARIES := framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/av/media/libstagefright/include \
- frameworks/native/include/media/openmax \
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_SYSTEM_EXT_MODULE:= true
-LOCAL_MODULE:= stagefright
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioPlayer.cpp \
- SineSource.cpp \
- record.cpp
-
-LOCAL_HEADER_LIBRARIES := \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder \
- libstagefright_foundation libdatasource libaudioclient \
- framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/camera/include \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax \
- frameworks/native/include/media/hardware
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= record
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioPlayer.cpp \
- recordvideo.cpp
-
-LOCAL_HEADER_LIBRARIES := \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder \
- libstagefright_foundation libaudioclient
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax \
- frameworks/native/include/media/hardware \
- framework-permission-aidl-cpp
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= recordvideo
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioPlayer.cpp \
- SineSource.cpp \
- audioloop.cpp
-
-LOCAL_HEADER_LIBRARIES := \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder \
- libstagefright_foundation libaudioclient \
- framework-permission-aidl-cpp
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= audioloop
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- stream.cpp \
-
-LOCAL_HEADER_LIBRARIES := \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libui libgui \
- libstagefright_foundation libmedia libcutils libdatasource
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= stream
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- codec.cpp \
- SimplePlayer.cpp \
-
-LOCAL_HEADER_LIBRARIES := \
- libmediadrm_headers \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libmedia_omx libaudioclient libui libgui libcutils
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= codec
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- filters/argbtorgba.rscript \
- filters/nightvision.rscript \
- filters/saturation.rscript \
- mediafilter.cpp \
-
-LOCAL_HEADER_LIBRARIES := \
- libmediadrm_headers \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright \
- liblog \
- libutils \
- libbinder \
- libstagefright_foundation \
- libmedia_omx \
- libui \
- libgui \
- libRScpp \
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax \
- frameworks/rs/cpp \
- frameworks/rs \
-
-intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
-LOCAL_C_INCLUDES += $(intermediates)
-
-LOCAL_STATIC_LIBRARIES:= \
- libstagefright_mediafilter
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= mediafilter
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-LOCAL_SANITIZE := cfi
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- muxer.cpp \
-
-LOCAL_HEADER_LIBRARIES := \
- libmediametrics_headers \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libstagefright_foundation \
- libcutils libc
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= muxer
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/NOTICE
-
-include $(BUILD_EXECUTABLE)
diff --git a/cmds/stagefright/AudioPlayer.cpp b/cmds/stagefright/AudioPlayer.cpp
index 55427ca..a63bde6 100644
--- a/cmds/stagefright/AudioPlayer.cpp
+++ b/cmds/stagefright/AudioPlayer.cpp
@@ -249,7 +249,8 @@
mAudioTrack = new AudioTrack(
AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
- 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
+ 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE,
+ wp<IAudioTrackCallback>::fromExisting(this),
0 /*notificationFrames*/);
if ((err = mAudioTrack->initCheck()) != OK) {
@@ -397,10 +398,6 @@
mStartPosUs = 0;
}
-// static
-void AudioPlayer::AudioCallback(int event, void *user, void *info) {
- static_cast<AudioPlayer *>(user)->AudioCallback(event, info);
-}
bool AudioPlayer::reachedEOS(status_t *finalStatus) {
*finalStatus = OK;
@@ -455,20 +452,12 @@
return 0;
}
-void AudioPlayer::AudioCallback(int event, void *info) {
- switch (event) {
- case AudioTrack::EVENT_MORE_DATA:
- {
- AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
- size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
- buffer->size = numBytesWritten;
- }
- break;
+size_t AudioPlayer::onMoreData(const AudioTrack::Buffer& buffer) {
+ return fillBuffer(buffer.raw, buffer.size);
+}
- case AudioTrack::EVENT_STREAM_END:
- mReachedEOS = true;
- break;
- }
+void AudioPlayer::onStreamEnd() {
+ mReachedEOS = true;
}
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
diff --git a/cmds/stagefright/AudioPlayer.h b/cmds/stagefright/AudioPlayer.h
index 43550ea..608f54b 100644
--- a/cmds/stagefright/AudioPlayer.h
+++ b/cmds/stagefright/AudioPlayer.h
@@ -19,6 +19,7 @@
#define AUDIO_PLAYER_H_
#include <media/AudioResamplerPublic.h>
+#include <media/AudioTrack.h>
#include <media/stagefright/MediaSource.h>
#include <media/MediaPlayerInterface.h>
#include <media/stagefright/MediaBuffer.h>
@@ -26,10 +27,9 @@
namespace android {
-class AudioTrack;
struct AwesomePlayer;
-class AudioPlayer {
+class AudioPlayer : AudioTrack::IAudioTrackCallback {
public:
enum {
REACHED_EOS,
@@ -66,6 +66,9 @@
status_t getPlaybackRate(AudioPlaybackRate *rate /* nonnull */);
private:
+ friend sp<AudioPlayer>;
+ size_t onMoreData(const AudioTrack::Buffer& buffer) override;
+ void onStreamEnd() override;
sp<MediaSource> mSource;
sp<AudioTrack> mAudioTrack;
@@ -99,9 +102,6 @@
int64_t mStartPosUs;
const uint32_t mCreateFlags;
- static void AudioCallback(int event, void *user, void *info);
- void AudioCallback(int event, void *info);
-
static size_t AudioSinkCallback(
MediaPlayerBase::AudioSink *audioSink,
void *data, size_t size, void *me,
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 4b41ff8..83f8fe9 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -166,9 +166,9 @@
sp<MediaSource> decoder = SimpleDecodingSource::Create(encoder);
if (playToSpeaker) {
- AudioPlayer player(NULL);
- player.setSource(decoder);
- player.start();
+ sp<AudioPlayer> player = sp<AudioPlayer>::make(nullptr);
+ player->setSource(decoder);
+ player->start();
sleep(duration);
ALOGI("Line: %d", __LINE__);
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 098c278..5743ad6 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -32,7 +32,6 @@
#include <media/stagefright/SimpleDecodingSource.h>
#include <media/MediaPlayerInterface.h>
-#include "AudioPlayer.h"
using namespace android;
@@ -274,17 +273,6 @@
const int32_t kNumChannels = 2;
sp<MediaSource> audioSource = new SineSource(kSampleRate, kNumChannels);
-#if 0
- sp<MediaPlayerBase::AudioSink> audioSink;
- AudioPlayer *player = new AudioPlayer(audioSink);
- player->setSource(audioSource);
- player->start();
-
- sleep(10);
-
- player->stop();
-#endif
-
sp<AMessage> encMeta = new AMessage;
encMeta->setString("mime",
0 ? MEDIA_MIMETYPE_AUDIO_AMR_WB : MEDIA_MIMETYPE_AUDIO_AAC);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index c430f05..ec16bc2 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -220,7 +220,7 @@
}
if (gPlaybackAudio) {
- AudioPlayer *player = new AudioPlayer(NULL);
+ sp<AudioPlayer> player = sp<AudioPlayer>::make(nullptr);
player->setSource(rawSource);
rawSource.clear();
@@ -235,9 +235,6 @@
fprintf(stderr, "unable to start playback err=%d (0x%08x)\n", err, err);
}
- delete player;
- player = NULL;
-
return;
} else if (gReproduceBug >= 3 && gReproduceBug <= 5) {
int64_t durationUs;
diff --git a/drm/drmserver/drmserver.rc b/drm/drmserver/drmserver.rc
index eb176c1..0319ff9 100644
--- a/drm/drmserver/drmserver.rc
+++ b/drm/drmserver/drmserver.rc
@@ -3,7 +3,7 @@
class main
user drm
group drm system inet drmrpc readproc
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
on property:drm.service.enabled=true
start drm
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 0ffe626..71df58c 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -66,7 +66,7 @@
],
static_libs: [
- "resourcemanager_aidl_interface-ndk_platform",
+ "resourcemanager_aidl_interface-ndk",
],
export_shared_lib_headers: [
diff --git a/drm/libmediadrm/fuzzer/Android.bp b/drm/libmediadrm/fuzzer/Android.bp
index 7281066..49bbad4 100644
--- a/drm/libmediadrm/fuzzer/Android.bp
+++ b/drm/libmediadrm/fuzzer/Android.bp
@@ -35,7 +35,7 @@
static_libs: [
"libmediadrm",
"liblog",
- "resourcemanager_aidl_interface-ndk_platform",
+ "resourcemanager_aidl_interface-ndk",
],
header_libs: [
"libmedia_headers",
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index 7bd1568..fd4ef95 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,16 +1,19 @@
{
"presubmit": [
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaDrmTestCases",
"options" : [
{
"include-annotation": "android.platform.test.annotations.Presubmit"
},
{
- "include-filter": "android.media.cts.MediaDrmClearkeyTest"
+ "include-filter": "android.mediadrm.cts.MediaDrmClearkeyTest"
},
{
- "include-filter": "android.media.cts.MediaDrmMetricsTest"
+ "include-filter": "android.mediadrm.cts.MediaDrmMetricsTest"
+ },
+ {
+ "include-filter": "android.mediadrm.cts.NativeMediaDrmClearkeyTest"
}
]
}
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
index 6ac3510..089eb1c 100644
--- a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -207,6 +207,7 @@
}
infoMap.clear();
+ android::Mutex::Autolock lock(mPlayPolicyLock);
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
}
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
index aa9b59d..95f15ca 100644
--- a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -262,7 +262,7 @@
void initProperties();
void setPlayPolicy();
- android::Mutex mPlayPolicyLock;
+ mutable android::Mutex mPlayPolicyLock;
android::KeyedVector<String8, String8> mPlayPolicy;
android::KeyedVector<String8, String8> mStringProperties;
android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index bc7c3f2..0cd9375 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -221,7 +221,6 @@
if (requestString.find(kOfflineLicense) != std::string::npos) {
std::string emptyResponse;
std::string keySetIdString(keySetId.begin(), keySetId.end());
- Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.StoreLicense(keySetIdString,
DeviceFiles::kLicenseStateReleasing,
emptyResponse)) {
@@ -337,7 +336,6 @@
}
*keySetId = kKeySetIdPrefix + ByteArrayToHexString(
reinterpret_cast<const uint8_t*>(randomData.data()), randomData.size());
- Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.LicenseExists(*keySetId)) {
// collision, regenerate
ALOGV("Retry generating KeySetId");
@@ -395,7 +393,6 @@
if (status == Status::OK) {
if (isOfflineLicense) {
if (isRelease) {
- Mutex::Autolock lock(mFileHandleLock);
mFileHandle.DeleteLicense(keySetId);
mSessionLibrary->destroySession(session);
} else {
@@ -404,7 +401,6 @@
return Void();
}
- Mutex::Autolock lock(mFileHandleLock);
bool ok = mFileHandle.StoreLicense(
keySetId,
DeviceFiles::kLicenseStateActive,
@@ -459,7 +455,6 @@
DeviceFiles::LicenseState licenseState;
std::string offlineLicense;
Status status = Status::OK;
- Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.RetrieveLicense(std::string(keySetId.begin(), keySetId.end()),
&licenseState, &offlineLicense)) {
ALOGE("Failed to restore offline license");
@@ -582,7 +577,6 @@
Return<void> DrmPlugin::queryKeyStatus(
const hidl_vec<uint8_t>& sessionId,
queryKeyStatus_cb _hidl_cb) {
-
if (sessionId.size() == 0) {
// Returns empty key status KeyValue pair
_hidl_cb(Status::BAD_VALUE, hidl_vec<KeyValue>());
@@ -592,12 +586,14 @@
std::vector<KeyValue> infoMapVec;
infoMapVec.clear();
+ mPlayPolicyLock.lock();
KeyValue keyValuePair;
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
keyValuePair.key = mPlayPolicy[i].key;
keyValuePair.value = mPlayPolicy[i].value;
infoMapVec.push_back(keyValuePair);
}
+ mPlayPolicyLock.unlock();
_hidl_cb(Status::OK, toHidlVec(infoMapVec));
return Void();
}
@@ -768,8 +764,6 @@
}
Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
- Mutex::Autolock lock(mFileHandleLock);
-
std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
std::vector<KeySetId> keySetIds;
if (mMockError != Status_V1_2::OK) {
@@ -790,7 +784,6 @@
return toStatus_1_0(mMockError);
}
std::string licenseName(keySetId.begin(), keySetId.end());
- Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.DeleteLicense(licenseName)) {
return Status::OK;
}
@@ -799,8 +792,6 @@
Return<void> DrmPlugin::getOfflineLicenseState(const KeySetId& keySetId,
getOfflineLicenseState_cb _hidl_cb) {
- Mutex::Autolock lock(mFileHandleLock);
-
std::string licenseName(keySetId.begin(), keySetId.end());
DeviceFiles::LicenseState state;
std::string license;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index e61db3f..56910be 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -24,13 +24,11 @@
}
bool MemoryFileSystem::FileExists(const std::string& fileName) const {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
return result != mMemoryFileSystem.end();
}
ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
return static_cast<ssize_t>(result->second.getFileSize());
@@ -42,7 +40,6 @@
std::vector<std::string> MemoryFileSystem::ListFiles() const {
std::vector<std::string> list;
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
for (const auto& filename : mMemoryFileSystem) {
list.push_back(filename.first);
}
@@ -51,7 +48,6 @@
size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
std::string key = GetFileName(path);
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
std::string serializedHashFile = result->second.getContent();
@@ -65,7 +61,6 @@
size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
std::string key = GetFileName(path);
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(key);
@@ -75,7 +70,6 @@
}
bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(result);
@@ -87,7 +81,6 @@
}
bool MemoryFileSystem::RemoveAllFiles() {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
mMemoryFileSystem.clear();
return mMemoryFileSystem.empty();
}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
index 9afd3d7..ec4517d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
@@ -11,4 +11,4 @@
user media
group media mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
index c1abe7f..3b48cf2 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
@@ -10,4 +10,4 @@
user media
group media mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
index 1e0d431..6e64978 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
@@ -13,4 +13,4 @@
user media
group media mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
index 8130511..e302e1b 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
@@ -11,4 +11,4 @@
user media
group media mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc
index 46aba88..84a63a1 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service-lazy.clearkey.rc
@@ -15,4 +15,4 @@
user media
group media mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc
index 8186933..649599e 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.4-service.clearkey.rc
@@ -13,4 +13,4 @@
user media
group media mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index 5d6e3da..cb5c9fe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -432,8 +432,7 @@
mMockError = Status_V1_2::OK;
}
- DeviceFiles mFileHandle GUARDED_BY(mFileHandleLock);
- Mutex mFileHandleLock;
+ DeviceFiles mFileHandle;
Mutex mSecureStopLock;
CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
index a90d818..1d98860 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
@@ -5,9 +5,7 @@
#ifndef CLEARKEY_MEMORY_FILE_SYSTEM_H_
#define CLEARKEY_MEMORY_FILE_SYSTEM_H_
-#include <android-base/thread_annotations.h>
#include <map>
-#include <mutex>
#include <string>
#include "ClearKeyTypes.h"
@@ -51,12 +49,10 @@
size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
private:
- mutable std::mutex mMemoryFileSystemLock;
-
// License file name is made up of a unique keySetId, therefore,
// the filename can be used as the key to locate licenses in the
// memory file system.
- std::map<std::string, MemoryFile> mMemoryFileSystem GUARDED_BY(mMemoryFileSystemLock);
+ std::map<std::string, MemoryFile> mMemoryFileSystem;
std::string GetFileName(const std::string& path);
diff --git a/include/OWNERS b/include/OWNERS
index d6bd998..88de595 100644
--- a/include/OWNERS
+++ b/include/OWNERS
@@ -1,6 +1,5 @@
elaurent@google.com
-gkasten@google.com
hunga@google.com
jtinker@google.com
lajos@google.com
-marcone@google.com
+essick@google.com
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 200e92d..bd6db55 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -53,6 +53,83 @@
//EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded
+// for audio_track_cblk_t::mState, to match TrackBase.h
+static inline constexpr int CBLK_STATE_IDLE = 0;
+static inline constexpr int CBLK_STATE_PAUSING = 7;
+
+/**
+ * MirroredVariable is a local variable which simultaneously updates
+ * a mirrored storage location. This is useful for server side variables
+ * where a local copy is kept, but a client visible copy is offered through shared memory.
+ *
+ * We use std::atomic as the default container class to access this memory.
+ */
+template <typename T, template <typename> class Container = std::atomic>
+class MirroredVariable {
+ template <typename C>
+ struct Constraints {
+ // If setMirror is used with a different type U != T passed in,
+ // as a general rule, the Container must issue a memcpy to read or write
+ // (or its equivalent) to avoid possible strict aliasing issues.
+ // The memcpy also avoids gaps in structs and alignment issues with different types.
+ static constexpr bool ok_ = false; // Containers must specify constraints.
+ };
+ template <typename X>
+ struct Constraints<std::atomic<X>> {
+ // Atomics force read and write to memory.
+ static constexpr bool ok = std::is_same_v<X, T> ||
+ (std::atomic<X>::is_always_lock_free // no additional locking
+ && sizeof(std::atomic<X>) == sizeof(X) // layout identical to X.
+ && (std::is_arithmetic_v<X> || std::is_enum_v<X>)); // No gaps in the layout.
+ };
+
+static_assert(Constraints<Container<T>>::ok);
+public:
+ explicit MirroredVariable(const T& t) : t_{t} {}
+
+ // implicit conversion operator
+ operator T() const {
+ return t_;
+ }
+
+ MirroredVariable& operator=(const T& t) {
+ t_ = t;
+ if (mirror_ != nullptr) {
+ *mirror_ = t;
+ }
+ return *this;
+ }
+
+ template <typename U>
+ void setMirror(Container<U> *other_mirror) {
+ // Much of the concern is with T != U, however there are additional concerns
+ // when storage uses shared memory between processes. For atomics, it must be
+ // lock free.
+ static_assert(sizeof(U) == sizeof(T));
+ static_assert(alignof(U) == alignof(T));
+ static_assert(Constraints<Container<U>>::ok);
+ static_assert(sizeof(Container<U>) == sizeof(Container<T>));
+ static_assert(alignof(Container<U>) == alignof(Container<T>));
+ auto mirror = reinterpret_cast<Container<T>*>(other_mirror);
+ if (mirror_ != mirror) {
+ mirror_ = mirror;
+ if (mirror != nullptr) {
+ *mirror = t_;
+ }
+ }
+ }
+
+ void clear() {
+ mirror_ = nullptr;
+ }
+
+ MirroredVariable& operator&() const = delete;
+
+protected:
+ T t_{};
+ Container<T>* mirror_ = nullptr;
+};
+
struct AudioTrackSharedStreaming {
// similar to NBAIO MonoPipe
// in continuously incrementing frame units, take modulo buffer size, which must be a power of 2
@@ -188,6 +265,8 @@
volatile int32_t mFlags; // combinations of CBLK_*
+ std::atomic<int32_t> mState; // current TrackBase state.
+
public:
union {
AudioTrackSharedStreaming mStreaming;
@@ -198,6 +277,9 @@
// Cache line boundary (32 bytes)
};
+// TODO: ensure standard layout.
+// static_assert(std::is_standard_layout_v<audio_track_cblk_t>);
+
// ----------------------------------------------------------------------------
// Proxy for shared memory control block, to isolate callers from needing to know the details.
@@ -323,6 +405,7 @@
return mEpoch;
}
+ int32_t getState() const { return mCblk->mState; }
uint32_t getBufferSizeInFrames() const { return mBufferSizeInFrames; }
// See documentation for AudioTrack::setBufferSizeInFrames()
uint32_t setBufferSizeInFrames(uint32_t requestedSize);
diff --git a/media/OWNERS b/media/OWNERS
index 4cf4870..099729f 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -15,6 +15,7 @@
robertshih@google.com
taklee@google.com
wonsik@google.com
+ytai@google.com
# go/android-fwk-media-solutions for info on areas of ownership.
include platform/frameworks/av:/media/janitors/media_solutions_OWNERS
diff --git a/media/audioserver/Android.bp b/media/audioserver/Android.bp
index be25ffb..0b44700 100644
--- a/media/audioserver/Android.bp
+++ b/media/audioserver/Android.bp
@@ -25,7 +25,9 @@
],
shared_libs: [
+ "packagemanager_aidl-cpp",
"libaaudioservice",
+ "libaudioclient",
"libaudioflinger",
"libaudiopolicyservice",
"libaudioprocessing",
@@ -41,7 +43,6 @@
"libpowermanager",
"libutils",
"libvibrator",
-
],
// TODO check if we still need all of these include directories
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 8ee1efb..e3db5b4 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -17,11 +17,17 @@
#define LOG_TAG "audioserver"
//#define LOG_NDEBUG 0
+#include <algorithm>
+
#include <fcntl.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <cutils/properties.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/IAudioFlingerService.h>
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
@@ -30,7 +36,6 @@
#include <utils/Log.h>
// from include_dirs
-#include "aaudio/AAudioTesting.h" // aaudio_policy_t, AAUDIO_PROP_MMAP_POLICY, AAUDIO_POLICY_*
#include "AudioFlinger.h"
#include "AudioPolicyService.h"
#include "AAudioService.h"
@@ -39,6 +44,10 @@
using namespace android;
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
int main(int argc __unused, char **argv)
{
// TODO: update with refined parameters
@@ -73,10 +82,8 @@
IPCThreadState::self()->joinThreadPool();
for (;;) {
siginfo_t info;
- int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
- if (ret == EINTR) {
- continue;
- }
+ int ret = TEMP_FAILURE_RETRY(waitid(P_PID, childPid, &info,
+ WEXITED | WSTOPPED | WCONTINUED));
if (ret < 0) {
break;
}
@@ -146,10 +153,24 @@
// AAudioService should only be used in OC-MR1 and later.
// And only enable the AAudioService if the system MMAP policy explicitly allows it.
// This prevents a client from misusing AAudioService when it is not supported.
- aaudio_policy_t mmapPolicy = property_get_int32(AAUDIO_PROP_MMAP_POLICY,
- AAUDIO_POLICY_NEVER);
- if (mmapPolicy == AAUDIO_POLICY_AUTO || mmapPolicy == AAUDIO_POLICY_ALWAYS) {
+ // If we cannot get audio flinger here, there must be some serious problems. In that case,
+ // attempting to call audio flinger on a null pointer could make the process crash
+ // and attract attentions.
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ std::vector<AudioMMapPolicyInfo> policyInfos;
+ status_t status = af->getMmapPolicyInfos(
+ AudioMMapPolicyType::DEFAULT, &policyInfos);
+ // Initialize aaudio service when querying mmap policy succeeds and
+ // any of the policy supports MMAP.
+ if (status == NO_ERROR &&
+ std::any_of(policyInfos.begin(), policyInfos.end(), [](const auto& info) {
+ return info.mmapPolicy == AudioMMapPolicy::AUTO ||
+ info.mmapPolicy == AudioMMapPolicy::ALWAYS;
+ })) {
AAudioService::instantiate();
+ } else {
+ ALOGD("Do not init aaudio service, status %d, policy info size %zu",
+ status, policyInfos.size());
}
ProcessState::self()->startThreadPool();
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 6ac4210..16cb323 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -15,13 +15,24 @@
},
{
"exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
+ "name": "CtsMediaAudioTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
},
// TODO: b/149314419
{
- "exclude-filter": "android.media.cts.AudioPlaybackCaptureTest"
+ "exclude-filter": "android.media.audio.cts.AudioPlaybackCaptureTest"
},
{
- "exclude-filter": "android.media.cts.AudioRecordTest"
+ "exclude-filter": "android.media.audio.cts.AudioRecordTest"
}
]
}
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 4bc1777..b7a5686 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -245,6 +245,19 @@
})
.withSetter(CodedColorAspectsSetter, mColorAspects)
.build());
+
+ addParameter(
+ DefineParam(mPictureQuantization, C2_PARAMKEY_PICTURE_QUANTIZATION)
+ .withDefault(C2StreamPictureQuantizationTuning::output::AllocShared(
+ 0 /* flexCount */, 0u /* stream */))
+ .withFields({C2F(mPictureQuantization, m.values[0].type_).oneOf(
+ {C2Config::picture_type_t(I_FRAME),
+ C2Config::picture_type_t(P_FRAME),
+ C2Config::picture_type_t(B_FRAME)}),
+ C2F(mPictureQuantization, m.values[0].min).any(),
+ C2F(mPictureQuantization, m.values[0].max).any()})
+ .withSetter(PictureQuantizationSetter)
+ .build());
}
static C2R InputDelaySetter(
@@ -464,9 +477,69 @@
me.set().matrix = coded.v.matrix;
return C2R::Ok();
}
+ static C2R PictureQuantizationSetter(bool mayBlock,
+ C2P<C2StreamPictureQuantizationTuning::output> &me) {
+ (void)mayBlock;
+
+ // these are the ones we're going to set, so want them to default
+ // to the DEFAULT values for the codec
+ int32_t iMin = HEVC_QP_MIN, pMin = HEVC_QP_MIN, bMin = HEVC_QP_MIN;
+ int32_t iMax = HEVC_QP_MAX, pMax = HEVC_QP_MAX, bMax = HEVC_QP_MAX;
+
+ for (size_t i = 0; i < me.v.flexCount(); ++i) {
+ const C2PictureQuantizationStruct &layer = me.v.m.values[i];
+
+ // layerMin is clamped to [HEVC_QP_MIN, layerMax] to avoid error
+ // cases where layer.min > layer.max
+ int32_t layerMax = std::clamp(layer.max, HEVC_QP_MIN, HEVC_QP_MAX);
+ int32_t layerMin = std::clamp(layer.min, HEVC_QP_MIN, layerMax);
+ if (layer.type_ == C2Config::picture_type_t(I_FRAME)) {
+ iMax = layerMax;
+ iMin = layerMin;
+ ALOGV("iMin %d iMax %d", iMin, iMax);
+ } else if (layer.type_ == C2Config::picture_type_t(P_FRAME)) {
+ pMax = layerMax;
+ pMin = layerMin;
+ ALOGV("pMin %d pMax %d", pMin, pMax);
+ } else if (layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+ bMax = layerMax;
+ bMin = layerMin;
+ ALOGV("bMin %d bMax %d", bMin, bMax);
+ }
+ }
+
+ ALOGV("PictureQuantizationSetter(entry): i %d-%d p %d-%d b %d-%d",
+ iMin, iMax, pMin, pMax, bMin, bMax);
+
+ int32_t maxFrameQP = std::min(std::min(iMax, pMax), bMax);
+ int32_t minFrameQP = std::max(std::max(iMin, pMin), bMin);
+ if (minFrameQP > maxFrameQP) {
+ minFrameQP = maxFrameQP;
+ }
+
+ // put them back into the structure
+ for (size_t i = 0; i < me.v.flexCount(); ++i) {
+ const C2PictureQuantizationStruct &layer = me.v.m.values[i];
+
+ if (layer.type_ == C2Config::picture_type_t(I_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(P_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+ me.set().m.values[i].max = maxFrameQP;
+ me.set().m.values[i].min = minFrameQP;
+ }
+ }
+
+ ALOGV("PictureQuantizationSetter(exit): i = p = b = %d-%d",
+ minFrameQP, maxFrameQP);
+
+ return C2R::Ok();
+ }
std::shared_ptr<C2StreamColorAspectsInfo::output> getCodedColorAspects_l() {
return mCodedColorAspects;
}
+ std::shared_ptr<C2StreamPictureQuantizationTuning::output> getPictureQuantization_l() const {
+ return mPictureQuantization;
+ }
private:
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
@@ -482,6 +555,7 @@
std::shared_ptr<C2StreamGopTuning::output> mGop;
std::shared_ptr<C2StreamColorAspectsInfo::input> mColorAspects;
std::shared_ptr<C2StreamColorAspectsInfo::output> mCodedColorAspects;
+ std::shared_ptr<C2StreamPictureQuantizationTuning::output> mPictureQuantization;
};
static size_t GetCPUCoreCount() {
@@ -654,12 +728,41 @@
mEncParams.s_coding_tools_prms.i4_max_temporal_layers = 3;
}
- switch (mBitrateMode->value) {
- case C2Config::BITRATE_IGNORE:
- mEncParams.s_config_prms.i4_rate_control_mode = 3;
- mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
- getQpFromQuality(mQuality->value);
+ // we resolved out-of-bound and unspecified values in PictureQuantizationSetter()
+ // so we can start with defaults that are overridden as needed.
+ int32_t maxFrameQP = mEncParams.s_config_prms.i4_max_frame_qp;
+ int32_t minFrameQP = mEncParams.s_config_prms.i4_min_frame_qp;
+
+ for (size_t i = 0; i < mQpBounds->flexCount(); ++i) {
+ const C2PictureQuantizationStruct &layer = mQpBounds->m.values[i];
+
+ // no need to loop, hevc library takes same range for I/P/B picture type
+ if (layer.type_ == C2Config::picture_type_t(I_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(P_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+
+ maxFrameQP = layer.max;
+ minFrameQP = layer.min;
break;
+ }
+ }
+ mEncParams.s_config_prms.i4_max_frame_qp = maxFrameQP;
+ mEncParams.s_config_prms.i4_min_frame_qp = minFrameQP;
+
+ ALOGV("MaxFrameQp: %d MinFrameQp: %d", maxFrameQP, minFrameQP);
+
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
+ std::clamp(kDefaultInitQP, minFrameQP, maxFrameQP);
+
+ switch (mBitrateMode->value) {
+ case C2Config::BITRATE_IGNORE: {
+ mEncParams.s_config_prms.i4_rate_control_mode = 3;
+ // ensure initial qp values are within our newly configured bounds
+ int32_t frameQp = getQpFromQuality(mQuality->value);
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
+ std::clamp(frameQp, minFrameQP, maxFrameQP);
+ break;
+ }
case C2Config::BITRATE_CONST:
mEncParams.s_config_prms.i4_rate_control_mode = 5;
break;
@@ -723,6 +826,7 @@
mGop = mIntf->getGop_l();
mRequestSync = mIntf->getRequestSync_l();
mColorAspects = mIntf->getCodedColorAspects_l();
+ mQpBounds = mIntf->getPictureQuantization_l();;
}
c2_status_t status = initEncParams();
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
index 9dbf682..4217a8b 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.h
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -42,6 +42,11 @@
#define DEFAULT_B_FRAMES 0
#define DEFAULT_RC_LOOKAHEAD 0
+#define HEVC_QP_MIN 1
+#define HEVC_QP_MAX 51
+
+constexpr int32_t kDefaultInitQP = 32;
+
struct C2SoftHevcEnc : public SimpleC2Component {
class IntfImpl;
@@ -90,6 +95,7 @@
std::shared_ptr<C2StreamGopTuning::output> mGop;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
+ std::shared_ptr<C2StreamPictureQuantizationTuning::output> mQpBounds;
#ifdef FILE_DUMP_ENABLE
char mInFile[200];
char mOutFile[200];
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 7486d27..617769b 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -31,6 +31,255 @@
namespace android {
+C2SoftVpxEnc::IntfImpl::IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
+ : SimpleInterface<void>::BaseParams(
+ helper,
+ COMPONENT_NAME,
+ C2Component::KIND_ENCODER,
+ C2Component::DOMAIN_VIDEO,
+ MEDIA_MIMETYPE_VIDEO) {
+ noPrivateBuffers(); // TODO: account for our buffers here
+ noInputReferences();
+ noOutputReferences();
+ noInputLatency();
+ noTimeStretch();
+ setDerivedInstance(this);
+
+ addParameter(
+ DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
+ .withConstValue(new C2ComponentAttributesSetting(
+ C2Component::ATTRIB_IS_TEMPORAL))
+ .build());
+
+ addParameter(
+ DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+ .withConstValue(new C2StreamUsageTuning::input(
+ 0u, (uint64_t)C2MemoryUsage::CPU_READ))
+ .build());
+
+ addParameter(
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+ .withFields({
+ C2F(mSize, width).inRange(2, 2048, 2),
+ C2F(mSize, height).inRange(2, 2048, 2),
+ })
+ .withSetter(SizeSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mBitrateMode, C2_PARAMKEY_BITRATE_MODE)
+ .withDefault(new C2StreamBitrateModeTuning::output(
+ 0u, C2Config::BITRATE_VARIABLE))
+ .withFields({
+ C2F(mBitrateMode, value).oneOf({
+ C2Config::BITRATE_CONST, C2Config::BITRATE_VARIABLE })
+ })
+ .withSetter(
+ Setter<decltype(*mBitrateMode)>::StrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
+ .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+ // TODO: More restriction?
+ .withFields({C2F(mFrameRate, value).greaterThan(0.)})
+ .withSetter(
+ Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mLayering, C2_PARAMKEY_TEMPORAL_LAYERING)
+ .withDefault(C2StreamTemporalLayeringTuning::output::AllocShared(0u, 0, 0, 0))
+ .withFields({
+ C2F(mLayering, m.layerCount).inRange(0, 4),
+ C2F(mLayering, m.bLayerCount).inRange(0, 0),
+ C2F(mLayering, m.bitrateRatios).inRange(0., 1.)
+ })
+ .withSetter(LayeringSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mSyncFramePeriod, C2_PARAMKEY_SYNC_FRAME_INTERVAL)
+ .withDefault(new C2StreamSyncFrameIntervalTuning::output(0u, 1000000))
+ .withFields({C2F(mSyncFramePeriod, value).any()})
+ .withSetter(Setter<decltype(*mSyncFramePeriod)>::StrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
+ .withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
+ .withSetter(BitrateSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mIntraRefresh, C2_PARAMKEY_INTRA_REFRESH)
+ .withConstValue(new C2StreamIntraRefreshTuning::output(
+ 0u, C2Config::INTRA_REFRESH_DISABLED, 0.))
+ .build());
+#ifdef VP9
+ addParameter(
+ DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::output(
+ 0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
+ .withFields({
+ C2F(mProfileLevel, profile).equalTo(
+ PROFILE_VP9_0
+ ),
+ C2F(mProfileLevel, level).equalTo(
+ LEVEL_VP9_4_1),
+ })
+ .withSetter(ProfileLevelSetter)
+ .build());
+#else
+ addParameter(
+ DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::output(
+ 0u, PROFILE_VP8_0, LEVEL_UNUSED))
+ .withFields({
+ C2F(mProfileLevel, profile).equalTo(
+ PROFILE_VP8_0
+ ),
+ C2F(mProfileLevel, level).equalTo(
+ LEVEL_UNUSED),
+ })
+ .withSetter(ProfileLevelSetter)
+ .build());
+#endif
+ addParameter(
+ DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
+ .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
+ .withFields({C2F(mRequestSync, value).oneOf({ C2_FALSE, C2_TRUE }) })
+ .withSetter(Setter<decltype(*mRequestSync)>::NonStrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
+ .withDefault(new C2StreamColorAspectsInfo::input(
+ 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+ .withFields({
+ C2F(mColorAspects, range).inRange(
+ C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
+ C2F(mColorAspects, primaries).inRange(
+ C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
+ C2F(mColorAspects, transfer).inRange(
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::TRANSFER_OTHER),
+ C2F(mColorAspects, matrix).inRange(
+ C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)
+ })
+ .withSetter(ColorAspectsSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
+ .withDefault(new C2StreamColorAspectsInfo::output(
+ 0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+ .withFields({
+ C2F(mCodedColorAspects, range).inRange(
+ C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
+ C2F(mCodedColorAspects, primaries).inRange(
+ C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
+ C2F(mCodedColorAspects, transfer).inRange(
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::TRANSFER_OTHER),
+ C2F(mCodedColorAspects, matrix).inRange(
+ C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)
+ })
+ .withSetter(CodedColorAspectsSetter, mColorAspects)
+ .build());
+}
+
+C2R C2SoftVpxEnc::IntfImpl::BitrateSetter(bool mayBlock, C2P<C2StreamBitrateInfo::output> &me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
+ if (me.v.value < 4096) {
+ me.set().value = 4096;
+ }
+ return res;
+}
+
+C2R C2SoftVpxEnc::IntfImpl::SizeSetter(bool mayBlock,
+ const C2P<C2StreamPictureSizeInfo::input>& oldMe,
+ C2P<C2StreamPictureSizeInfo::input>& me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
+ if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+ me.set().width = oldMe.v.width;
+ }
+ if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+ me.set().height = oldMe.v.height;
+ }
+ return res;
+}
+
+C2R C2SoftVpxEnc::IntfImpl::ProfileLevelSetter(bool mayBlock,
+ C2P<C2StreamProfileLevelInfo::output>& me) {
+ (void)mayBlock;
+ if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
+ me.set().profile = PROFILE_VP9_0;
+ }
+ if (!me.F(me.v.level).supportsAtAll(me.v.level)) {
+ me.set().level = LEVEL_VP9_4_1;
+ }
+ return C2R::Ok();
+}
+
+C2R C2SoftVpxEnc::IntfImpl::LayeringSetter(bool mayBlock,
+ C2P<C2StreamTemporalLayeringTuning::output>& me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
+ if (me.v.m.layerCount > 4) {
+ me.set().m.layerCount = 4;
+ }
+ me.set().m.bLayerCount = 0;
+ // ensure ratios are monotonic and clamped between 0 and 1
+ for (size_t ix = 0; ix < me.v.flexCount(); ++ix) {
+ me.set().m.bitrateRatios[ix] = c2_clamp(
+ ix > 0 ? me.v.m.bitrateRatios[ix - 1] : 0, me.v.m.bitrateRatios[ix], 1.);
+ }
+ ALOGI("setting temporal layering %u + %u", me.v.m.layerCount, me.v.m.bLayerCount);
+ return res;
+}
+
+uint32_t C2SoftVpxEnc::IntfImpl::getSyncFramePeriod() const {
+ if (mSyncFramePeriod->value < 0 || mSyncFramePeriod->value == INT64_MAX) {
+ return 0;
+ }
+ double period = mSyncFramePeriod->value / 1e6 * mFrameRate->value;
+ return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
+}
+C2R C2SoftVpxEnc::IntfImpl::ColorAspectsSetter(bool mayBlock,
+ C2P<C2StreamColorAspectsInfo::input>& me) {
+ (void)mayBlock;
+ if (me.v.range > C2Color::RANGE_OTHER) {
+ me.set().range = C2Color::RANGE_OTHER;
+ }
+ if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+ me.set().primaries = C2Color::PRIMARIES_OTHER;
+ }
+ if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+ me.set().transfer = C2Color::TRANSFER_OTHER;
+ }
+ if (me.v.matrix > C2Color::MATRIX_OTHER) {
+ me.set().matrix = C2Color::MATRIX_OTHER;
+ }
+ return C2R::Ok();
+}
+C2R C2SoftVpxEnc::IntfImpl::CodedColorAspectsSetter(
+ bool mayBlock, C2P<C2StreamColorAspectsInfo::output>& me,
+ const C2P<C2StreamColorAspectsInfo::input>& coded) {
+ (void)mayBlock;
+ me.set().range = coded.v.range;
+ me.set().primaries = coded.v.primaries;
+ me.set().transfer = coded.v.transfer;
+ me.set().matrix = coded.v.matrix;
+ return C2R::Ok();
+}
+
#if 0
static size_t getCpuCoreCount() {
long cpuCoreCount = 1;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 926b2e9..e296c8f 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -237,259 +237,38 @@
class C2SoftVpxEnc::IntfImpl : public SimpleInterface<void>::BaseParams {
public:
- explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
- : SimpleInterface<void>::BaseParams(
- helper,
- COMPONENT_NAME,
- C2Component::KIND_ENCODER,
- C2Component::DOMAIN_VIDEO,
- MEDIA_MIMETYPE_VIDEO) {
- noPrivateBuffers(); // TODO: account for our buffers here
- noInputReferences();
- noOutputReferences();
- noInputLatency();
- noTimeStretch();
- setDerivedInstance(this);
-
- addParameter(
- DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
- .withConstValue(new C2ComponentAttributesSetting(
- C2Component::ATTRIB_IS_TEMPORAL))
- .build());
-
- addParameter(
- DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
- .withConstValue(new C2StreamUsageTuning::input(
- 0u, (uint64_t)C2MemoryUsage::CPU_READ))
- .build());
-
- addParameter(
- DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
- .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
- .withFields({
- C2F(mSize, width).inRange(2, 2048, 2),
- C2F(mSize, height).inRange(2, 2048, 2),
- })
- .withSetter(SizeSetter)
- .build());
-
- addParameter(
- DefineParam(mBitrateMode, C2_PARAMKEY_BITRATE_MODE)
- .withDefault(new C2StreamBitrateModeTuning::output(
- 0u, C2Config::BITRATE_VARIABLE))
- .withFields({
- C2F(mBitrateMode, value).oneOf({
- C2Config::BITRATE_CONST, C2Config::BITRATE_VARIABLE })
- })
- .withSetter(
- Setter<decltype(*mBitrateMode)>::StrictValueWithNoDeps)
- .build());
-
- addParameter(
- DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
- .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
- // TODO: More restriction?
- .withFields({C2F(mFrameRate, value).greaterThan(0.)})
- .withSetter(
- Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
- .build());
-
- addParameter(
- DefineParam(mLayering, C2_PARAMKEY_TEMPORAL_LAYERING)
- .withDefault(C2StreamTemporalLayeringTuning::output::AllocShared(0u, 0, 0, 0))
- .withFields({
- C2F(mLayering, m.layerCount).inRange(0, 4),
- C2F(mLayering, m.bLayerCount).inRange(0, 0),
- C2F(mLayering, m.bitrateRatios).inRange(0., 1.)
- })
- .withSetter(LayeringSetter)
- .build());
-
- addParameter(
- DefineParam(mSyncFramePeriod, C2_PARAMKEY_SYNC_FRAME_INTERVAL)
- .withDefault(new C2StreamSyncFrameIntervalTuning::output(0u, 1000000))
- .withFields({C2F(mSyncFramePeriod, value).any()})
- .withSetter(Setter<decltype(*mSyncFramePeriod)>::StrictValueWithNoDeps)
- .build());
-
- addParameter(
- DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
- .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
- .withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
- .withSetter(BitrateSetter)
- .build());
-
- addParameter(
- DefineParam(mIntraRefresh, C2_PARAMKEY_INTRA_REFRESH)
- .withConstValue(new C2StreamIntraRefreshTuning::output(
- 0u, C2Config::INTRA_REFRESH_DISABLED, 0.))
- .build());
-#ifdef VP9
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::output(
- 0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
- .withFields({
- C2F(mProfileLevel, profile).equalTo(
- PROFILE_VP9_0
- ),
- C2F(mProfileLevel, level).equalTo(
- LEVEL_VP9_4_1),
- })
- .withSetter(ProfileLevelSetter)
- .build());
-#else
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::output(
- 0u, PROFILE_VP8_0, LEVEL_UNUSED))
- .withFields({
- C2F(mProfileLevel, profile).equalTo(
- PROFILE_VP8_0
- ),
- C2F(mProfileLevel, level).equalTo(
- LEVEL_UNUSED),
- })
- .withSetter(ProfileLevelSetter)
- .build());
-#endif
- addParameter(
- DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
- .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
- .withFields({C2F(mRequestSync, value).oneOf({ C2_FALSE, C2_TRUE }) })
- .withSetter(Setter<decltype(*mRequestSync)>::NonStrictValueWithNoDeps)
- .build());
-
- addParameter(
- DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
- .withDefault(new C2StreamColorAspectsInfo::input(
- 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
- .withFields({
- C2F(mColorAspects, range).inRange(
- C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
- C2F(mColorAspects, primaries).inRange(
- C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
- C2F(mColorAspects, transfer).inRange(
- C2Color::TRANSFER_UNSPECIFIED, C2Color::TRANSFER_OTHER),
- C2F(mColorAspects, matrix).inRange(
- C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)
- })
- .withSetter(ColorAspectsSetter)
- .build());
-
- addParameter(
- DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
- .withDefault(new C2StreamColorAspectsInfo::output(
- 0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
- .withFields({
- C2F(mCodedColorAspects, range).inRange(
- C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
- C2F(mCodedColorAspects, primaries).inRange(
- C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
- C2F(mCodedColorAspects, transfer).inRange(
- C2Color::TRANSFER_UNSPECIFIED, C2Color::TRANSFER_OTHER),
- C2F(mCodedColorAspects, matrix).inRange(
- C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)
- })
- .withSetter(CodedColorAspectsSetter, mColorAspects)
- .build());
- }
-
- static C2R BitrateSetter(bool mayBlock, C2P<C2StreamBitrateInfo::output> &me) {
- (void)mayBlock;
- C2R res = C2R::Ok();
- if (me.v.value <= 4096) {
- me.set().value = 4096;
- }
- return res;
- }
+ explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper);
+ static C2R BitrateSetter(bool mayBlock, C2P<C2StreamBitrateInfo::output> &me);
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::input> &oldMe,
- C2P<C2StreamPictureSizeInfo::input> &me) {
- (void)mayBlock;
- C2R res = C2R::Ok();
- if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
- res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
- me.set().width = oldMe.v.width;
- }
- if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
- res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
- me.set().height = oldMe.v.height;
- }
- return res;
- }
+ C2P<C2StreamPictureSizeInfo::input> &me);
static C2R ProfileLevelSetter(
bool mayBlock,
- C2P<C2StreamProfileLevelInfo::output> &me) {
- (void)mayBlock;
- if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
- me.set().profile = PROFILE_VP9_0;
- }
- if (!me.F(me.v.level).supportsAtAll(me.v.level)) {
- me.set().level = LEVEL_VP9_4_1;
- }
- return C2R::Ok();
- }
+ C2P<C2StreamProfileLevelInfo::output> &me);
- static C2R LayeringSetter(bool mayBlock, C2P<C2StreamTemporalLayeringTuning::output>& me) {
- (void)mayBlock;
- C2R res = C2R::Ok();
- if (me.v.m.layerCount > 4) {
- me.set().m.layerCount = 4;
- }
- me.set().m.bLayerCount = 0;
- // ensure ratios are monotonic and clamped between 0 and 1
- for (size_t ix = 0; ix < me.v.flexCount(); ++ix) {
- me.set().m.bitrateRatios[ix] = c2_clamp(
- ix > 0 ? me.v.m.bitrateRatios[ix - 1] : 0, me.v.m.bitrateRatios[ix], 1.);
- }
- ALOGI("setting temporal layering %u + %u", me.v.m.layerCount, me.v.m.bLayerCount);
- return res;
- }
+ static C2R LayeringSetter(bool mayBlock, C2P<C2StreamTemporalLayeringTuning::output>& me);
// unsafe getters
std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const { return mSize; }
- std::shared_ptr<C2StreamIntraRefreshTuning::output> getIntraRefresh_l() const { return mIntraRefresh; }
+ std::shared_ptr<C2StreamIntraRefreshTuning::output> getIntraRefresh_l() const {
+ return mIntraRefresh;
+ }
std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const { return mFrameRate; }
std::shared_ptr<C2StreamBitrateInfo::output> getBitrate_l() const { return mBitrate; }
- std::shared_ptr<C2StreamBitrateModeTuning::output> getBitrateMode_l() const { return mBitrateMode; }
- std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const { return mRequestSync; }
- std::shared_ptr<C2StreamTemporalLayeringTuning::output> getTemporalLayers_l() const { return mLayering; }
- uint32_t getSyncFramePeriod() const {
- if (mSyncFramePeriod->value < 0 || mSyncFramePeriod->value == INT64_MAX) {
- return 0;
- }
- double period = mSyncFramePeriod->value / 1e6 * mFrameRate->value;
- return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
+ std::shared_ptr<C2StreamBitrateModeTuning::output> getBitrateMode_l() const {
+ return mBitrateMode;
}
- static C2R ColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::input> &me) {
- (void)mayBlock;
- if (me.v.range > C2Color::RANGE_OTHER) {
- me.set().range = C2Color::RANGE_OTHER;
- }
- if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
- me.set().primaries = C2Color::PRIMARIES_OTHER;
- }
- if (me.v.transfer > C2Color::TRANSFER_OTHER) {
- me.set().transfer = C2Color::TRANSFER_OTHER;
- }
- if (me.v.matrix > C2Color::MATRIX_OTHER) {
- me.set().matrix = C2Color::MATRIX_OTHER;
- }
- return C2R::Ok();
+ std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const {
+ return mRequestSync;
}
+ std::shared_ptr<C2StreamTemporalLayeringTuning::output> getTemporalLayers_l() const {
+ return mLayering;
+ }
+ uint32_t getSyncFramePeriod() const;
+ static C2R ColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::input> &me);
static C2R CodedColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::output> &me,
- const C2P<C2StreamColorAspectsInfo::input> &coded) {
- (void)mayBlock;
- me.set().range = coded.v.range;
- me.set().primaries = coded.v.primaries;
- me.set().transfer = coded.v.transfer;
- me.set().matrix = coded.v.matrix;
- return C2R::Ok();
- }
+ const C2P<C2StreamColorAspectsInfo::input> &coded);
private:
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 2cc7ab7..feaa98c 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -1673,7 +1673,7 @@
SYNC_FRAME = (1 << 0), ///< sync frame, e.g. IDR
I_FRAME = (1 << 1), ///< intra frame that is completely encoded
P_FRAME = (1 << 2), ///< inter predicted frame from previous frames
- B_FRAME = (1 << 3), ///< backward predicted (out-of-order) frame
+ B_FRAME = (1 << 3), ///< bidirectional predicted (out-of-order) frame
)
/**
diff --git a/media/codec2/fuzzer/C2Fuzzer.cpp b/media/codec2/fuzzer/C2Fuzzer.cpp
index 51e1013..e469d8b 100644
--- a/media/codec2/fuzzer/C2Fuzzer.cpp
+++ b/media/codec2/fuzzer/C2Fuzzer.cpp
@@ -194,12 +194,12 @@
}
std::vector<C2Param*> configParams;
+ C2StreamPictureSizeInfo::input inputSize(0u, kWidthOfVideo, kHeightOfVideo);
+ C2StreamSampleRateInfo::output sampleRateInfo(0u, kSamplingRateOfAudio);
+ C2StreamChannelCountInfo::output channelCountInfo(0u, kChannelsOfAudio);
if (domain.value == DOMAIN_VIDEO) {
- C2StreamPictureSizeInfo::input inputSize(0u, kWidthOfVideo, kHeightOfVideo);
configParams.push_back(&inputSize);
} else if (domain.value == DOMAIN_AUDIO) {
- C2StreamSampleRateInfo::output sampleRateInfo(0u, kSamplingRateOfAudio);
- C2StreamChannelCountInfo::output channelCountInfo(0u, kChannelsOfAudio);
configParams.push_back(&sampleRateInfo);
configParams.push_back(&channelCountInfo);
}
@@ -239,17 +239,17 @@
}
void Codec2Fuzzer::decodeFrames(const uint8_t* data, size_t size) {
- mBufferSource = new BufferSource(data, size);
- if (!mBufferSource) {
+ std::unique_ptr<BufferSource> bufferSource = std::make_unique<BufferSource>(data, size);
+ if (!bufferSource) {
return;
}
- mBufferSource->parse();
+ bufferSource->parse();
c2_status_t status = C2_OK;
size_t numFrames = 0;
- while (!mBufferSource->isEos()) {
+ while (!bufferSource->isEos()) {
uint8_t* frame = nullptr;
size_t frameSize = 0;
- FrameData frameData = mBufferSource->getFrame();
+ FrameData frameData = bufferSource->getFrame();
frame = std::get<0>(frameData);
frameSize = std::get<1>(frameData);
@@ -298,7 +298,6 @@
mConditionalVariable.wait_for(waitForDecodeComplete, kC2FuzzerTimeOut, [this] { return mEos; });
std::list<std::unique_ptr<C2Work>> c2flushedWorks;
mComponent->flush_sm(C2Component::FLUSH_COMPONENT, &c2flushedWorks);
- delete mBufferSource;
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
diff --git a/media/codec2/fuzzer/C2Fuzzer.h b/media/codec2/fuzzer/C2Fuzzer.h
index d5ac81a..da76885 100644
--- a/media/codec2/fuzzer/C2Fuzzer.h
+++ b/media/codec2/fuzzer/C2Fuzzer.h
@@ -104,7 +104,6 @@
static constexpr size_t kMarkerSuffixSize = 3;
};
- BufferSource* mBufferSource;
bool mEos = false;
C2BlockPool::local_id_t mBlockPoolId;
diff --git a/media/codec2/hidl/1.0/vts/.clang-format b/media/codec2/hidl/1.0/vts/.clang-format
deleted file mode 120000
index 136279c..0000000
--- a/media/codec2/hidl/1.0/vts/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/OWNERS b/media/codec2/hidl/1.0/vts/OWNERS
index dbe89cf..32b11b8 100644
--- a/media/codec2/hidl/1.0/vts/OWNERS
+++ b/media/codec2/hidl/1.0/vts/OWNERS
@@ -1,8 +1,5 @@
+# Bug component: 25690
# Media team
lajos@google.com
-pawin@google.com
taklee@google.com
wonsik@google.com
-
-# VTS team
-dshi@google.com
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 9e3a823..d47ef67 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -465,6 +465,11 @@
if (mMime.find("raw") != std::string::npos) {
bitStreamInfo[0] = 8000;
bitStreamInfo[1] = 1;
+ } else if ((mMime.find("g711-alaw") != std::string::npos) ||
+ (mMime.find("g711-mlaw") != std::string::npos)) {
+ // g711 test data is all 1-channel and has no embedded config info.
+ bitStreamInfo[0] = 8000;
+ bitStreamInfo[1] = 1;
} else {
ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
}
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index ffec897..275a721 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -61,6 +61,7 @@
public:
virtual void SetUp() override {
getParams();
+ mDisableTest = false;
mEos = false;
mClient = android::Codec2Client::CreateFromService(mInstanceName.c_str());
ASSERT_NE(mClient, nullptr);
@@ -73,6 +74,14 @@
for (int i = 0; i < MAX_INPUT_BUFFERS; ++i) {
mWorkQueue.emplace_back(new C2Work);
}
+
+ C2SecureModeTuning secureModeTuning{};
+ mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
+ if (secureModeTuning.value != C2Config::SM_UNPROTECTED) {
+ mDisableTest = true;
+ }
+
+ if (mDisableTest) std::cout << "[ WARN ] Test Disabled \n";
}
virtual void TearDown() override {
@@ -105,6 +114,7 @@
std::string mInstanceName;
std::string mComponentName;
bool mEos;
+ bool mDisableTest;
std::mutex mQueueLock;
std::condition_variable mQueueCondition;
std::list<std::unique_ptr<C2Work>> mWorkQueue;
@@ -324,6 +334,7 @@
};
TEST_P(Codec2ComponentInputTests, InputBufferTest) {
+ if (mDisableTest) GTEST_SKIP() << "Test is disabled";
description("Tests for different inputs");
uint32_t flags = std::get<2>(GetParam());
diff --git a/media/codec2/hidl/services/Android.bp b/media/codec2/hidl/services/Android.bp
index bb9f51f..b36e80a 100644
--- a/media/codec2/hidl/services/Android.bp
+++ b/media/codec2/hidl/services/Android.bp
@@ -52,7 +52,7 @@
// minijail is used to protect against unexpected system calls.
shared_libs: [
- "libavservices_minijail_vendor",
+ "libavservices_minijail",
"libbinder",
],
required: ["android.hardware.media.c2@1.2-default-seccomp_policy"],
diff --git a/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc b/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc
index 03f6e3d..12da593 100644
--- a/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc
+++ b/media/codec2/hidl/services/android.hardware.media.c2@1.2-default-service.rc
@@ -3,5 +3,5 @@
user mediacodec
group camera mediadrm drmrpc
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 82460c9..44a2c5b 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1332,8 +1332,8 @@
}
}
- // set channel-mask
if (config->mDomain & Config::IS_AUDIO) {
+ // set channel-mask
int32_t mask;
if (msg->findInt32(KEY_CHANNEL_MASK, &mask)) {
if (config->mDomain & Config::IS_ENCODER) {
@@ -1342,6 +1342,15 @@
config->mOutputFormat->setInt32(KEY_CHANNEL_MASK, mask);
}
}
+
+ // set PCM encoding
+ int32_t pcmEncoding = kAudioEncodingPcm16bit;
+ msg->findInt32(KEY_PCM_ENCODING, &pcmEncoding);
+ if (encoder) {
+ config->mInputFormat->setInt32("android._config-pcm-encoding", pcmEncoding);
+ } else {
+ config->mOutputFormat->setInt32("android._config-pcm-encoding", pcmEncoding);
+ }
}
std::unique_ptr<C2Param> colorTransferRequestParam;
@@ -1421,6 +1430,10 @@
}
}
+ if (config->mTunneled) {
+ config->mOutputFormat->setInt32("android._tunneled", 1);
+ }
+
ALOGD("setup formats input: %s",
config->mInputFormat->debugString().c_str());
ALOGD("setup formats output: %s",
@@ -1896,9 +1909,11 @@
{
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
+ sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
+ status_t err = OK;
+
if (config->mTunneled && config->mSidebandHandle != nullptr) {
- sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
- status_t err = native_window_set_sideband_stream(
+ err = native_window_set_sideband_stream(
nativeWindow.get(),
const_cast<native_handle_t *>(config->mSidebandHandle->handle()));
if (err != OK) {
@@ -1906,6 +1921,15 @@
nativeWindow.get(), config->mSidebandHandle->handle(), err);
return err;
}
+ } else {
+ // Explicitly reset the sideband handle of the window for
+ // non-tunneled video in case the window was previously used
+ // for a tunneled video playback.
+ err = native_window_set_sideband_stream(nativeWindow.get(), nullptr);
+ if (err != OK) {
+ ALOGE("native_window_set_sideband_stream(nullptr) failed! (err %d).", err);
+ return err;
+ }
}
}
return mChannel->setSurface(surface);
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index e9adfc9..23a326f 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <algorithm>
+#include <atomic>
#include <list>
#include <numeric>
@@ -155,6 +156,7 @@
input->pipelineDelay = 0u;
input->numSlots = kSmoothnessFactor;
input->numExtraSlots = 0u;
+ input->lastFlushIndex = 0u;
}
{
Mutexed<Output>::Locked output(mOutput);
@@ -252,7 +254,7 @@
bool released = input->buffers->releaseBuffer(buffer, nullptr, true);
ALOGV("[%s] queueInputBuffer: buffer copied; %sreleased",
mName, released ? "" : "not ");
- buffer.clear();
+ buffer = copy;
} else {
ALOGW("[%s] queueInputBuffer: failed to copy a buffer; this may cause input "
"buffer starvation on component.", mName);
@@ -280,6 +282,12 @@
}
}
} else if (eos) {
+ Mutexed<Input>::Locked input(mInput);
+ if (input->frameReassembler) {
+ usesFrameReassembler = true;
+ // drain any pending items with eos
+ input->frameReassembler.process(buffer, &items);
+ }
flags |= C2FrameData::FLAG_END_OF_STREAM;
}
if (usesFrameReassembler) {
@@ -339,10 +347,10 @@
} else {
Mutexed<Input>::Locked input(mInput);
bool released = false;
- if (buffer) {
- released = input->buffers->releaseBuffer(buffer, nullptr, true);
- } else if (copy) {
+ if (copy) {
released = input->extraBuffers.releaseSlot(copy, nullptr, true);
+ } else if (buffer) {
+ released = input->buffers->releaseBuffer(buffer, nullptr, true);
}
ALOGV("[%s] queueInputBuffer: buffer%s %sreleased",
mName, (buffer == nullptr) ? "(copy)" : "", released ? "" : "not ");
@@ -1116,6 +1124,7 @@
input->numSlots = numInputSlots;
input->extraBuffers.flush();
input->numExtraSlots = 0u;
+ input->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
if (audioEncoder && encoderFrameSize && sampleRate && channelCount) {
input->frameReassembler.init(
pool,
@@ -1379,6 +1388,12 @@
}
}
}
+
+ int32_t tunneled = 0;
+ if (!outputFormat->findInt32("android._tunneled", &tunneled)) {
+ tunneled = 0;
+ }
+ mTunneled = (tunneled != 0);
}
// Set up pipeline control. This has to be done after mInputBuffers and
@@ -1523,6 +1538,7 @@
ALOGV("[%s] flush", mName);
std::vector<uint64_t> indices;
std::list<std::unique_ptr<C2Work>> configs;
+ mInput.lock()->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
for (const std::unique_ptr<C2Work> &work : flushedWork) {
indices.push_back(work->input.ordinal.frameIndex.peeku());
if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
@@ -1589,12 +1605,18 @@
}
std::shared_ptr<C2Buffer> buffer =
mPipelineWatcher.lock()->onInputBufferReleased(frameIndex, arrayIndex);
- bool newInputSlotAvailable;
+ bool newInputSlotAvailable = false;
{
Mutexed<Input>::Locked input(mInput);
- newInputSlotAvailable = input->buffers->expireComponentBuffer(buffer);
- if (!newInputSlotAvailable) {
- (void)input->extraBuffers.expireComponentBuffer(buffer);
+ if (input->lastFlushIndex >= frameIndex) {
+ ALOGD("[%s] Ignoring stale input buffer done callback: "
+ "last flush index = %lld, frameIndex = %lld",
+ mName, input->lastFlushIndex.peekll(), (long long)frameIndex);
+ } else {
+ newInputSlotAvailable = input->buffers->expireComponentBuffer(buffer);
+ if (!newInputSlotAvailable) {
+ (void)input->extraBuffers.expireComponentBuffer(buffer);
+ }
}
}
if (newInputSlotAvailable) {
@@ -1881,10 +1903,21 @@
}
}
+ bool drop = false;
+ if (worklet->output.flags & C2FrameData::FLAG_DROP_FRAME) {
+ ALOGV("[%s] onWorkDone: drop buffer but keep metadata", mName);
+ drop = true;
+ }
+
if (notifyClient && !buffer && !flags) {
- ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
- mName, work->input.ordinal.frameIndex.peekull());
- notifyClient = false;
+ if (mTunneled && drop && outputFormat) {
+ ALOGV("[%s] onWorkDone: Keep tunneled, drop frame with format change (%lld)",
+ mName, work->input.ordinal.frameIndex.peekull());
+ } else {
+ ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
+ mName, work->input.ordinal.frameIndex.peekull());
+ notifyClient = false;
+ }
}
if (buffer) {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 5a2aca2..26eef30 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -273,6 +273,7 @@
size_t numExtraSlots;
uint32_t inputDelay;
uint32_t pipelineDelay;
+ c2_cntr64_t lastFlushIndex;
FrameReassembler frameReassembler;
};
@@ -323,6 +324,8 @@
return mCrypto != nullptr || mDescrambler != nullptr;
}
std::atomic_bool mSendEncryptedInfoBuffer;
+
+ std::atomic_bool mTunneled;
};
// Conversion of a c2_status_t value to a status_t value may depend on the
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 333a2ca..20f2ecf 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -21,6 +21,7 @@
#include <C2PlatformSupport.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/SkipCutBuffer.h>
@@ -33,7 +34,7 @@
namespace {
-sp<GraphicBlockBuffer> AllocateGraphicBuffer(
+sp<GraphicBlockBuffer> AllocateInputGraphicBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const sp<AMessage> &format,
uint32_t pixelFormat,
@@ -45,9 +46,13 @@
return nullptr;
}
+ int64_t usageValue = 0;
+ (void)format->findInt64("android._C2MemoryUsage", &usageValue);
+ C2MemoryUsage fullUsage{usageValue | usage.expected};
+
std::shared_ptr<C2GraphicBlock> block;
c2_status_t err = pool->fetchGraphicBlock(
- width, height, pixelFormat, usage, &block);
+ width, height, pixelFormat, fullUsage, &block);
if (err != C2_OK) {
ALOGD("fetch graphic block failed: %d", err);
return nullptr;
@@ -132,6 +137,7 @@
if (!copy->copy(c2buffer)) {
return nullptr;
}
+ copy->meta()->extend(buffer->meta());
return copy;
}
@@ -199,6 +205,56 @@
mSkipCutBuffer = new SkipCutBuffer(skip, cut, mChannelCount);
}
+bool OutputBuffers::convert(
+ const std::shared_ptr<C2Buffer> &src, sp<Codec2Buffer> *dst) {
+ if (!src || src->data().type() != C2BufferData::LINEAR) {
+ return false;
+ }
+ int32_t configEncoding = kAudioEncodingPcm16bit;
+ int32_t codecEncoding = kAudioEncodingPcm16bit;
+ if (mFormat->findInt32("android._codec-pcm-encoding", &codecEncoding)
+ && mFormat->findInt32("android._config-pcm-encoding", &configEncoding)) {
+ if (mSrcEncoding != codecEncoding || mDstEncoding != configEncoding) {
+ if (codecEncoding != configEncoding) {
+ mDataConverter = AudioConverter::Create(
+ (AudioEncoding)codecEncoding, (AudioEncoding)configEncoding);
+ ALOGD_IF(mDataConverter, "[%s] Converter created from %d to %d",
+ mName, codecEncoding, configEncoding);
+ mFormatWithConverter = mFormat->dup();
+ mFormatWithConverter->setInt32(KEY_PCM_ENCODING, configEncoding);
+ } else {
+ mDataConverter = nullptr;
+ mFormatWithConverter = nullptr;
+ }
+ mSrcEncoding = codecEncoding;
+ mDstEncoding = configEncoding;
+ }
+ if (int encoding; !mFormat->findInt32(KEY_PCM_ENCODING, &encoding)
+ || encoding != mDstEncoding) {
+ }
+ }
+ if (!mDataConverter) {
+ return false;
+ }
+ sp<MediaCodecBuffer> srcBuffer = ConstLinearBlockBuffer::Allocate(mFormat, src);
+ if (!srcBuffer) {
+ return false;
+ }
+ if (!*dst) {
+ *dst = new Codec2Buffer(
+ mFormat,
+ new ABuffer(mDataConverter->targetSize(srcBuffer->size())));
+ }
+ sp<MediaCodecBuffer> dstBuffer = *dst;
+ status_t err = mDataConverter->convert(srcBuffer, dstBuffer);
+ if (err != OK) {
+ ALOGD("[%s] buffer conversion failed: %d", mName, err);
+ return false;
+ }
+ dstBuffer->setFormat(mFormatWithConverter);
+ return true;
+}
+
void OutputBuffers::clearStash() {
mPending.clear();
mReorderStash.clear();
@@ -887,6 +943,10 @@
return nullptr;
}
+ int64_t usageValue = 0;
+ (void)format->findInt64("android._C2MemoryUsage", &usageValue);
+ usage = C2MemoryUsage(usage.expected | usageValue);
+
std::shared_ptr<C2LinearBlock> block;
c2_status_t err = pool->fetchLinearBlock(capacity, usage, &block);
if (err != C2_OK || block == nullptr) {
@@ -1031,7 +1091,7 @@
[pool = mPool, format = mFormat, lbp = mLocalBufferPool, pixelFormat]()
-> sp<Codec2Buffer> {
C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
- return AllocateGraphicBuffer(
+ return AllocateInputGraphicBuffer(
pool, format, pixelFormat, usage, lbp);
});
return std::move(array);
@@ -1042,10 +1102,8 @@
}
sp<Codec2Buffer> GraphicInputBuffers::createNewBuffer() {
- int64_t usageValue = 0;
- (void)mFormat->findInt64("android._C2MemoryUsage", &usageValue);
- C2MemoryUsage usage{usageValue | C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE};
- return AllocateGraphicBuffer(
+ C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+ return AllocateInputGraphicBuffer(
mPool, mFormat, extractPixelFormat(mFormat), usage, mLocalBufferPool);
}
@@ -1078,7 +1136,7 @@
return err;
}
c2Buffer->setFormat(mFormat);
- if (!c2Buffer->copy(buffer)) {
+ if (!convert(buffer, &c2Buffer) && !c2Buffer->copy(buffer)) {
ALOGD("[%s] copy buffer failed", mName);
return WOULD_BLOCK;
}
@@ -1194,9 +1252,12 @@
const std::shared_ptr<C2Buffer> &buffer,
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) {
- sp<Codec2Buffer> newBuffer = wrap(buffer);
- if (newBuffer == nullptr) {
- return NO_MEMORY;
+ sp<Codec2Buffer> newBuffer;
+ if (!convert(buffer, &newBuffer)) {
+ newBuffer = wrap(buffer);
+ if (newBuffer == nullptr) {
+ return NO_MEMORY;
+ }
}
newBuffer->setFormat(mFormat);
*index = mImpl.assignSlot(newBuffer);
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index 995d3a4..c8e9930 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -18,9 +18,11 @@
#define CCODEC_BUFFERS_H_
+#include <optional>
#include <string>
#include <C2Config.h>
+#include <DataConverter.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/MediaCodecBuffer.h>
@@ -382,6 +384,14 @@
*/
void submit(const sp<MediaCodecBuffer> &buffer);
+ /**
+ * Apply DataConverter from |src| to |*dst| if needed. If |*dst| is nullptr,
+ * a new buffer is allocated.
+ *
+ * Returns true if conversion was needed and executed; false otherwise.
+ */
+ bool convert(const std::shared_ptr<C2Buffer> &src, sp<Codec2Buffer> *dst);
+
private:
// SkipCutBuffer
int32_t mDelay;
@@ -391,6 +401,12 @@
void setSkipCutBuffer(int32_t skip, int32_t cut);
+ // DataConverter
+ sp<DataConverter> mDataConverter;
+ sp<AMessage> mFormatWithConverter;
+ std::optional<int32_t> mSrcEncoding;
+ std::optional<int32_t> mDstEncoding;
+
// Output stash
// Struct for an entry in the output stash (mPending and mReorderStash)
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index c275187..03418d9 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -657,24 +657,29 @@
add(ConfigMapper(KEY_SAMPLE_RATE, C2_PARAMKEY_CODED_SAMPLE_RATE, "value")
.limitTo(D::AUDIO & D::CODED));
- add(ConfigMapper(KEY_PCM_ENCODING, C2_PARAMKEY_PCM_ENCODING, "value")
+ auto pcmEncodingMapper = [](C2Value v) -> C2Value {
+ int32_t value;
+ C2Config::pcm_encoding_t to;
+ if (v.get(&value) && C2Mapper::map(value, &to)) {
+ return to;
+ }
+ return C2Value();
+ };
+ auto pcmEncodingReverse = [](C2Value v) -> C2Value {
+ C2Config::pcm_encoding_t value;
+ int32_t to;
+ using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
+ if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
+ return to;
+ }
+ return C2Value();
+ };
+ add(ConfigMapper(KEY_PCM_ENCODING, C2_PARAMKEY_PCM_ENCODING, "value")
.limitTo(D::AUDIO)
- .withMappers([](C2Value v) -> C2Value {
- int32_t value;
- C2Config::pcm_encoding_t to;
- if (v.get(&value) && C2Mapper::map(value, &to)) {
- return to;
- }
- return C2Value();
- }, [](C2Value v) -> C2Value {
- C2Config::pcm_encoding_t value;
- int32_t to;
- using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(value)>::type;
- if (v.get((C2ValueType*)&value) && C2Mapper::map(value, &to)) {
- return to;
- }
- return C2Value();
- }));
+ .withMappers(pcmEncodingMapper, pcmEncodingReverse));
+ add(ConfigMapper("android._codec-pcm-encoding", C2_PARAMKEY_PCM_ENCODING, "value")
+ .limitTo(D::AUDIO & D::READ)
+ .withMappers(pcmEncodingMapper, pcmEncodingReverse));
add(ConfigMapper(KEY_IS_ADTS, C2_PARAMKEY_AAC_PACKAGING, "value")
.limitTo(D::AUDIO & D::CODED)
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 7c4bfb6..67d7ed2 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -96,9 +96,12 @@
return false;
}
- // determine if codec supports HDR
+ // determine if codec supports HDR; imply 10-bit support
bool supportsHdr = false;
+ // determine if codec supports HDR10Plus; imply 10-bit support
bool supportsHdr10Plus = false;
+ // determine if codec supports 10-bit format
+ bool supports10Bit = false;
std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
c2_status_t err1 = intf->querySupportedParams(&paramDescs);
@@ -126,6 +129,10 @@
supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
supportsHdr |= (mediaType == MIMETYPE_VIDEO_AV1);
+ // HDR support implies 10-bit support.
+ // TODO: directly check this from the component interface
+ supports10Bit = (supportsHdr || supportsHdr10Plus);
+
bool added = false;
for (C2Value::Primitive profile : profileQuery[0].values.values) {
@@ -165,6 +172,12 @@
}
}
}
+ if (supports10Bit) {
+ auto bitnessMapper = C2Mapper::GetBitDepthProfileLevelMapper(trait.mediaType, 10);
+ if (bitnessMapper && bitnessMapper->mapProfile(pl.profile, &sdkProfile)) {
+ caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+ }
+ }
} else if (!mapper) {
caps->addProfileLevel(pl.profile, pl.level);
}
diff --git a/media/codec2/sfplugin/FrameReassembler.cpp b/media/codec2/sfplugin/FrameReassembler.cpp
index af054c7..cb8b6ab 100644
--- a/media/codec2/sfplugin/FrameReassembler.cpp
+++ b/media/codec2/sfplugin/FrameReassembler.cpp
@@ -88,8 +88,7 @@
const sp<MediaCodecBuffer> &buffer,
std::list<std::unique_ptr<C2Work>> *items) {
int64_t timeUs;
- if (buffer->size() == 0u
- || !buffer->meta()->findInt64("timeUs", &timeUs)) {
+ if (!buffer->meta()->findInt64("timeUs", &timeUs)) {
return C2_BAD_VALUE;
}
diff --git a/media/codec2/sfplugin/tests/FrameReassembler_test.cpp b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
index 6738ee7..0be934a 100644
--- a/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
+++ b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
@@ -53,7 +53,8 @@
C2Config::pcm_encoding_t encoding,
size_t inputFrameSizeInBytes,
size_t count,
- size_t expectedOutputSize) {
+ size_t expectedOutputSize,
+ bool separateEos) {
FrameReassembler frameReassembler;
frameReassembler.init(
mPool,
@@ -67,7 +68,7 @@
size_t inputIndex = 0, outputIndex = 0;
size_t expectCount = 0;
- for (size_t i = 0; i < count; ++i) {
+ for (size_t i = 0; i < count + (separateEos ? 1 : 0); ++i) {
sp<MediaCodecBuffer> buffer = new MediaCodecBuffer(
new AMessage, new ABuffer(inputFrameSizeInBytes));
buffer->setRange(0, inputFrameSizeInBytes);
@@ -77,8 +78,12 @@
if (i == count - 1) {
buffer->meta()->setInt32("eos", 1);
}
- for (size_t j = 0; j < inputFrameSizeInBytes; ++j, ++inputIndex) {
- buffer->base()[j] = (inputIndex & 0xFF);
+ if (i == count && separateEos) {
+ buffer->setRange(0, 0);
+ } else {
+ for (size_t j = 0; j < inputFrameSizeInBytes; ++j, ++inputIndex) {
+ buffer->base()[j] = (inputIndex & 0xFF);
+ }
}
std::list<std::unique_ptr<C2Work>> items;
ASSERT_EQ(C2_OK, frameReassembler.process(buffer, &items));
@@ -105,7 +110,8 @@
ASSERT_EQ(encoderFrameSize * BytesPerSample(encoding), view.capacity());
for (size_t j = 0; j < view.capacity(); ++j, ++outputIndex) {
ASSERT_TRUE(outputIndex < inputIndex
- || inputIndex == inputFrameSizeInBytes * count);
+ || inputIndex == inputFrameSizeInBytes * count)
+ << "inputIndex = " << inputIndex << " outputIndex = " << outputIndex;
uint8_t expected = outputIndex < inputIndex ? (outputIndex & 0xFF) : 0;
if (expectCount < 10) {
++expectCount;
@@ -137,204 +143,239 @@
// Push frames with exactly the same size as the encoder requested.
TEST_F(FrameReassemblerTest, PushExactFrameSize) {
ASSERT_EQ(OK, initStatus());
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_8,
- 1024 /* input frame size in bytes = 1024 samples * 1 channel * 1 bytes/sample */,
- 10 /* count */,
- 10240 /* expected output size = 10 * 1024 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_16,
- 2048 /* input frame size in bytes = 1024 samples * 1 channel * 2 bytes/sample */,
- 10 /* count */,
- 20480 /* expected output size = 10 * 2048 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_FLOAT,
- 4096 /* input frame size in bytes = 1024 samples * 1 channel * 4 bytes/sample */,
- 10 /* count */,
- 40960 /* expected output size = 10 * 4096 bytes/frame */);
+ for (bool separateEos : {false, true}) {
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1024 /* input frame size in bytes = 1024 samples * 1 channel * 1 bytes/sample */,
+ 10 /* count */,
+ 10240 /* expected output size = 10 * 1024 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 2048 /* input frame size in bytes = 1024 samples * 1 channel * 2 bytes/sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 10 * 2048 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 4096 /* input frame size in bytes = 1024 samples * 1 channel * 4 bytes/sample */,
+ 10 /* count */,
+ 40960 /* expected output size = 10 * 4096 bytes/frame */,
+ separateEos);
+ }
}
// Push frames with half the size that the encoder requested.
TEST_F(FrameReassemblerTest, PushHalfFrameSize) {
ASSERT_EQ(OK, initStatus());
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_8,
- 512 /* input frame size in bytes = 512 samples * 1 channel * 1 bytes per sample */,
- 10 /* count */,
- 5120 /* expected output size = 5 * 1024 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_16,
- 1024 /* input frame size in bytes = 512 samples * 1 channel * 2 bytes per sample */,
- 10 /* count */,
- 10240 /* expected output size = 5 * 2048 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_FLOAT,
- 2048 /* input frame size in bytes = 512 samples * 1 channel * 4 bytes per sample */,
- 10 /* count */,
- 20480 /* expected output size = 5 * 4096 bytes/frame */);
+ for (bool separateEos : {false, true}) {
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 512 /* input frame size in bytes = 512 samples * 1 channel * 1 bytes/sample */,
+ 10 /* count */,
+ 5120 /* expected output size = 5 * 1024 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 1024 /* input frame size in bytes = 512 samples * 1 channel * 2 bytes/sample */,
+ 10 /* count */,
+ 10240 /* expected output size = 5 * 2048 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 2048 /* input frame size in bytes = 512 samples * 1 channel * 4 bytes/sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 5 * 4096 bytes/frame */,
+ separateEos);
+ }
}
// Push frames with twice the size that the encoder requested.
TEST_F(FrameReassemblerTest, PushDoubleFrameSize) {
ASSERT_EQ(OK, initStatus());
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_8,
- 2048 /* input frame size in bytes = 2048 samples * 1 channel * 1 bytes per sample */,
- 10 /* count */,
- 20480 /* expected output size = 20 * 1024 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_16,
- 4096 /* input frame size in bytes = 2048 samples * 1 channel * 2 bytes per sample */,
- 10 /* count */,
- 40960 /* expected output size = 20 * 2048 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_FLOAT,
- 8192 /* input frame size in bytes = 2048 samples * 1 channel * 4 bytes per sample */,
- 10 /* count */,
- 81920 /* expected output size = 20 * 4096 bytes/frame */);
+ for (bool separateEos : {false, true}) {
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 2048 /* input frame size in bytes = 2048 samples * 1 channel * 1 bytes/sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 20 * 1024 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 4096 /* input frame size in bytes = 2048 samples * 1 channel * 2 bytes/sample */,
+ 10 /* count */,
+ 40960 /* expected output size = 20 * 2048 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 8192 /* input frame size in bytes = 2048 samples * 1 channel * 4 bytes/sample */,
+ 10 /* count */,
+ 81920 /* expected output size = 20 * 4096 bytes/frame */,
+ separateEos);
+ }
}
// Push frames with a little bit larger (+5 samples) than the requested size.
TEST_F(FrameReassemblerTest, PushLittleLargerFrameSize) {
ASSERT_EQ(OK, initStatus());
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_8,
- 1029 /* input frame size in bytes = 1029 samples * 1 channel * 1 bytes per sample */,
- 10 /* count */,
- 11264 /* expected output size = 11 * 1024 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_16,
- 2058 /* input frame size in bytes = 1029 samples * 1 channel * 2 bytes per sample */,
- 10 /* count */,
- 22528 /* expected output size = 11 * 2048 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_FLOAT,
- 4116 /* input frame size in bytes = 1029 samples * 1 channel * 4 bytes per sample */,
- 10 /* count */,
- 45056 /* expected output size = 11 * 4096 bytes/frame */);
+ for (bool separateEos : {false, true}) {
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1029 /* input frame size in bytes = 1029 samples * 1 channel * 1 bytes/sample */,
+ 10 /* count */,
+ 11264 /* expected output size = 11 * 1024 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 2058 /* input frame size in bytes = 1029 samples * 1 channel * 2 bytes/sample */,
+ 10 /* count */,
+ 22528 /* expected output size = 11 * 2048 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 4116 /* input frame size in bytes = 1029 samples * 1 channel * 4 bytes/sample */,
+ 10 /* count */,
+ 45056 /* expected output size = 11 * 4096 bytes/frame */,
+ separateEos);
+ }
}
// Push frames with a little bit smaller (-5 samples) than the requested size.
TEST_F(FrameReassemblerTest, PushLittleSmallerFrameSize) {
ASSERT_EQ(OK, initStatus());
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_8,
- 1019 /* input frame size in bytes = 1019 samples * 1 channel * 1 bytes per sample */,
- 10 /* count */,
- 10240 /* expected output size = 10 * 1024 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_16,
- 2038 /* input frame size in bytes = 1019 samples * 1 channel * 2 bytes per sample */,
- 10 /* count */,
- 20480 /* expected output size = 10 * 2048 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_FLOAT,
- 4076 /* input frame size in bytes = 1019 samples * 1 channel * 4 bytes per sample */,
- 10 /* count */,
- 40960 /* expected output size = 10 * 4096 bytes/frame */);
+ for (bool separateEos : {false, true}) {
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1019 /* input frame size in bytes = 1019 samples * 1 channel * 1 bytes/sample */,
+ 10 /* count */,
+ 10240 /* expected output size = 10 * 1024 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 2038 /* input frame size in bytes = 1019 samples * 1 channel * 2 bytes/sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 10 * 2048 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 4076 /* input frame size in bytes = 1019 samples * 1 channel * 4 bytes/sample */,
+ 10 /* count */,
+ 40960 /* expected output size = 10 * 4096 bytes/frame */,
+ separateEos);
+ }
}
// Push single-byte frames
TEST_F(FrameReassemblerTest, PushSingleByte) {
ASSERT_EQ(OK, initStatus());
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_8,
- 1 /* input frame size in bytes */,
- 100000 /* count */,
- 100352 /* expected output size = 98 * 1024 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_16,
- 1 /* input frame size in bytes */,
- 100000 /* count */,
- 100352 /* expected output size = 49 * 2048 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_FLOAT,
- 1 /* input frame size in bytes */,
- 100000 /* count */,
- 102400 /* expected output size = 25 * 4096 bytes/frame */);
+ for (bool separateEos : {false, true}) {
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1 /* input frame size in bytes */,
+ 100000 /* count */,
+ 100352 /* expected output size = 98 * 1024 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 1 /* input frame size in bytes */,
+ 100000 /* count */,
+ 100352 /* expected output size = 49 * 2048 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 1 /* input frame size in bytes */,
+ 100000 /* count */,
+ 102400 /* expected output size = 25 * 4096 bytes/frame */,
+ separateEos);
+ }
}
// Push one big chunk.
TEST_F(FrameReassemblerTest, PushBigChunk) {
ASSERT_EQ(OK, initStatus());
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_8,
- 100000 /* input frame size in bytes */,
- 1 /* count */,
- 100352 /* expected output size = 98 * 1024 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_16,
- 100000 /* input frame size in bytes */,
- 1 /* count */,
- 100352 /* expected output size = 49 * 2048 bytes/frame */);
- testPushSameSize(
- 1024 /* frame size in samples */,
- 48000 /* sample rate */,
- 1 /* channel count */,
- PCM_FLOAT,
- 100000 /* input frame size in bytes */,
- 1 /* count */,
- 102400 /* expected output size = 25 * 4096 bytes/frame */);
+ for (bool separateEos : {false, true}) {
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 100000 /* input frame size in bytes */,
+ 1 /* count */,
+ 100352 /* expected output size = 98 * 1024 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 100000 /* input frame size in bytes */,
+ 1 /* count */,
+ 100352 /* expected output size = 49 * 2048 bytes/frame */,
+ separateEos);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 100000 /* input frame size in bytes */,
+ 1 /* count */,
+ 102400 /* expected output size = 25 * 4096 bytes/frame */,
+ separateEos);
+ }
}
} // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 4d939fa..ca6a328 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -381,15 +381,17 @@
{ C2Config::LEVEL_AV1_7_3, AV1Level73 },
};
-
ALookup<C2Config::profile_t, int32_t> sAv1Profiles = {
- // TODO: will need to disambiguate between Main8 and Main10
{ C2Config::PROFILE_AV1_0, AV1ProfileMain8 },
{ C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
{ C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
{ C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10Plus },
};
+ALookup<C2Config::profile_t, int32_t> sAv1TenbitProfiles = {
+ { C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
+};
+
ALookup<C2Config::profile_t, int32_t> sAv1HdrProfiles = {
{ C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
};
@@ -603,9 +605,9 @@
};
struct Av1ProfileLevelMapper : ProfileLevelMapperHelper {
- Av1ProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false) :
+ Av1ProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false, int32_t bitDepth = 8) :
ProfileLevelMapperHelper(),
- mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus) {}
+ mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus), mBitDepth(bitDepth) {}
virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
return sAv1Levels.map(from, to);
@@ -614,19 +616,22 @@
return sAv1Levels.map(from, to);
}
virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
- return mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
- mIsHdr ? sAv1HdrProfiles.map(from, to) :
- sAv1Profiles.map(from, to);
+ return (mBitDepth == 10) ? sAv1TenbitProfiles.map(from, to) :
+ mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
+ mIsHdr ? sAv1HdrProfiles.map(from, to) :
+ sAv1Profiles.map(from, to);
}
virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
- return mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
- mIsHdr ? sAv1HdrProfiles.map(from, to) :
- sAv1Profiles.map(from, to);
+ return (mBitDepth == 10) ? sAv1TenbitProfiles.map(from, to) :
+ mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
+ mIsHdr ? sAv1HdrProfiles.map(from, to) :
+ sAv1Profiles.map(from, to);
}
private:
bool mIsHdr;
bool mIsHdr10Plus;
+ int32_t mBitDepth;
};
} // namespace
@@ -674,6 +679,18 @@
}
// static
+std::shared_ptr<C2Mapper::ProfileLevelMapper>
+C2Mapper::GetBitDepthProfileLevelMapper(std::string mediaType, int32_t bitDepth) {
+ std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
+ if (bitDepth == 8) {
+ return GetProfileLevelMapper(mediaType);
+ } else if (mediaType == MIMETYPE_VIDEO_AV1 && bitDepth == 10) {
+ return std::make_shared<Av1ProfileLevelMapper>(false, false, bitDepth);
+ }
+ return nullptr;
+}
+
+// static
bool C2Mapper::map(C2Config::bitrate_mode_t from, int32_t *to) {
return sBitrateModes.map(from, to);
}
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.h b/media/codec2/sfplugin/utils/Codec2Mapper.h
index 797c8a8..33d305e 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.h
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.h
@@ -43,6 +43,9 @@
static std::shared_ptr<ProfileLevelMapper>
GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus = false);
+ static std::shared_ptr<ProfileLevelMapper>
+ GetBitDepthProfileLevelMapper(std::string mediaType, int32_t bitDepth = 8);
+
// convert between bitrates
static bool map(C2Config::bitrate_mode_t, int32_t*);
static bool map(int32_t, C2Config::bitrate_mode_t*);
diff --git a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
index 00b2ab6..b295258 100644
--- a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
+++ b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
@@ -501,13 +501,16 @@
/* check frame rate */
for (i = 0; i < encParams->nLayers; i++)
{
+ if (encOption->encFrameRate[i] <= 0. || encOption->encFrameRate[i] > 120)
+ {
+ goto CLEAN_UP;
+ }
encParams->LayerFrameRate[i] = encOption->encFrameRate[i];
}
if (encParams->nLayers > 1)
{
- if (encOption->encFrameRate[0] == encOption->encFrameRate[1] ||
- encOption->encFrameRate[0] == 0. || encOption->encFrameRate[1] == 0.) /* 7/31/03 */
+ if (encOption->encFrameRate[0] == encOption->encFrameRate[1])
goto CLEAN_UP;
}
/* set max frame rate */
diff --git a/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp b/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
index 912c821..5e613d9 100644
--- a/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
+++ b/media/codecs/m4v_h263/fuzzer/mpeg4_h263_dec_fuzzer.cpp
@@ -50,7 +50,7 @@
private:
tagvideoDecControls *mDecHandle = nullptr;
- uint8_t *mOutputBuffer[kNumOutputBuffers];
+ uint8_t *mOutputBuffer[kNumOutputBuffers] = {};
bool mInitialized = false;
bool mFramesConfigured = false;
#ifdef MPEG4
diff --git a/media/extractors/Android.bp b/media/extractors/Android.bp
index 7513cb1..66585da 100644
--- a/media/extractors/Android.bp
+++ b/media/extractors/Android.bp
@@ -24,10 +24,6 @@
cc_defaults {
name: "extractor-defaults",
- include_dirs: [
- "frameworks/av/media/libstagefright/include",
- ],
-
shared_libs: [
"liblog",
],
diff --git a/media/extractors/TEST_MAPPING b/media/extractors/TEST_MAPPING
index 4984b8f..a7c2cfe 100644
--- a/media/extractors/TEST_MAPPING
+++ b/media/extractors/TEST_MAPPING
@@ -1,6 +1,9 @@
{
"presubmit": [
+ {
+ "name": "CtsMediaTranscodingTestCases"
+ }
// TODO(b/153661591) enable test once the bug is fixed
// This tests the extractor path
// {
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
index 7bf3a13..a926422 100644
--- a/media/extractors/aac/Android.bp
+++ b/media/extractors/aac/Android.bp
@@ -21,6 +21,10 @@
srcs: ["AACExtractor.cpp"],
+ export_include_dirs: [
+ "include",
+ ],
+
static_libs: [
"libstagefright_foundation",
"libstagefright_metadatautils",
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/include/AACExtractor.h
similarity index 100%
rename from media/extractors/aac/AACExtractor.h
rename to media/extractors/aac/include/AACExtractor.h
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
index 712360d..121b7a3 100644
--- a/media/extractors/amr/Android.bp
+++ b/media/extractors/amr/Android.bp
@@ -21,6 +21,10 @@
srcs: ["AMRExtractor.cpp"],
+ export_include_dirs: [
+ "include",
+ ],
+
static_libs: [
"libstagefright_foundation",
],
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/include/AMRExtractor.h
similarity index 100%
rename from media/extractors/amr/AMRExtractor.h
rename to media/extractors/amr/include/AMRExtractor.h
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 9a2a76b..fd51622 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -23,8 +23,8 @@
srcs: ["FLACExtractor.cpp"],
- include_dirs: [
- "external/flac/include",
+ export_include_dirs: [
+ "include",
],
shared_libs: [
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/include/FLACExtractor.h
similarity index 100%
rename from media/extractors/flac/FLACExtractor.h
rename to media/extractors/flac/include/FLACExtractor.h
diff --git a/media/extractors/fuzzers/Android.bp b/media/extractors/fuzzers/Android.bp
index 0e54b58..490e195 100644
--- a/media/extractors/fuzzers/Android.bp
+++ b/media/extractors/fuzzers/Android.bp
@@ -80,11 +80,6 @@
defaults: ["extractor-fuzzer-defaults"],
host_supported: true,
- include_dirs: [
- "frameworks/av/media/extractors/mpeg2",
- "frameworks/av/media/libstagefright",
- ],
-
static_libs: [
"libstagefright_foundation_without_imemory",
"libstagefright_mpeg2support",
@@ -124,14 +119,6 @@
"mp4_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/mp4",
- ],
-
- header_libs: [
- "libaudioclient_headers",
- ],
-
static_libs: [
"libstagefright_id3",
"libstagefright_esds",
@@ -150,10 +137,6 @@
"wav_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/wav",
- ],
-
static_libs: [
"libfifo",
"libwavextractor",
@@ -173,10 +156,6 @@
"amr_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/amr",
- ],
-
static_libs: [
"libamrextractor",
],
@@ -193,10 +172,6 @@
"mkv_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/mkv",
- ],
-
static_libs: [
"libwebm",
"libstagefright_flacdec",
@@ -217,9 +192,6 @@
"ogg_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/ogg",
- ],
static_libs: [
"libstagefright_metadatautils",
@@ -265,10 +237,6 @@
"mp3_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/mp3",
- ],
-
static_libs: [
"libfifo",
"libmp3extractor",
@@ -285,10 +253,6 @@
"aac_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/aac",
- ],
-
static_libs: [
"libaacextractor",
"libstagefright_metadatautils",
@@ -304,10 +268,6 @@
"flac_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/flac",
- ],
-
static_libs: [
"libstagefright_metadatautils",
"libFLAC",
@@ -329,10 +289,6 @@
"midi_extractor_fuzzer.cpp",
],
- include_dirs: [
- "frameworks/av/media/extractors/midi",
- ],
-
static_libs: [
"libsonivox",
"libmedia_midiiowrapper",
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index 08a6fa0..feabf9e 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -23,6 +23,10 @@
srcs: ["MidiExtractor.cpp"],
+ export_include_dirs: [
+ "include",
+ ],
+
header_libs: [
"libmedia_datasource_headers",
],
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/include/MidiExtractor.h
similarity index 100%
rename from media/extractors/midi/MidiExtractor.h
rename to media/extractors/midi/include/MidiExtractor.h
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 54c5b27..98ce305 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -21,10 +21,8 @@
srcs: ["MatroskaExtractor.cpp"],
- include_dirs: [
- "external/flac/include",
- "external/libvpx/libwebm",
- "frameworks/av/media/libstagefright/flac/dec",
+ export_include_dirs: [
+ "include",
],
shared_libs: [
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/include/MatroskaExtractor.h
similarity index 100%
rename from media/extractors/mkv/MatroskaExtractor.h
rename to media/extractors/mkv/include/MatroskaExtractor.h
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
index 75b9b7b..396a13a 100644
--- a/media/extractors/mp3/Android.bp
+++ b/media/extractors/mp3/Android.bp
@@ -16,6 +16,10 @@
"XINGSeeker.cpp",
],
+ export_include_dirs: [
+ "include",
+ ],
+
static_libs: [
"libutils",
"libstagefright_id3",
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/include/MP3Extractor.h
similarity index 100%
rename from media/extractors/mp3/MP3Extractor.h
rename to media/extractors/mp3/include/MP3Extractor.h
diff --git a/media/extractors/mp3/MP3Seeker.h b/media/extractors/mp3/include/MP3Seeker.h
similarity index 100%
rename from media/extractors/mp3/MP3Seeker.h
rename to media/extractors/mp3/include/MP3Seeker.h
diff --git a/media/extractors/mp3/VBRISeeker.h b/media/extractors/mp3/include/VBRISeeker.h
similarity index 100%
rename from media/extractors/mp3/VBRISeeker.h
rename to media/extractors/mp3/include/VBRISeeker.h
diff --git a/media/extractors/mp3/XINGSeeker.h b/media/extractors/mp3/include/XINGSeeker.h
similarity index 100%
rename from media/extractors/mp3/XINGSeeker.h
rename to media/extractors/mp3/include/XINGSeeker.h
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 7fa6bfd..540d75d 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -15,6 +15,15 @@
],
}
+cc_library_headers {
+ name: "libmp4extractor_headers",
+ host_supported: true,
+
+ export_include_dirs: [
+ "include",
+ ],
+}
+
cc_library {
name: "libmp4extractor",
defaults: ["extractor-defaults"],
@@ -27,6 +36,10 @@
"SampleTable.cpp",
],
+ export_include_dirs: [
+ "include",
+ ],
+
static_libs: [
"libstagefright_esds",
"libstagefright_foundation",
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/include/AC4Parser.h
similarity index 100%
rename from media/extractors/mp4/AC4Parser.h
rename to media/extractors/mp4/include/AC4Parser.h
diff --git a/media/extractors/mp4/ItemTable.h b/media/extractors/mp4/include/ItemTable.h
similarity index 100%
rename from media/extractors/mp4/ItemTable.h
rename to media/extractors/mp4/include/ItemTable.h
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/include/MPEG4Extractor.h
similarity index 100%
rename from media/extractors/mp4/MPEG4Extractor.h
rename to media/extractors/mp4/include/MPEG4Extractor.h
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/include/SampleIterator.h
similarity index 100%
rename from media/extractors/mp4/SampleIterator.h
rename to media/extractors/mp4/include/SampleIterator.h
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/include/SampleTable.h
similarity index 100%
rename from media/extractors/mp4/SampleTable.h
rename to media/extractors/mp4/include/SampleTable.h
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 7e6247b..8faecae 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -38,6 +38,10 @@
"MPEG2TSExtractor.cpp",
],
+ export_include_dirs: [
+ "include",
+ ],
+
shared_libs: [
"libbase",
"libcgrouprc#29",
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/include/MPEG2PSExtractor.h
similarity index 100%
rename from media/extractors/mpeg2/MPEG2PSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2PSExtractor.h
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/include/MPEG2TSExtractor.h
similarity index 100%
rename from media/extractors/mpeg2/MPEG2TSExtractor.h
rename to media/extractors/mpeg2/include/MPEG2TSExtractor.h
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index d7540c4..dc3c25c 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -22,8 +22,8 @@
srcs: ["OggExtractor.cpp"],
- include_dirs: [
- "external/tremolo",
+ export_include_dirs: [
+ "include",
],
header_libs: [
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/include/OggExtractor.h
similarity index 100%
rename from media/extractors/ogg/OggExtractor.h
rename to media/extractors/ogg/include/OggExtractor.h
diff --git a/media/extractors/tests/Android.bp b/media/extractors/tests/Android.bp
index 23c74f7..3c3bbdc 100644
--- a/media/extractors/tests/Android.bp
+++ b/media/extractors/tests/Android.bp
@@ -79,11 +79,6 @@
"libbase",
],
- include_dirs: [
- "frameworks/av/media/extractors/",
- "frameworks/av/media/libstagefright/",
- ],
-
compile_multilib: "first",
cflags: [
diff --git a/media/extractors/tests/ExtractorUnitTest.cpp b/media/extractors/tests/ExtractorUnitTest.cpp
index 84ec1f2..2bd9c6a 100644
--- a/media/extractors/tests/ExtractorUnitTest.cpp
+++ b/media/extractors/tests/ExtractorUnitTest.cpp
@@ -27,18 +27,18 @@
#include <media/stagefright/MetaDataUtils.h>
#include <media/stagefright/foundation/OpusHeader.h>
-#include "aac/AACExtractor.h"
-#include "amr/AMRExtractor.h"
-#include "flac/FLACExtractor.h"
-#include "midi/MidiExtractor.h"
-#include "mkv/MatroskaExtractor.h"
-#include "mp3/MP3Extractor.h"
-#include "mp4/MPEG4Extractor.h"
-#include "mp4/SampleTable.h"
-#include "mpeg2/MPEG2PSExtractor.h"
-#include "mpeg2/MPEG2TSExtractor.h"
-#include "ogg/OggExtractor.h"
-#include "wav/WAVExtractor.h"
+#include <AACExtractor.h>
+#include <AMRExtractor.h>
+#include <FLACExtractor.h>
+#include <MidiExtractor.h>
+#include <MatroskaExtractor.h>
+#include <MP3Extractor.h>
+#include <MPEG4Extractor.h>
+#include <SampleTable.h>
+#include <MPEG2PSExtractor.h>
+#include <MPEG2TSExtractor.h>
+#include <OggExtractor.h>
+#include <WAVExtractor.h>
#include "ExtractorUnitTestEnvironment.h"
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index cc5e1c7..b7e2af3 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -22,8 +22,8 @@
srcs: ["WAVExtractor.cpp"],
- include_dirs: [
- "frameworks/av/media/libstagefright/include",
+ export_include_dirs: [
+ "include",
],
shared_libs: [
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/include/WAVExtractor.h
similarity index 100%
rename from media/extractors/wav/WAVExtractor.h
rename to media/extractors/wav/include/WAVExtractor.h
diff --git a/media/janitors/codec_OWNERS b/media/janitors/codec_OWNERS
index e201399..d4ee51b 100644
--- a/media/janitors/codec_OWNERS
+++ b/media/janitors/codec_OWNERS
@@ -2,4 +2,4 @@
# differentiated from plugins connecting those codecs to either omx or codec2 infrastructure
essick@google.com
lajos@google.com
-marcone@google.com
+wonsik@google.com
diff --git a/media/libaaudio/fuzzer/Android.bp b/media/libaaudio/fuzzer/Android.bp
new file mode 100644
index 0000000..e2eec7a
--- /dev/null
+++ b/media/libaaudio/fuzzer/Android.bp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_fuzz {
+ name: "libaaudio_fuzzer",
+ srcs: [
+ "libaaudio_fuzzer.cpp",
+ ],
+ header_libs: [
+ "libaaudio_headers",
+ ],
+ shared_libs: [
+ "libbinder",
+ "libaudiomanager",
+ "libaudiopolicy",
+ "libaudioclient_aidl_conversion",
+ ],
+ static_libs: [
+ "android.media.audio.common.types-V1-cpp",
+ "liblog",
+ "libutils",
+ "libcutils",
+ "libaaudio",
+ "libjsoncpp",
+ "libbase_ndk",
+ "libcgrouprc",
+ "libaudioutils",
+ "libaudioclient",
+ "aaudio-aidl-cpp",
+ "libmedia_helper",
+ "libmediametrics",
+ "libprocessgroup",
+ "av-types-aidl-cpp",
+ "libaaudio_internal",
+ "libcgrouprc_format",
+ "audiopolicy-aidl-cpp",
+ "audioflinger-aidl-cpp",
+ "audiopolicy-types-aidl-cpp",
+ "audioclient-types-aidl-cpp",
+ "shared-file-region-aidl-cpp",
+ "framework-permission-aidl-cpp",
+ "mediametricsservice-aidl-cpp",
+ ],
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
diff --git a/media/libaaudio/fuzzer/README.md b/media/libaaudio/fuzzer/README.md
new file mode 100644
index 0000000..4ba15c5
--- /dev/null
+++ b/media/libaaudio/fuzzer/README.md
@@ -0,0 +1,77 @@
+# Fuzzer for libaaudio
+
+## Plugin Design Considerations
+The fuzzer plugin for `libaaudio` is designed based on the understanding of the
+source code and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+The fuzzer assigns values to the following parameters to pass on to libaaudio:
+1. Device Id (parameter name: `deviceId`)
+2. Sampling Rate (parameter name: `sampleRate`)
+3. Number of channels (parameter name: `channelCount`)
+4. Audio Travel Direction (parameter name: `direction`)
+5. Audio Format (parameter name: `format`)
+6. Audio Sharing Mode (parameter name: `sharingMode`)
+7. Audio Usage (parameter name: `usage`)
+8. Audio Content type (parameter name: `contentType`)
+9. Audio Input Preset (parameter name: `inputPreset`)
+10. Audio Privacy Sensitivity (parameter name: `privacySensitive`)
+11. Buffer Capacity In Frames (parameter name: `frames`)
+12. Performance Mode (parameter name: `mode`)
+13. Allowed Capture Policy (parameter name: `allowedCapturePolicy`)
+14. Session Id (parameter name: `sessionId`)
+15. Frames per Data Callback (parameter name: `framesPerDataCallback`)
+16. MMap Policy (parameter name: `policy`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `deviceId` | Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `sampleRate` | Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `channelCount` | Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `direction` | 0. `AAUDIO_DIRECTION_OUTPUT` 1. `AAUDIO_DIRECTION_INPUT` | Value obtained from FuzzedDataProvider |
+| `format` | 0. `AAUDIO_FORMAT_INVALID` 1. `AAUDIO_FORMAT_UNSPECIFIED` 2. `AAUDIO_FORMAT_PCM_I16` 3. `AAUDIO_FORMAT_PCM_FLOAT` | Value obtained from FuzzedDataProvider |
+| `sharingMode` | 0. `AAUDIO_SHARING_MODE_EXCLUSIVE` 1. `AAUDIO_SHARING_MODE_SHARED` | Value obtained from FuzzedDataProvider |
+| `usage` | 0. `AAUDIO_USAGE_MEDIA` 1. `AAUDIO_USAGE_VOICE_COMMUNICATION` 2. `AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING` 3. `AAUDIO_USAGE_ALARM` 4. `AAUDIO_USAGE_NOTIFICATION` 5. `AAUDIO_USAGE_NOTIFICATION_RINGTONE` 6. `AAUDIO_USAGE_NOTIFICATION_EVENT` 7. `AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY` 8. `AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE` 9. `AAUDIO_USAGE_ASSISTANCE_SONIFICATION` 10. `AAUDIO_USAGE_GAME` 11. `AAUDIO_USAGE_ASSISTANT` 12. `AAUDIO_SYSTEM_USAGE_EMERGENCY` 13. `AAUDIO_SYSTEM_USAGE_SAFETY` 14. `AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS` 15. `AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT` | Value obtained from FuzzedDataProvider |
+| `contentType` | 0. `AAUDIO_CONTENT_TYPE_SPEECH` 1. `AAUDIO_CONTENT_TYPE_MUSIC` 2. `AAUDIO_CONTENT_TYPE_MOVIE` 3. `AAUDIO_CONTENT_TYPE_SONIFICATION` | Value obtained from FuzzedDataProvider |
+| `inputPreset` | 0. `AAUDIO_INPUT_PRESET_GENERIC` 1. `AAUDIO_INPUT_PRESET_CAMCORDER` 2. `AAUDIO_INPUT_PRESET_VOICE_RECOGNITION` 3. `AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION` 4. `AAUDIO_INPUT_PRESET_UNPROCESSED` 5. `AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE` | Value obtained from FuzzedDataProvider |
+| `privacySensitive` | 0. `true` 1. `false` | Value obtained from FuzzedDataProvider |
+| `frames` | Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `mode` | 0. `AAUDIO_PERFORMANCE_MODE_NONE` 1. `AAUDIO_PERFORMANCE_MODE_POWER_SAVING` 2. `AAUDIO_PERFORMANCE_MODE_LOW_LATENCY` | Value obtained from FuzzedDataProvider |
+| `allowedCapturePolicy` | 0. `AAUDIO_ALLOW_CAPTURE_BY_ALL` 1. `AAUDIO_ALLOW_CAPTURE_BY_SYSTEM` 2. `AAUDIO_ALLOW_CAPTURE_BY_NONE` | Value obtained from FuzzedDataProvider |
+| `sessionId` | 0. `AAUDIO_SESSION_ID_NONE` 1. `AAUDIO_SESSION_ID_ALLOCATE` | Value obtained from FuzzedDataProvider |
+| `framesPerDataCallback` | Any value of type `int32_t` | Value obtained from FuzzedDataProvider |
+| `policy` | 0. `AAUDIO_POLICY_NEVER` 1. `AAUDIO_POLICY_AUTO` 2. `AAUDIO_POLICY_ALWAYS` | Value obtained from FuzzedDataProvider |
+
+This also ensures that the plugin is always deterministic for any given input.
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes steps to build libaaudio_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) libaaudio_fuzzer
+```
+#### Steps to run
+
+To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/libaaudio_fuzzer/libaaudio_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp b/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp
new file mode 100644
index 0000000..1167bb0
--- /dev/null
+++ b/media/libaaudio/fuzzer/libaaudio_fuzzer.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "aaudio/AAudio.h"
+#include "aaudio/AAudioTesting.h"
+#include <fuzzer/FuzzedDataProvider.h>
+
+constexpr int32_t kRandomStringLength = 256;
+
+constexpr int64_t kNanosPerMillisecond = 1000 * 1000;
+
+constexpr aaudio_direction_t kDirections[] = {
+ AAUDIO_DIRECTION_OUTPUT, AAUDIO_DIRECTION_INPUT, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_performance_mode_t kPerformanceModes[] = {
+ AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_format_t kFormats[] = {
+ AAUDIO_FORMAT_INVALID, AAUDIO_FORMAT_UNSPECIFIED,
+ AAUDIO_FORMAT_PCM_I16, AAUDIO_FORMAT_PCM_FLOAT,
+ AAUDIO_FORMAT_PCM_I24_PACKED, AAUDIO_FORMAT_PCM_I32};
+
+constexpr aaudio_sharing_mode_t kSharingModes[] = {
+ AAUDIO_SHARING_MODE_EXCLUSIVE, AAUDIO_SHARING_MODE_SHARED};
+
+constexpr int32_t kSampleRates[] = {AAUDIO_UNSPECIFIED,
+ 8000,
+ 11025,
+ 16000,
+ 22050,
+ 32000,
+ 44100,
+ 48000,
+ 88200,
+ 96000};
+
+constexpr aaudio_usage_t kUsages[] = {
+ AAUDIO_USAGE_MEDIA,
+ AAUDIO_USAGE_VOICE_COMMUNICATION,
+ AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+ AAUDIO_USAGE_ALARM,
+ AAUDIO_USAGE_NOTIFICATION,
+ AAUDIO_USAGE_NOTIFICATION_RINGTONE,
+ AAUDIO_USAGE_NOTIFICATION_EVENT,
+ AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+ AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
+ AAUDIO_USAGE_GAME,
+ AAUDIO_USAGE_ASSISTANT,
+ AAUDIO_SYSTEM_USAGE_EMERGENCY,
+ AAUDIO_SYSTEM_USAGE_SAFETY,
+ AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS,
+ AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT,
+ AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_content_type_t kContentTypes[] = {
+ AAUDIO_CONTENT_TYPE_SPEECH, AAUDIO_CONTENT_TYPE_MUSIC,
+ AAUDIO_CONTENT_TYPE_MOVIE, AAUDIO_CONTENT_TYPE_SONIFICATION,
+ AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_input_preset_t kInputPresets[] = {
+ AAUDIO_INPUT_PRESET_GENERIC,
+ AAUDIO_INPUT_PRESET_CAMCORDER,
+ AAUDIO_INPUT_PRESET_VOICE_RECOGNITION,
+ AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION,
+ AAUDIO_INPUT_PRESET_UNPROCESSED,
+ AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
+ AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_allowed_capture_policy_t kAllowedCapturePolicies[] = {
+ AAUDIO_ALLOW_CAPTURE_BY_ALL, AAUDIO_ALLOW_CAPTURE_BY_SYSTEM,
+ AAUDIO_ALLOW_CAPTURE_BY_NONE, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_session_id_t kSessionIds[] = {
+ AAUDIO_SESSION_ID_NONE, AAUDIO_SESSION_ID_ALLOCATE, AAUDIO_UNSPECIFIED};
+
+constexpr aaudio_policy_t kPolicies[] = {
+ AAUDIO_POLICY_NEVER, AAUDIO_POLICY_AUTO, AAUDIO_POLICY_ALWAYS,
+ AAUDIO_UNSPECIFIED};
+
+class LibAaudioFuzzer {
+public:
+ ~LibAaudioFuzzer() { deInit(); }
+ bool init();
+ void process(const uint8_t *data, size_t size);
+ void deInit();
+
+private:
+ AAudioStreamBuilder *mAaudioBuilder = nullptr;
+ AAudioStream *mAaudioStream = nullptr;
+};
+
+bool LibAaudioFuzzer::init() {
+ aaudio_result_t result = AAudio_createStreamBuilder(&mAaudioBuilder);
+ if ((result != AAUDIO_OK) || (!mAaudioBuilder)) {
+ return false;
+ }
+ return true;
+}
+
+void LibAaudioFuzzer::process(const uint8_t *data, size_t size) {
+ FuzzedDataProvider fdp(data, size);
+ aaudio_performance_mode_t mode =
+ fdp.PickValueInArray({fdp.PickValueInArray(kPerformanceModes),
+ fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setPerformanceMode(mAaudioBuilder, mode);
+
+ int32_t deviceId = fdp.PickValueInArray(
+ {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setDeviceId(mAaudioBuilder, deviceId);
+
+ std::string packageName = fdp.PickValueInArray<std::string>(
+ {"android.nativemedia.aaudio", "android.app.appops.cts",
+ fdp.ConsumeRandomLengthString(kRandomStringLength)});
+ AAudioStreamBuilder_setPackageName(mAaudioBuilder, packageName.c_str());
+
+ std::string attributionTag =
+ fdp.ConsumeRandomLengthString(kRandomStringLength);
+ AAudioStreamBuilder_setAttributionTag(mAaudioBuilder, attributionTag.c_str());
+
+ int32_t sampleRate = fdp.PickValueInArray(kSampleRates);
+ AAudioStreamBuilder_setSampleRate(mAaudioBuilder, sampleRate);
+
+ int32_t channelCount = fdp.PickValueInArray(
+ {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setChannelCount(mAaudioBuilder, channelCount);
+
+ aaudio_direction_t direction = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kDirections), fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setDirection(mAaudioBuilder, direction);
+
+ aaudio_format_t format = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kFormats), fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setFormat(mAaudioBuilder, format);
+
+ aaudio_sharing_mode_t sharingMode = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kSharingModes), fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setSharingMode(mAaudioBuilder, sharingMode);
+
+ aaudio_usage_t usage = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kUsages), fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setUsage(mAaudioBuilder, usage);
+
+ aaudio_content_type_t contentType = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kContentTypes), fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setContentType(mAaudioBuilder, contentType);
+
+ aaudio_input_preset_t inputPreset = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kInputPresets), fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setInputPreset(mAaudioBuilder, inputPreset);
+
+ bool privacySensitive = fdp.ConsumeBool();
+ AAudioStreamBuilder_setPrivacySensitive(mAaudioBuilder, privacySensitive);
+
+ int32_t frames = fdp.PickValueInArray(
+ {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setBufferCapacityInFrames(mAaudioBuilder, frames);
+
+ aaudio_allowed_capture_policy_t allowedCapturePolicy =
+ fdp.PickValueInArray({fdp.PickValueInArray(kAllowedCapturePolicies),
+ fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setAllowedCapturePolicy(mAaudioBuilder,
+ allowedCapturePolicy);
+
+ aaudio_session_id_t sessionId = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kSessionIds), fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setSessionId(mAaudioBuilder, sessionId);
+
+ AAudioStreamBuilder_setDataCallback(mAaudioBuilder, nullptr, nullptr);
+ AAudioStreamBuilder_setErrorCallback(mAaudioBuilder, nullptr, nullptr);
+
+ int32_t framesPerDataCallback = fdp.PickValueInArray(
+ {AAUDIO_UNSPECIFIED, fdp.ConsumeIntegral<int32_t>()});
+ AAudioStreamBuilder_setFramesPerDataCallback(mAaudioBuilder,
+ framesPerDataCallback);
+
+ aaudio_policy_t policy = fdp.PickValueInArray(
+ {fdp.PickValueInArray(kPolicies), fdp.ConsumeIntegral<int32_t>()});
+ AAudio_setMMapPolicy(policy);
+ (void)AAudio_getMMapPolicy();
+
+ aaudio_result_t result =
+ AAudioStreamBuilder_openStream(mAaudioBuilder, &mAaudioStream);
+ if ((result != AAUDIO_OK) || (!mAaudioStream)) {
+ return;
+ }
+
+ int32_t framesPerBurst = AAudioStream_getFramesPerBurst(mAaudioStream);
+ uint8_t numberOfBursts = fdp.ConsumeIntegral<uint8_t>();
+ int32_t maxInputFrames = numberOfBursts * framesPerBurst;
+ int32_t requestedBufferSize =
+ fdp.ConsumeIntegral<uint16_t>() * framesPerBurst;
+ AAudioStream_setBufferSizeInFrames(mAaudioStream, requestedBufferSize);
+
+ int64_t position = 0, nanoseconds = 0;
+ AAudioStream_getTimestamp(mAaudioStream, CLOCK_MONOTONIC, &position,
+ &nanoseconds);
+
+ AAudioStream_requestStart(mAaudioStream);
+
+ aaudio_format_t actualFormat = AAudioStream_getFormat(mAaudioStream);
+ int32_t actualChannelCount = AAudioStream_getChannelCount(mAaudioStream);
+
+ int32_t count = fdp.ConsumeIntegral<int32_t>();
+ direction = AAudioStream_getDirection(mAaudioStream);
+ framesPerDataCallback = AAudioStream_getFramesPerDataCallback(mAaudioStream);
+
+ if (actualFormat == AAUDIO_FORMAT_PCM_I16) {
+ std::vector<int16_t> inputShortData(maxInputFrames * actualChannelCount,
+ 0x0);
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ AAudioStream_read(mAaudioStream, inputShortData.data(),
+ framesPerDataCallback, count * kNanosPerMillisecond);
+ } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+ AAudioStream_write(mAaudioStream, inputShortData.data(),
+ framesPerDataCallback, count * kNanosPerMillisecond);
+ }
+ } else if (actualFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ std::vector<float> inputFloatData(maxInputFrames * actualChannelCount, 0x0);
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ AAudioStream_read(mAaudioStream, inputFloatData.data(),
+ framesPerDataCallback, count * kNanosPerMillisecond);
+ } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+ AAudioStream_write(mAaudioStream, inputFloatData.data(),
+ framesPerDataCallback, count * kNanosPerMillisecond);
+ }
+ }
+
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ AAudioStream_waitForStateChange(mAaudioStream, AAUDIO_STREAM_STATE_UNKNOWN,
+ &state, count * kNanosPerMillisecond);
+ (void)AAudio_convertStreamStateToText(state);
+
+ (void)AAudioStream_getUsage(mAaudioStream);
+ (void)AAudioStream_getSampleRate(mAaudioStream);
+ (void)AAudioStream_getState(mAaudioStream);
+ (void)AAudioStream_getSamplesPerFrame(mAaudioStream);
+ (void)AAudioStream_getContentType(mAaudioStream);
+ (void)AAudioStream_getInputPreset(mAaudioStream);
+ (void)AAudioStream_isPrivacySensitive(mAaudioStream);
+ (void)AAudioStream_getAllowedCapturePolicy(mAaudioStream);
+ (void)AAudioStream_getPerformanceMode(mAaudioStream);
+ (void)AAudioStream_getDeviceId(mAaudioStream);
+ (void)AAudioStream_getSharingMode(mAaudioStream);
+ (void)AAudioStream_getSessionId(mAaudioStream);
+ (void)AAudioStream_getFramesRead(mAaudioStream);
+ (void)AAudioStream_getFramesWritten(mAaudioStream);
+ (void)AAudioStream_getXRunCount(mAaudioStream);
+ (void)AAudioStream_getBufferCapacityInFrames(mAaudioStream);
+ (void)AAudioStream_getBufferSizeInFrames(mAaudioStream);
+ (void)AAudioStream_isMMapUsed(mAaudioStream);
+
+ AAudioStream_requestPause(mAaudioStream);
+ AAudioStream_requestFlush(mAaudioStream);
+ AAudioStream_release(mAaudioStream);
+ AAudioStream_requestStop(mAaudioStream);
+}
+
+void LibAaudioFuzzer::deInit() {
+ if (mAaudioBuilder) {
+ AAudioStreamBuilder_delete(mAaudioBuilder);
+ }
+ if (mAaudioStream) {
+ AAudioStream_close(mAaudioStream);
+ }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ LibAaudioFuzzer libAaudioFuzzer;
+ if (libAaudioFuzzer.init()) {
+ libAaudioFuzzer.process(data, size);
+ }
+ return 0;
+}
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 4b08295..efa9941 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -20,7 +20,7 @@
*/
/**
- * @file AAudio.h
+ * @file aaudio/AAudio.h
*/
/**
@@ -444,6 +444,22 @@
};
typedef int32_t aaudio_content_type_t;
+enum {
+
+ /**
+ * Constant indicating the audio content associated with these attributes will follow the
+ * default platform behavior with regards to which content will be spatialized or not.
+ */
+ AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO = 1,
+
+ /**
+ * Constant indicating the audio content associated with these attributes should never
+ * be spatialized.
+ */
+ AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER = 2,
+};
+typedef int32_t aaudio_spatialization_behavior_t;
+
/**
* Defines the audio source.
* An audio source defines both a default physical source of audio signal, and a recording
@@ -565,6 +581,145 @@
};
typedef int32_t aaudio_session_id_t;
+/**
+ * Defines the audio channel mask.
+ * Channel masks are used to describe the samples and their
+ * arrangement in the audio frame. They are also used in the endpoint
+ * (e.g. a USB audio interface, a DAC connected to headphones) to
+ * specify allowable configurations of a particular device.
+ *
+ * Added in API level 32.
+ */
+enum {
+ /**
+ * Invalid channel mask
+ */
+ AAUDIO_CHANNEL_INVALID = -1,
+
+ /**
+ * Output audio channel mask
+ */
+ AAUDIO_CHANNEL_FRONT_LEFT = 1 << 0,
+ AAUDIO_CHANNEL_FRONT_RIGHT = 1 << 1,
+ AAUDIO_CHANNEL_FRONT_CENTER = 1 << 2,
+ AAUDIO_CHANNEL_LOW_FREQUENCY = 1 << 3,
+ AAUDIO_CHANNEL_BACK_LEFT = 1 << 4,
+ AAUDIO_CHANNEL_BACK_RIGHT = 1 << 5,
+ AAUDIO_CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6,
+ AAUDIO_CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7,
+ AAUDIO_CHANNEL_BACK_CENTER = 1 << 8,
+ AAUDIO_CHANNEL_SIDE_LEFT = 1 << 9,
+ AAUDIO_CHANNEL_SIDE_RIGHT = 1 << 10,
+ AAUDIO_CHANNEL_TOP_CENTER = 1 << 11,
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT = 1 << 12,
+ AAUDIO_CHANNEL_TOP_FRONT_CENTER = 1 << 13,
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT = 1 << 14,
+ AAUDIO_CHANNEL_TOP_BACK_LEFT = 1 << 15,
+ AAUDIO_CHANNEL_TOP_BACK_CENTER = 1 << 16,
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT = 1 << 17,
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT = 1 << 18,
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT = 1 << 19,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22,
+ AAUDIO_CHANNEL_LOW_FREQUENCY_2 = 1 << 23,
+ AAUDIO_CHANNEL_FRONT_WIDE_LEFT = 1 << 24,
+ AAUDIO_CHANNEL_FRONT_WIDE_RIGHT = 1 << 25,
+
+ AAUDIO_CHANNEL_MONO = AAUDIO_CHANNEL_FRONT_LEFT,
+ AAUDIO_CHANNEL_STEREO = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT,
+ AAUDIO_CHANNEL_2POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_TRI = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER,
+ AAUDIO_CHANNEL_TRI_BACK = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_3POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_2POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_2POINT1POINT2 = AAUDIO_CHANNEL_2POINT0POINT2 |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_3POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_3POINT1POINT2 = AAUDIO_CHANNEL_3POINT0POINT2 |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_QUAD = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT,
+ AAUDIO_CHANNEL_QUAD_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_SURROUND = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_PENTA = AAUDIO_CHANNEL_QUAD |
+ AAUDIO_CHANNEL_FRONT_CENTER,
+ // aka 5POINT1_BACK
+ AAUDIO_CHANNEL_5POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT,
+ AAUDIO_CHANNEL_5POINT1_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_6POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_7POINT1 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_5POINT1POINT2 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_5POINT1POINT4 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_BACK_LEFT |
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+ AAUDIO_CHANNEL_7POINT1POINT2 = AAUDIO_CHANNEL_7POINT1 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_7POINT1POINT4 = AAUDIO_CHANNEL_7POINT1 |
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_BACK_LEFT |
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+ AAUDIO_CHANNEL_9POINT1POINT4 = AAUDIO_CHANNEL_7POINT1POINT4 |
+ AAUDIO_CHANNEL_FRONT_WIDE_LEFT |
+ AAUDIO_CHANNEL_FRONT_WIDE_RIGHT,
+ AAUDIO_CHANNEL_9POINT1POINT6 = AAUDIO_CHANNEL_9POINT1POINT4 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+
+ AAUDIO_CHANNEL_FRONT_BACK = AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_BACK_CENTER,
+};
+typedef uint32_t aaudio_channel_mask_t;
+
typedef struct AAudioStreamStruct AAudioStream;
typedef struct AAudioStreamBuilderStruct AAudioStreamBuilder;
@@ -643,8 +798,11 @@
* This is usually {@code Context#getPackageName()}.
*
* The default, if you do not call this function, is a random package in the calling uid.
- * The vast majority of apps have only one package per calling UID. If the package
- * name does not match the calling UID, then requests will be rejected.
+ * The vast majority of apps have only one package per calling UID.
+ * If an invalid package name is set, input streams may not be given permission to
+ * record when started.
+ *
+ * The package name is usually the applicationId in your app's build.gradle file.
*
* Available since API level 31.
*
@@ -699,6 +857,11 @@
* If an exact value is specified then an opened stream will use that value.
* If a stream cannot be opened with the specified value then the open will fail.
*
+ * As the channel count provided here may be different from the corresponding channel count
+ * of the channel mask used in {@link AAudioStreamBuilder_setChannelMask}, the last called function
+ * will be respected if both this function and {@link AAudioStreamBuilder_setChannelMask} are
+ * called.
+ *
* Available since API level 26.
*
* @param builder reference provided by AAudio_createStreamBuilder()
@@ -714,6 +877,8 @@
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param samplesPerFrame Number of samples in a frame.
+ *
+ * @deprecated use {@link AAudioStreamBuilder_setChannelCount}
*/
AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
int32_t samplesPerFrame) __INTRODUCED_IN(26);
@@ -836,6 +1001,37 @@
aaudio_content_type_t contentType) __INTRODUCED_IN(28);
/**
+ * Sets the behavior affecting whether spatialization will be used.
+ *
+ * The AAudio system will use this information to select whether the stream will go
+ * through a spatializer effect or not when the effect is supported and enabled.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param spatializationBehavior the desired behavior with regard to spatialization, e.g.
+ * {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+ aaudio_spatialization_behavior_t spatializationBehavior) __INTRODUCED_IN(32);
+
+/**
+ * Specifies whether the audio data of this output stream has already been processed for
+ * spatialization.
+ *
+ * If the stream has been processed for spatialization, setting this to true will prevent
+ * issues such as double-processing on platforms that will spatialize audio data.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param isSpatialized true if the content is already processed for binaural or transaural spatial
+ * rendering, false otherwise.
+ */
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+ bool isSpatialized) __INTRODUCED_IN(32);
+
+/**
* Set the input (capture) preset for the stream.
*
* The AAudio system will use this information to optimize the
@@ -1136,6 +1332,32 @@
AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
__INTRODUCED_IN(26);
+/**
+ * Set audio channel mask for the stream.
+ *
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
+ * If both channel mask and count are not set, then stereo will be chosen when the
+ * stream is opened.
+ * After opening a stream with an unspecified value, the application must query for the
+ * actual value, which may vary by device.
+ *
+ * If an exact value is specified then an opened stream will use that value.
+ * If a stream cannot be opened with the specified value then the open will fail.
+ *
+ * As the channel count corresponding to the provided channel mask may be different
+ * from the channel count used in {@link AAudioStreamBuilder_setChannelCount} or
+ * {@link AAudioStreamBuilder_setSamplesPerFrame}, the last called function will be
+ * respected if this function and {@link AAudioStreamBuilder_setChannelCount} or
+ * {@link AAudioStreamBuilder_setSamplesPerFrame} are called.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param channelMask Audio channel mask desired.
+ */
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+ aaudio_channel_mask_t channelMask) __INTRODUCED_IN(32);
+
// ============================================================
// Stream Control
// ============================================================
@@ -1616,6 +1838,31 @@
__INTRODUCED_IN(28);
/**
+ * Return the spatialization behavior for the stream.
+ *
+ * If none was explicitly set, it will return the default
+ * {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO} behavior.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return spatialization behavior, for example {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+ AAudioStream* stream) __INTRODUCED_IN(32);
+
+/**
+ * Return whether the content of the stream is spatialized.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return true if the content is spatialized
+ */
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream) __INTRODUCED_IN(32);
+
+
+/**
* Return the input preset for the stream.
*
* Available since API level 28.
@@ -1652,6 +1899,18 @@
AAUDIO_API bool AAudioStream_isPrivacySensitive(AAudioStream* stream)
__INTRODUCED_IN(30);
+/**
+ * Return the channel mask for the stream. This will be the mask set using
+ * {@link #AAudioStreamBuilder_setChannelMask}, or {@link #AAUDIO_UNSPECIFIED} otherwise.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual channel mask
+ */
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+ __INTRODUCED_IN(32);
+
#ifdef __cplusplus
}
#endif
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 33a5c7f..ddd3f97 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -7,6 +7,65 @@
default_applicable_licenses: ["frameworks_av_license"],
}
+tidy_errors = [
+ // https://clang.llvm.org/extra/clang-tidy/checks/list.html
+ // For many categories, the checks are too many to specify individually.
+ // Feel free to disable as needed - as warnings are generally ignored,
+ // we treat warnings as errors.
+ "android-*",
+ "bugprone-*",
+ "cert-*",
+ "clang-analyzer-security*",
+ "google-*",
+ "misc-*",
+ //"modernize-*", // explicitly list the modernize as they can be subjective.
+ "modernize-avoid-bind",
+ //"modernize-avoid-c-arrays", // std::array<> can be verbose
+ "modernize-concat-nested-namespaces",
+ //"modernize-deprecated-headers", // C headers still ok even if there is C++ equivalent.
+ "modernize-deprecated-ios-base-aliases",
+ "modernize-loop-convert",
+ "modernize-make-shared",
+ "modernize-make-unique",
+ "modernize-pass-by-value",
+ "modernize-raw-string-literal",
+ "modernize-redundant-void-arg",
+ "modernize-replace-auto-ptr",
+ "modernize-replace-random-shuffle",
+ "modernize-return-braced-init-list",
+ "modernize-shrink-to-fit",
+ "modernize-unary-static-assert",
+ // "modernize-use-auto", // found in AAudioAudio.cpp
+ "modernize-use-bool-literals",
+ "modernize-use-default-member-init",
+ "modernize-use-emplace",
+ "modernize-use-equals-default",
+ "modernize-use-equals-delete",
+ // "modernize-use-nodiscard", // found in aidl generated files
+ "modernize-use-noexcept",
+ "modernize-use-nullptr",
+ // "modernize-use-override", // found in aidl generated files
+ // "modernize-use-trailing-return-type", // not necessarily more readable
+ "modernize-use-transparent-functors",
+ "modernize-use-uncaught-exceptions",
+ // "modernize-use-using", // found typedef in several files
+ "performance-*",
+
+ // Remove some pedantic stylistic requirements.
+ "-android-cloexec-dup", // found in SharedMemoryParcelable.cpp
+ "-bugprone-macro-parentheses", // found in SharedMemoryParcelable.h
+ "-bugprone-narrowing-conversions", // found in several interface from size_t to int32_t
+
+ "-google-readability-casting", // C++ casts not always necessary and may be verbose
+ "-google-readability-todo", // do not require TODO(info)
+ "-google-build-using-namespace", // Reenable and fix later.
+ "-google-global-names-in-headers", // found in several files
+
+ "-misc-non-private-member-variables-in-classes", // found in aidl generated files
+
+ "-performance-no-int-to-ptr", // found in SharedMemoryParcelable.h
+]
+
cc_library {
name: "libaaudio",
@@ -52,7 +111,7 @@
"libcutils",
"libutils",
"libbinder",
- "libpermission",
+ "framework-permission-aidl-cpp",
],
sanitize: {
@@ -64,6 +123,13 @@
symbol_file: "libaaudio.map.txt",
versions: ["28"],
},
+
+ tidy: true,
+ tidy_checks: tidy_errors,
+ tidy_checks_as_errors: tidy_errors,
+ tidy_flags: [
+ "-format-style=file",
+ ]
}
cc_library {
@@ -102,6 +168,8 @@
"libbinder",
"framework-permission-aidl-cpp",
"aaudio-aidl-cpp",
+ "android.media.audio.common.types-V1-cpp",
+ "audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
],
@@ -139,10 +207,14 @@
"binding/RingBufferParcelable.cpp",
"binding/SharedMemoryParcelable.cpp",
"binding/SharedRegionParcelable.cpp",
- "flowgraph/AudioProcessorBase.cpp",
+ "flowgraph/ChannelCountConverter.cpp",
"flowgraph/ClipToRange.cpp",
+ "flowgraph/FlowGraphNode.cpp",
+ "flowgraph/ManyToMultiConverter.cpp",
"flowgraph/MonoToMultiConverter.cpp",
+ "flowgraph/MultiToMonoConverter.cpp",
"flowgraph/RampLinear.cpp",
+ "flowgraph/SampleRateConverter.cpp",
"flowgraph/SinkFloat.cpp",
"flowgraph/SinkI16.cpp",
"flowgraph/SinkI24.cpp",
@@ -151,11 +223,26 @@
"flowgraph/SourceI16.cpp",
"flowgraph/SourceI24.cpp",
"flowgraph/SourceI32.cpp",
+ "flowgraph/resampler/IntegerRatio.cpp",
+ "flowgraph/resampler/LinearResampler.cpp",
+ "flowgraph/resampler/MultiChannelResampler.cpp",
+ "flowgraph/resampler/PolyphaseResampler.cpp",
+ "flowgraph/resampler/PolyphaseResamplerMono.cpp",
+ "flowgraph/resampler/PolyphaseResamplerStereo.cpp",
+ "flowgraph/resampler/SincResampler.cpp",
+ "flowgraph/resampler/SincResamplerStereo.cpp",
],
sanitize: {
integer_overflow: true,
misc_undefined: ["bounds"],
},
+
+ tidy: true,
+ tidy_checks: tidy_errors,
+ tidy_checks_as_errors: tidy_errors,
+ tidy_flags: [
+ "-format-style=file",
+ ]
}
aidl_interface {
@@ -172,19 +259,15 @@
"binding/aidl/aaudio/IAAudioService.aidl",
],
imports: [
- "audio_common-aidl",
+ "android.media.audio.common.types",
+ "audioclient-types-aidl",
"shared-file-region-aidl",
- "framework-permission-aidl"
+ "framework-permission-aidl",
],
backend:
{
- cpp: {
- enabled: true,
- },
java: {
- // TODO: need to have audio_common-aidl available in Java to enable
- // this.
- enabled: false,
+ sdk_version: "module_current",
},
},
}
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index fa5a2da..135bac3 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -36,13 +36,10 @@
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
-using android::IInterface;
using android::Mutex;
using android::ProcessState;
using android::sp;
using android::status_t;
-using android::wp;
-using android::binder::Status;
using namespace aaudio;
@@ -93,7 +90,7 @@
ALOGE("%s() - linkToDeath() returned %d", __func__, status);
}
aaudioService = interface_cast<IAAudioService>(binder);
- mAdapter.reset(new Adapter(aaudioService, mAAudioClient));
+ mAdapter = std::make_shared<Adapter>(aaudioService, mAAudioClient);
needToRegister = true;
// Make sure callbacks can be received by mAAudioClient
ProcessState::self()->startThreadPool();
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index 6a7b639..557ced5 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -108,7 +108,7 @@
return AAUDIO_ERROR_UNAVAILABLE;
}
- void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {
+ void onStreamChange(aaudio_handle_t /*handle*/, int32_t /*opcode*/, int32_t /*value*/) {
// TODO This is just a stub so we can have a client Binder to pass to the service.
// TODO Implemented in a later CL.
ALOGW("onStreamChange called!");
@@ -116,7 +116,7 @@
class AAudioClient : public android::IBinder::DeathRecipient, public BnAAudioClient {
public:
- AAudioClient(android::wp<AAudioBinderClient> aaudioBinderClient)
+ explicit AAudioClient(const android::wp<AAudioBinderClient>& aaudioBinderClient)
: mBinderClient(aaudioBinderClient) {
}
@@ -150,10 +150,10 @@
class Adapter : public AAudioBinderAdapter {
public:
Adapter(const android::sp<IAAudioService>& delegate,
- const android::sp<AAudioClient>& aaudioClient)
+ android::sp<AAudioClient> aaudioClient)
: AAudioBinderAdapter(delegate.get()),
mDelegate(delegate),
- mAAudioClient(aaudioClient) {}
+ mAAudioClient(std::move(aaudioClient)) {}
virtual ~Adapter() {
if (mDelegate != nullptr) {
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 5d11512..bf94774 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -37,7 +37,7 @@
class AAudioServiceInterface {
public:
- AAudioServiceInterface() {};
+ AAudioServiceInterface() = default;
virtual ~AAudioServiceInterface() = default;
virtual void registerClient(const android::sp<IAAudioClient>& client) = 0;
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 2d501ef..b60bac2 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -23,26 +23,36 @@
#include <sys/mman.h>
#include <aaudio/AAudio.h>
+#include <media/AidlConversion.h>
+
#include "binding/AAudioStreamConfiguration.h"
using namespace aaudio;
-using android::media::audio::common::AudioFormat;
+using android::media::audio::common::AudioFormatDescription;
AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
- setSamplesPerFrame(parcelable.samplesPerFrame);
+ setChannelMask(parcelable.channelMask);
setSampleRate(parcelable.sampleRate);
setDeviceId(parcelable.deviceId);
static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
setSharingMode(parcelable.sharingMode);
- static_assert(sizeof(audio_format_t) == sizeof(parcelable.audioFormat));
- setFormat(static_cast<audio_format_t>(parcelable.audioFormat));
+ auto convFormat = android::aidl2legacy_AudioFormatDescription_audio_format_t(
+ parcelable.audioFormat);
+ setFormat(convFormat.ok() ? convFormat.value() : AUDIO_FORMAT_INVALID);
static_assert(sizeof(aaudio_direction_t) == sizeof(parcelable.direction));
setDirection(parcelable.direction);
static_assert(sizeof(audio_usage_t) == sizeof(parcelable.usage));
setUsage(parcelable.usage);
static_assert(sizeof(aaudio_content_type_t) == sizeof(parcelable.contentType));
setContentType(parcelable.contentType);
+
+ static_assert(sizeof(aaudio_spatialization_behavior_t) ==
+ sizeof(parcelable.spatializationBehavior));
+ setSpatializationBehavior(parcelable.spatializationBehavior);
+ setIsContentSpatialized(parcelable.isContentSpatialized);
+
+
static_assert(sizeof(aaudio_input_preset_t) == sizeof(parcelable.inputPreset));
setInputPreset(parcelable.inputPreset);
setBufferCapacity(parcelable.bufferCapacity);
@@ -63,13 +73,19 @@
StreamParameters AAudioStreamConfiguration::parcelable() const {
StreamParameters result;
- result.samplesPerFrame = getSamplesPerFrame();
+ result.channelMask = getChannelMask();
result.sampleRate = getSampleRate();
result.deviceId = getDeviceId();
static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));
result.sharingMode = getSharingMode();
- static_assert(sizeof(audio_format_t) == sizeof(result.audioFormat));
- result.audioFormat = static_cast<AudioFormat>(getFormat());
+ auto convAudioFormat = android::legacy2aidl_audio_format_t_AudioFormatDescription(getFormat());
+ if (convAudioFormat.ok()) {
+ result.audioFormat = convAudioFormat.value();
+ } else {
+ result.audioFormat = AudioFormatDescription{};
+ result.audioFormat.type =
+ android::media::audio::common::AudioFormatType::SYS_RESERVED_INVALID;
+ }
static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
result.direction = getDirection();
static_assert(sizeof(audio_usage_t) == sizeof(result.usage));
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 8d90034..a4cc2bd 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -30,7 +30,7 @@
using namespace aaudio;
AAudioStreamRequest::AAudioStreamRequest(const StreamRequest& parcelable) :
- mConfiguration(std::move(parcelable.params)),
+ mConfiguration(parcelable.params),
mAttributionSource(parcelable.attributionSource),
mSharingModeMatchRequired(parcelable.sharingModeMatchRequired),
mInService(parcelable.inService) {
@@ -38,7 +38,7 @@
StreamRequest AAudioStreamRequest::parcelable() const {
StreamRequest result;
- result.params = std::move(mConfiguration).parcelable();
+ result.params = mConfiguration.parcelable();
result.attributionSource = mAttributionSource;
result.sharingModeMatchRequired = mSharingModeMatchRequired;
result.inService = mInService;
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index aa4ac27..dea3e4a 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -29,17 +29,15 @@
#include "binding/AudioEndpointParcelable.h"
using android::base::unique_fd;
-using android::media::SharedFileRegion;
-using android::NO_ERROR;
using android::status_t;
using namespace aaudio;
AudioEndpointParcelable::AudioEndpointParcelable(Endpoint&& parcelable)
- : mUpMessageQueueParcelable(std::move(parcelable.upMessageQueueParcelable)),
- mDownMessageQueueParcelable(std::move(parcelable.downMessageQueueParcelable)),
- mUpDataQueueParcelable(std::move(parcelable.upDataQueueParcelable)),
- mDownDataQueueParcelable(std::move(parcelable.downDataQueueParcelable)),
+ : mUpMessageQueueParcelable(parcelable.upMessageQueueParcelable),
+ mDownMessageQueueParcelable(parcelable.downMessageQueueParcelable),
+ mUpDataQueueParcelable(parcelable.upDataQueueParcelable),
+ mDownDataQueueParcelable(parcelable.downDataQueueParcelable),
mNumSharedMemories(parcelable.sharedMemories.size()) {
for (size_t i = 0; i < parcelable.sharedMemories.size() && i < MAX_SHARED_MEMORIES; ++i) {
// Re-construct.
@@ -56,10 +54,10 @@
Endpoint AudioEndpointParcelable::parcelable()&& {
Endpoint result;
- result.upMessageQueueParcelable = std::move(mUpMessageQueueParcelable).parcelable();
- result.downMessageQueueParcelable = std::move(mDownMessageQueueParcelable).parcelable();
- result.upDataQueueParcelable = std::move(mUpDataQueueParcelable).parcelable();
- result.downDataQueueParcelable = std::move(mDownDataQueueParcelable).parcelable();
+ result.upMessageQueueParcelable = mUpMessageQueueParcelable.parcelable();
+ result.downMessageQueueParcelable = mDownMessageQueueParcelable.parcelable();
+ result.upDataQueueParcelable = mUpDataQueueParcelable.parcelable();
+ result.downDataQueueParcelable = mDownDataQueueParcelable.parcelable();
result.sharedMemories.reserve(std::min(mNumSharedMemories, MAX_SHARED_MEMORIES));
for (size_t i = 0; i < mNumSharedMemories && i < MAX_SHARED_MEMORIES; ++i) {
result.sharedMemories.emplace_back(std::move(mSharedMemories[i]).parcelable());
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index 5237a1a..544aa92 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -43,7 +43,7 @@
// Ctor/assignment from a parcelable representation.
// Since the parcelable object owns unique FDs (for shared memory blocks), move semantics are
// provided to avoid the need to dupe.
- AudioEndpointParcelable(Endpoint&& parcelable);
+ explicit AudioEndpointParcelable(Endpoint&& parcelable);
AudioEndpointParcelable& operator=(Endpoint&& parcelable);
/**
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index a4b3cec..fa7ca72 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -30,9 +30,9 @@
using namespace aaudio;
RingBufferParcelable::RingBufferParcelable(const RingBuffer& parcelable)
- : mReadCounterParcelable(std::move(parcelable.readCounterParcelable)),
- mWriteCounterParcelable(std::move(parcelable.writeCounterParcelable)),
- mDataParcelable(std::move(parcelable.dataParcelable)),
+ : mReadCounterParcelable(parcelable.readCounterParcelable),
+ mWriteCounterParcelable(parcelable.writeCounterParcelable),
+ mDataParcelable(parcelable.dataParcelable),
mBytesPerFrame(parcelable.bytesPerFrame),
mFramesPerBurst(parcelable.framesPerBurst),
mCapacityInFrames(parcelable.capacityInFrames),
@@ -42,9 +42,9 @@
RingBuffer RingBufferParcelable::parcelable() const {
RingBuffer result;
- result.readCounterParcelable = std::move(mReadCounterParcelable).parcelable();
- result.writeCounterParcelable = std::move(mWriteCounterParcelable).parcelable();
- result.dataParcelable = std::move(mDataParcelable).parcelable();
+ result.readCounterParcelable = mReadCounterParcelable.parcelable();
+ result.writeCounterParcelable = mWriteCounterParcelable.parcelable();
+ result.dataParcelable = mDataParcelable.parcelable();
result.bytesPerFrame = mBytesPerFrame;
result.framesPerBurst = mFramesPerBurst;
result.capacityInFrames = mCapacityInFrames;
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index eef238f..3a49655 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -32,7 +32,6 @@
#include "binding/SharedMemoryParcelable.h"
using android::base::unique_fd;
-using android::NO_ERROR;
using android::status_t;
using android::media::SharedFileRegion;
@@ -78,7 +77,7 @@
}
aaudio_result_t SharedMemoryParcelable::resolveSharedMemory(const unique_fd& fd) {
- mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
+ mResolvedAddress = (uint8_t *) mmap(nullptr, mSizeInBytes, PROT_READ | PROT_WRITE,
MAP_SHARED, fd.get(), 0);
if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
ALOGE("mmap() failed for fd = %d, nBytes = %" PRId64 ", errno = %s",
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 56b99c0..6fa109b 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -29,10 +29,7 @@
#include "binding/SharedMemoryParcelable.h"
#include "binding/SharedRegionParcelable.h"
-using android::NO_ERROR;
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
using namespace aaudio;
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index b7c4f70..983e193 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -16,17 +16,19 @@
package aaudio;
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioFormatDescription;
parcelable StreamParameters {
- int samplesPerFrame; // = AAUDIO_UNSPECIFIED;
+ int channelMask; // = AAUDIO_UNSPECIFIED;
int sampleRate; // = AAUDIO_UNSPECIFIED;
int deviceId; // = AAUDIO_UNSPECIFIED;
int /* aaudio_sharing_mode_t */ sharingMode; // = AAUDIO_SHARING_MODE_SHARED;
- AudioFormat audioFormat; // = AUDIO_FORMAT_DEFAULT;
+ AudioFormatDescription audioFormat; // = AUDIO_FORMAT_DEFAULT;
int /* aaudio_direction_t */ direction; // = AAUDIO_DIRECTION_OUTPUT;
int /* aaudio_usage_t */ usage; // = AAUDIO_UNSPECIFIED;
int /* aaudio_content_type_t */ contentType; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_spatialization_behavior_t */spatializationBehavior; //= AAUDIO_UNSPECIFIED;
+ boolean isContentSpatialized; // = false;
int /* aaudio_input_preset_t */ inputPreset; // = AAUDIO_UNSPECIFIED;
int bufferCapacity; // = AAUDIO_UNSPECIFIED;
int /* aaudio_allowed_capture_policy_t */ allowedCapturePolicy; // = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
index 61b50f3..68b84c8 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.cpp
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -38,7 +38,7 @@
int32_t sourceChannelCount,
audio_format_t sinkFormat,
int32_t sinkChannelCount) {
- AudioFloatOutputPort *lastOutput = nullptr;
+ FlowGraphPortFloatOutput *lastOutput = nullptr;
// TODO change back to ALOGD
ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d",
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index a49f64e..079328a 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -53,11 +53,11 @@
void setRampLengthInFrames(int32_t numFrames);
private:
- std::unique_ptr<flowgraph::AudioSource> mSource;
- std::unique_ptr<flowgraph::RampLinear> mVolumeRamp;
- std::unique_ptr<flowgraph::ClipToRange> mClipper;
- std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
- std::unique_ptr<flowgraph::AudioSink> mSink;
+ std::unique_ptr<flowgraph::FlowGraphSourceBuffered> mSource;
+ std::unique_ptr<flowgraph::RampLinear> mVolumeRamp;
+ std::unique_ptr<flowgraph::ClipToRange> mClipper;
+ std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
+ std::unique_ptr<flowgraph::FlowGraphSink> mSink;
};
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index ebc9f2b..24888de 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -31,13 +31,6 @@
#define RIDICULOUSLY_LARGE_BUFFER_CAPACITY (256 * 1024)
#define RIDICULOUSLY_LARGE_FRAME_SIZE 4096
-AudioEndpoint::AudioEndpoint()
- : mFreeRunning(false)
- , mDataReadCounter(0)
- , mDataWriteCounter(0)
-{
-}
-
// TODO Consider moving to a method in RingBufferDescriptor
static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
const RingBufferDescriptor *descriptor) {
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 4c8d60f..b3dbc20 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -34,7 +34,7 @@
class AudioEndpoint {
public:
- AudioEndpoint();
+ AudioEndpoint() = default;
/**
* Configure based on the EndPointDescriptor_t.
@@ -95,9 +95,9 @@
private:
std::unique_ptr<android::FifoBufferIndirect> mUpCommandQueue;
std::unique_ptr<android::FifoBufferIndirect> mDataQueue;
- bool mFreeRunning;
- android::fifo_counter_t mDataReadCounter; // only used if free-running
- android::fifo_counter_t mDataWriteCounter; // only used if free-running
+ bool mFreeRunning{false};
+ android::fifo_counter_t mDataReadCounter{0}; // only used if free-running
+ android::fifo_counter_t mDataWriteCounter{0}; // only used if free-running
};
} // namespace aaudio
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 6d2d464..89d42bf 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -27,6 +27,7 @@
#include <aaudio/AAudio.h>
#include <cutils/properties.h>
+#include <media/AudioSystem.h>
#include <media/MediaMetricsItem.h>
#include <utils/Trace.h>
@@ -49,8 +50,6 @@
// This is needed to make sense of the logs more easily.
#define LOG_TAG (mInService ? "AudioStreamInternal_Service" : "AudioStreamInternal_Client")
-using android::Mutex;
-using android::WrappingBuffer;
using android::content::AttributionSourceState;
using namespace aaudio;
@@ -97,7 +96,7 @@
return result;
}
- const int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
+ const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
int32_t burstMicros = 0;
const audio_format_t requestedFormat = getFormat();
@@ -123,12 +122,14 @@
request.getConfiguration().setDeviceId(getDeviceId());
request.getConfiguration().setSampleRate(getSampleRate());
- request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
request.getConfiguration().setDirection(getDirection());
request.getConfiguration().setSharingMode(getSharingMode());
+ request.getConfiguration().setChannelMask(getChannelMask());
request.getConfiguration().setUsage(getUsage());
request.getConfiguration().setContentType(getContentType());
+ request.getConfiguration().setSpatializationBehavior(getSpatializationBehavior());
+ request.getConfiguration().setIsContentSpatialized(isContentSpatialized());
request.getConfiguration().setInputPreset(getInputPreset());
request.getConfiguration().setPrivacySensitive(isPrivacySensitive());
@@ -138,7 +139,8 @@
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
if (mServiceStreamHandle < 0
- && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
+ && (request.getConfiguration().getSamplesPerFrame() == 1
+ || request.getConfiguration().getChannelMask() == AAUDIO_CHANNEL_MONO)
&& getDirection() == AAUDIO_DIRECTION_OUTPUT
&& !isInService()) {
// if that failed then try switching from mono to stereo if OUTPUT.
@@ -146,7 +148,7 @@
// that writes to a stereo MMAP stream.
ALOGD("%s() - openStream() returned %d, try switching from MONO to STEREO",
__func__, mServiceStreamHandle);
- request.getConfiguration().setSamplesPerFrame(2); // stereo
+ request.getConfiguration().setChannelMask(AAUDIO_CHANNEL_STEREO);
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
}
if (mServiceStreamHandle < 0) {
@@ -174,9 +176,10 @@
goto error;
}
// Save results of the open.
- if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
- setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
+ if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+ setChannelMask(configurationOutput.getChannelMask());
}
+
mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
setSampleRate(configurationOutput.getSampleRate());
@@ -186,6 +189,8 @@
setUsage(configurationOutput.getUsage());
setContentType(configurationOutput.getContentType());
+ setSpatializationBehavior(configurationOutput.getSpatializationBehavior());
+ setIsContentSpatialized(configurationOutput.isContentSpatialized());
setInputPreset(configurationOutput.getInputPreset());
// Save device format so we can do format conversion and volume scaling together.
@@ -332,10 +337,10 @@
{
AudioStreamInternal *stream = (AudioStreamInternal *)context;
//LOGD("oboe_callback_thread, stream = %p", stream);
- if (stream != NULL) {
+ if (stream != nullptr) {
return stream->callbackLoop();
} else {
- return NULL;
+ return nullptr;
}
}
@@ -424,7 +429,7 @@
if (isDataCallbackSet()
&& (isActive() || getState() == AAUDIO_STREAM_STATE_DISCONNECTED)) {
mCallbackEnabled.store(false);
- aaudio_result_t result = joinThread_l(NULL); // may temporarily unlock mStreamLock
+ aaudio_result_t result = joinThread_l(nullptr); // may temporarily unlock mStreamLock
if (result == AAUDIO_ERROR_INVALID_HANDLE) {
ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
result = AAUDIO_OK;
@@ -511,7 +516,7 @@
return result;
}
-aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t /*clockId*/,
int64_t *framePosition,
int64_t *timeNanoseconds) {
// Generated in server and passed to client. Return latest.
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index fbe4c13..eab1382 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -116,7 +116,7 @@
virtual void prepareBuffersForStart() {}
- virtual void advanceClientToMatchServerPosition(int32_t serverMargin = 0) = 0;
+ virtual void advanceClientToMatchServerPosition(int32_t serverMargin) = 0;
virtual void onFlushFromServer() {}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 2da5406..1efccb1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -46,8 +46,6 @@
}
-AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
-
void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
int64_t readCounter = mAudioEndpoint->getDataReadCounter();
int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
@@ -109,7 +107,7 @@
if (mNeedCatchUp.isRequested()) {
// Catch an MMAP pointer that is already advancing.
// This will avoid initial underruns caused by a slow cold start.
- advanceClientToMatchServerPosition();
+ advanceClientToMatchServerPosition(0 /*serverMargin*/);
mNeedCatchUp.acknowledge();
}
@@ -228,7 +226,7 @@
void *AudioStreamInternalCapture::callbackLoop() {
aaudio_result_t result = AAUDIO_OK;
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- if (!isDataCallbackSet()) return NULL;
+ if (!isDataCallbackSet()) return nullptr;
// result might be a frame count
while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -260,5 +258,5 @@
ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
result, (int) isActive());
- return NULL;
+ return nullptr;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 251a7f2..87017de 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -28,8 +28,9 @@
class AudioStreamInternalCapture : public AudioStreamInternal {
public:
- AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface, bool inService = false);
- virtual ~AudioStreamInternalCapture();
+ explicit AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
+ bool inService = false);
+ virtual ~AudioStreamInternalCapture() = default;
aaudio_result_t read(void *buffer,
int32_t numFrames,
@@ -45,7 +46,7 @@
}
protected:
- void advanceClientToMatchServerPosition(int32_t serverOffset = 0) override;
+ void advanceClientToMatchServerPosition(int32_t serverOffset) override;
/**
* Low level data processing that will not block. It will just read or write as much as it can.
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 71bde90..5921799 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -44,8 +44,6 @@
}
-AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
-
constexpr int kRampMSec = 10; // time to apply a change in volume
aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
@@ -115,7 +113,7 @@
}
void AudioStreamInternalPlay::onFlushFromServer() {
- advanceClientToMatchServerPosition();
+ advanceClientToMatchServerPosition(0 /*serverMargin*/);
}
// Write the data, block if needed and timeoutMillis > 0
@@ -281,7 +279,7 @@
ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
aaudio_result_t result = AAUDIO_OK;
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- if (!isDataCallbackSet()) return NULL;
+ if (!isDataCallbackSet()) return nullptr;
int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
// result might be a frame count
@@ -309,7 +307,7 @@
ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
__func__, result, (int) isActive());
- return NULL;
+ return nullptr;
}
//------------------------------------------------------------------------------
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index 03c957d..e761807 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -30,8 +30,9 @@
class AudioStreamInternalPlay : public AudioStreamInternal {
public:
- AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface, bool inService = false);
- virtual ~AudioStreamInternalPlay();
+ explicit AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
+ bool inService = false);
+ virtual ~AudioStreamInternalPlay() = default;
aaudio_result_t open(const AudioStreamBuilder &builder) override;
@@ -66,7 +67,7 @@
void prepareBuffersForStart() override;
- void advanceClientToMatchServerPosition(int32_t serverMargin = 0) override;
+ void advanceClientToMatchServerPosition(int32_t serverMargin) override;
void onFlushFromServer() override;
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index f0dcd44..6921271 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -43,14 +43,7 @@
// and dumped to the log when the stream is stopped.
IsochronousClockModel::IsochronousClockModel()
- : mMarkerFramePosition(0)
- , mMarkerNanoTime(0)
- , mSampleRate(48000)
- , mFramesPerBurst(48)
- , mBurstPeriodNanos(0) // this will be updated before use
- , mMaxMeasuredLatenessNanos(0)
- , mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
- , mState(STATE_STOPPED)
+ : mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
{
if ((AAudioProperty_getLogMask() & AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM) != 0) {
mHistogramMicros = std::make_unique<Histogram>(kHistogramBinCount,
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 6280013..3007237 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -149,16 +149,16 @@
static constexpr int32_t kHistogramBinWidthMicros = 50;
static constexpr int32_t kHistogramBinCount = 128;
- int64_t mMarkerFramePosition; // Estimated HW position.
- int64_t mMarkerNanoTime; // Estimated HW time.
- int32_t mSampleRate;
- int32_t mFramesPerBurst; // number of frames transferred at one time.
- int32_t mBurstPeriodNanos; // Time between HW bursts.
+ int64_t mMarkerFramePosition{0}; // Estimated HW position.
+ int64_t mMarkerNanoTime{0}; // Estimated HW time.
+ int32_t mSampleRate{48000};
+ int32_t mFramesPerBurst{48}; // number of frames transferred at one time.
+ int32_t mBurstPeriodNanos{0}; // Time between HW bursts.
// Includes mBurstPeriodNanos because we sample randomly over time.
- int32_t mMaxMeasuredLatenessNanos;
+ int32_t mMaxMeasuredLatenessNanos{0};
// Threshold for lateness that triggers a drift later in time.
int32_t mLatenessForDriftNanos;
- clock_model_state_t mState; // State machine handles startup sequence.
+ clock_model_state_t mState{STATE_STOPPED}; // State machine handles startup sequence.
int32_t mTimestampCount = 0; // For logging.
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index d103aca..90ff4a5 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -128,7 +128,8 @@
int32_t samplesPerFrame)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- streamBuilder->setSamplesPerFrame(samplesPerFrame);
+ const aaudio_channel_mask_t channelMask = AAudioConvert_channelCountToMask(samplesPerFrame);
+ streamBuilder->setChannelMask(channelMask);
}
AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
@@ -166,6 +167,18 @@
streamBuilder->setContentType(contentType);
}
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+ aaudio_spatialization_behavior_t spatializationBehavior) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setSpatializationBehavior(spatializationBehavior);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+ bool isSpatialized) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setIsContentSpatialized(isSpatialized);
+}
+
AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
aaudio_input_preset_t inputPreset) {
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
@@ -223,6 +236,13 @@
streamBuilder->setFramesPerDataCallback(frames);
}
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+ aaudio_channel_mask_t channelMask)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setChannelMask(channelMask);
+}
+
AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
AAudioStream** streamPtr)
{
@@ -332,7 +352,8 @@
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+ android::sp<AudioStream> spAudioStream(audioStream);
+ return spAudioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
}
// ============================================================
@@ -495,6 +516,19 @@
return audioStream->getContentType();
}
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+ AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getSpatializationBehavior();
+}
+
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->isContentSpatialized();
+}
+
AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
@@ -562,3 +596,11 @@
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
return audioStream->isPrivacySensitive();
}
+
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ const aaudio_channel_mask_t channelMask = audioStream->getChannelMask();
+ // Do not return channel index masks as they are not public.
+ return AAudio_isChannelIndexMask(channelMask) ? AAUDIO_UNSPECIFIED : channelMask;
+}
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index acfac24..8b7b75e 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -30,9 +30,6 @@
// HDMI supports up to 32 channels at 1536000 Hz.
#define SAMPLE_RATE_HZ_MAX 1600000
-AAudioStreamParameters::AAudioStreamParameters() {}
-AAudioStreamParameters::~AAudioStreamParameters() {}
-
void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
mSamplesPerFrame = other.mSamplesPerFrame;
mSampleRate = other.mSampleRate;
@@ -44,11 +41,14 @@
mBufferCapacity = other.mBufferCapacity;
mUsage = other.mUsage;
mContentType = other.mContentType;
+ mSpatializationBehavior = other.mSpatializationBehavior;
+ mIsContentSpatialized = other.mIsContentSpatialized;
mInputPreset = other.mInputPreset;
mAllowedCapturePolicy = other.mAllowedCapturePolicy;
mIsPrivacySensitive = other.mIsPrivacySensitive;
mOpPackageName = other.mOpPackageName;
mAttributionTag = other.mAttributionTag;
+ mChannelMask = other.mChannelMask;
}
static aaudio_result_t isFormatValid(audio_format_t format) {
@@ -160,6 +160,19 @@
// break;
}
+ switch (mSpatializationBehavior) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+ break; // valid
+ default:
+ ALOGD("spatialization behavior not valid = %d", mSpatializationBehavior);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
+ // no validation required for mIsContentSpatialized
+
switch (mInputPreset) {
case AAUDIO_UNSPECIFIED:
case AAUDIO_INPUT_PRESET_GENERIC:
@@ -187,7 +200,94 @@
// break;
}
- return AAUDIO_OK;
+ return validateChannelMask();
+}
+
+aaudio_result_t AAudioStreamParameters::validateChannelMask() const {
+ if (mChannelMask == AAUDIO_UNSPECIFIED) {
+ return AAUDIO_OK;
+ }
+
+ if (mChannelMask & AAUDIO_CHANNEL_BIT_INDEX) {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_INDEX_MASK_1:
+ case AAUDIO_CHANNEL_INDEX_MASK_2:
+ case AAUDIO_CHANNEL_INDEX_MASK_3:
+ case AAUDIO_CHANNEL_INDEX_MASK_4:
+ case AAUDIO_CHANNEL_INDEX_MASK_5:
+ case AAUDIO_CHANNEL_INDEX_MASK_6:
+ case AAUDIO_CHANNEL_INDEX_MASK_7:
+ case AAUDIO_CHANNEL_INDEX_MASK_8:
+ case AAUDIO_CHANNEL_INDEX_MASK_9:
+ case AAUDIO_CHANNEL_INDEX_MASK_10:
+ case AAUDIO_CHANNEL_INDEX_MASK_11:
+ case AAUDIO_CHANNEL_INDEX_MASK_12:
+ case AAUDIO_CHANNEL_INDEX_MASK_13:
+ case AAUDIO_CHANNEL_INDEX_MASK_14:
+ case AAUDIO_CHANNEL_INDEX_MASK_15:
+ case AAUDIO_CHANNEL_INDEX_MASK_16:
+ case AAUDIO_CHANNEL_INDEX_MASK_17:
+ case AAUDIO_CHANNEL_INDEX_MASK_18:
+ case AAUDIO_CHANNEL_INDEX_MASK_19:
+ case AAUDIO_CHANNEL_INDEX_MASK_20:
+ case AAUDIO_CHANNEL_INDEX_MASK_21:
+ case AAUDIO_CHANNEL_INDEX_MASK_22:
+ case AAUDIO_CHANNEL_INDEX_MASK_23:
+ case AAUDIO_CHANNEL_INDEX_MASK_24:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel index mask %#x", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ }
+
+ if (getDirection() == AAUDIO_DIRECTION_INPUT) {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ case AAUDIO_CHANNEL_STEREO:
+ case AAUDIO_CHANNEL_FRONT_BACK:
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ case AAUDIO_CHANNEL_5POINT1:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel mask %#x, IN", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ } else {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ case AAUDIO_CHANNEL_STEREO:
+ case AAUDIO_CHANNEL_2POINT1:
+ case AAUDIO_CHANNEL_TRI:
+ case AAUDIO_CHANNEL_TRI_BACK:
+ case AAUDIO_CHANNEL_3POINT1:
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ case AAUDIO_CHANNEL_QUAD:
+ case AAUDIO_CHANNEL_QUAD_SIDE:
+ case AAUDIO_CHANNEL_SURROUND:
+ case AAUDIO_CHANNEL_PENTA:
+ case AAUDIO_CHANNEL_5POINT1:
+ case AAUDIO_CHANNEL_5POINT1_SIDE:
+ case AAUDIO_CHANNEL_5POINT1POINT2:
+ case AAUDIO_CHANNEL_5POINT1POINT4:
+ case AAUDIO_CHANNEL_6POINT1:
+ case AAUDIO_CHANNEL_7POINT1:
+ case AAUDIO_CHANNEL_7POINT1POINT2:
+ case AAUDIO_CHANNEL_7POINT1POINT4:
+ case AAUDIO_CHANNEL_9POINT1POINT4:
+ case AAUDIO_CHANNEL_9POINT1POINT6:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel mask %#x. OUT", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ }
}
void AAudioStreamParameters::dump() const {
@@ -195,12 +295,15 @@
ALOGD("mSessionId = %6d", mSessionId);
ALOGD("mSampleRate = %6d", mSampleRate);
ALOGD("mSamplesPerFrame = %6d", mSamplesPerFrame);
+ ALOGD("mChannelMask = %#x", mChannelMask);
ALOGD("mSharingMode = %6d", (int)mSharingMode);
ALOGD("mAudioFormat = %6d", (int)mAudioFormat);
ALOGD("mDirection = %6d", mDirection);
ALOGD("mBufferCapacity = %6d", mBufferCapacity);
ALOGD("mUsage = %6d", mUsage);
ALOGD("mContentType = %6d", mContentType);
+ ALOGD("mSpatializationBehavior = %6d", mSpatializationBehavior);
+ ALOGD("mIsContentSpatialized = %s", mIsContentSpatialized ? "true" : "false");
ALOGD("mInputPreset = %6d", mInputPreset);
ALOGD("mAllowedCapturePolicy = %6d", mAllowedCapturePolicy);
ALOGD("mIsPrivacySensitive = %s", mIsPrivacySensitive ? "true" : "false");
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 5737052..cb998bf 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -26,8 +26,8 @@
class AAudioStreamParameters {
public:
- AAudioStreamParameters();
- virtual ~AAudioStreamParameters();
+ AAudioStreamParameters() = default;
+ virtual ~AAudioStreamParameters() = default;
int32_t getDeviceId() const {
return mDeviceId;
@@ -49,13 +49,6 @@
return mSamplesPerFrame;
}
- /**
- * This is also known as channelCount.
- */
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
audio_format_t getFormat() const {
return mAudioFormat;
}
@@ -104,6 +97,22 @@
mContentType = contentType;
}
+ aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+ return mSpatializationBehavior;
+ }
+
+ void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+ mSpatializationBehavior = spatializationBehavior;
+ }
+
+ bool isContentSpatialized() const {
+ return mIsContentSpatialized;
+ }
+
+ void setIsContentSpatialized(bool isSpatialized) {
+ mIsContentSpatialized = isSpatialized;
+ }
+
aaudio_input_preset_t getInputPreset() const {
return mInputPreset;
}
@@ -141,7 +150,7 @@
}
// TODO b/182392769: reexamine if Identity can be used
- void setOpPackageName(const std::optional<std::string> opPackageName) {
+ void setOpPackageName(const std::optional<std::string>& opPackageName) {
mOpPackageName = opPackageName;
}
@@ -149,10 +158,19 @@
return mAttributionTag;
}
- void setAttributionTag(const std::optional<std::string> attributionTag) {
+ void setAttributionTag(const std::optional<std::string>& attributionTag) {
mAttributionTag = attributionTag;
}
+ aaudio_channel_mask_t getChannelMask() const {
+ return mChannelMask;
+ }
+
+ void setChannelMask(aaudio_channel_mask_t channelMask) {
+ mChannelMask = channelMask;
+ mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+ }
+
/**
* @return bytes per frame of getFormat()
*/
@@ -171,6 +189,8 @@
void dump() const;
private:
+    aaudio_result_t validateChannelMask() const;
+
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
@@ -179,6 +199,9 @@
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_spatialization_behavior_t mSpatializationBehavior
+ = AAUDIO_UNSPECIFIED;
+ bool mIsContentSpatialized = false;
aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_UNSPECIFIED;
@@ -186,6 +209,7 @@
bool mIsPrivacySensitive = false;
std::optional<std::string> mOpPackageName = {};
std::optional<std::string> mAttributionTag = {};
+ aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
index 1e88d15..6c22744 100644
--- a/media/libaaudio/src/core/AudioGlobal.h
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -31,7 +31,8 @@
const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode);
const char* AudioGlobal_convertSharingModeToText(aaudio_sharing_mode_t mode);
const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state);
-}
+
+} // namespace aaudio
#endif // AAUDIO_AUDIOGLOBAL_H
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 09d9535..73432af 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -76,6 +76,7 @@
// Copy parameters from the Builder because the Builder may be deleted after this call.
// TODO AudioStream should be a subclass of AudioStreamParameters
mSamplesPerFrame = builder.getSamplesPerFrame();
+ mChannelMask = builder.getChannelMask();
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
@@ -91,6 +92,12 @@
if (mContentType == AAUDIO_UNSPECIFIED) {
mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
}
+ mSpatializationBehavior = builder.getSpatializationBehavior();
+ // for consistency with other properties, note UNSPECIFIED is the same as AUTO
+ if (mSpatializationBehavior == AAUDIO_UNSPECIFIED) {
+ mSpatializationBehavior = AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO;
+ }
+ mIsContentSpatialized = builder.isContentSpatialized();
mInputPreset = builder.getInputPreset();
if (mInputPreset == AAUDIO_UNSPECIFIED) {
mInputPreset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 9835c8c..afb8551 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -253,6 +253,14 @@
return mContentType;
}
+ aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+ return mSpatializationBehavior;
+ }
+
+ bool isContentSpatialized() const {
+ return mIsContentSpatialized;
+ }
+
aaudio_input_preset_t getInputPreset() const {
return mInputPreset;
}
@@ -270,7 +278,8 @@
}
/**
- * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+ * This is only valid after setChannelMask() and setFormat()
+ * have been called.
*/
int32_t getBytesPerFrame() const {
return mSamplesPerFrame * getBytesPerSample();
@@ -284,7 +293,7 @@
}
/**
- * This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
+ * This is only valid after setChannelMask() and setDeviceFormat() have been called.
*/
int32_t getBytesPerDeviceFrame() const {
return getSamplesPerFrame() * audio_bytes_per_sample(getDeviceFormat());
@@ -318,6 +327,15 @@
return mFramesPerDataCallback;
}
+ aaudio_channel_mask_t getChannelMask() const {
+ return mChannelMask;
+ }
+
+ void setChannelMask(aaudio_channel_mask_t channelMask) {
+ mChannelMask = channelMask;
+ mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+ }
+
/**
* @return true if data callback has been specified
*/
@@ -429,7 +447,7 @@
// PlayerBase allows the system to control the stream volume.
class MyPlayerBase : public android::PlayerBase {
public:
- MyPlayerBase() {};
+ MyPlayerBase() = default;
virtual ~MyPlayerBase() = default;
@@ -495,11 +513,6 @@
}
// This should not be called after the open() call.
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
- // This should not be called after the open() call.
void setFramesPerBurst(int32_t framesPerBurst) {
mFramesPerBurst = framesPerBurst;
}
@@ -563,7 +576,7 @@
* @param numFrames
* @return original pointer or the conversion buffer
*/
- virtual const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+ virtual const void * maybeConvertDeviceData(const void *audioData, int32_t /*numFrames*/) {
return audioData;
}
@@ -589,6 +602,14 @@
mContentType = contentType;
}
+ void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+ mSpatializationBehavior = spatializationBehavior;
+ }
+
+ void setIsContentSpatialized(bool isContentSpatialized) {
+ mIsContentSpatialized = isContentSpatialized;
+ }
+
/**
* This should not be called after the open() call.
*/
@@ -633,6 +654,7 @@
// These do not change after open().
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
@@ -645,6 +667,8 @@
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_spatialization_behavior_t mSpatializationBehavior = AAUDIO_UNSPECIFIED;
+ bool mIsContentSpatialized = false;
aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
bool mIsPrivacySensitive = false;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index e015592..2be3d65 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -20,9 +20,14 @@
#include <new>
#include <stdint.h>
+#include <vector>
#include <aaudio/AAudio.h>
#include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <media/AudioSystem.h>
#include "binding/AAudioBinderClient.h"
#include "client/AudioStreamInternalCapture.h"
@@ -35,6 +40,10 @@
using namespace aaudio;
+using android::media::audio::common::AudioMMapPolicy;
+using android::media::audio::common::AudioMMapPolicyInfo;
+using android::media::audio::common::AudioMMapPolicyType;
+
#define AAUDIO_MMAP_POLICY_DEFAULT AAUDIO_POLICY_NEVER
#define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT AAUDIO_POLICY_NEVER
@@ -53,16 +62,10 @@
/*
* AudioStreamBuilder
*/
-AudioStreamBuilder::AudioStreamBuilder() {
-}
-
-AudioStreamBuilder::~AudioStreamBuilder() {
-}
-
static aaudio_result_t builder_createStream(aaudio_direction_t direction,
- aaudio_sharing_mode_t sharingMode,
- bool tryMMap,
- android::sp<AudioStream> &stream) {
+ aaudio_sharing_mode_t /*sharingMode*/,
+ bool tryMMap,
+ android::sp<AudioStream> &stream) {
aaudio_result_t result = AAUDIO_OK;
switch (direction) {
@@ -92,6 +95,37 @@
return result;
}
+namespace {
+
+aaudio_policy_t aidl2legacy_aaudio_policy(AudioMMapPolicy aidl) {
+ switch (aidl) {
+ case AudioMMapPolicy::NEVER:
+ return AAUDIO_POLICY_NEVER;
+ case AudioMMapPolicy::AUTO:
+ return AAUDIO_POLICY_AUTO;
+ case AudioMMapPolicy::ALWAYS:
+ return AAUDIO_POLICY_ALWAYS;
+ case AudioMMapPolicy::UNSPECIFIED:
+ default:
+ return AAUDIO_UNSPECIFIED;
+ }
+}
+
+// The aaudio policy will be ALWAYS, NEVER, UNSPECIFIED only when all policy info are
+// ALWAYS, NEVER or UNSPECIFIED. Otherwise, the aaudio policy will be AUTO.
+aaudio_policy_t getAAudioPolicy(
+ const std::vector<AudioMMapPolicyInfo>& policyInfos) {
+ if (policyInfos.empty()) return AAUDIO_POLICY_AUTO;
+ for (size_t i = 1; i < policyInfos.size(); ++i) {
+ if (policyInfos.at(i).mmapPolicy != policyInfos.at(0).mmapPolicy) {
+ return AAUDIO_POLICY_AUTO;
+ }
+ }
+ return aidl2legacy_aaudio_policy(policyInfos.at(0).mmapPolicy);
+}
+
+} // namespace
+
// Try to open using MMAP path if that is allowed.
// Fall back to Legacy path if MMAP not available.
// Exact behavior is controlled by MMapPolicy.
@@ -110,25 +144,32 @@
return result;
}
+ std::vector<AudioMMapPolicyInfo> policyInfos;
// The API setting is the highest priority.
aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
// If not specified then get from a system property.
- if (mmapPolicy == AAUDIO_UNSPECIFIED) {
- mmapPolicy = AAudioProperty_getMMapPolicy();
+ if (mmapPolicy == AAUDIO_UNSPECIFIED && android::AudioSystem::getMmapPolicyInfo(
+ AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
+ mmapPolicy = getAAudioPolicy(policyInfos);
}
// If still not specified then use the default.
if (mmapPolicy == AAUDIO_UNSPECIFIED) {
mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
}
- int32_t mapExclusivePolicy = AAudioProperty_getMMapExclusivePolicy();
- if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
- mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
+ policyInfos.clear();
+ aaudio_policy_t mmapExclusivePolicy = AAUDIO_UNSPECIFIED;
+ if (android::AudioSystem::getMmapPolicyInfo(
+ AudioMMapPolicyType::EXCLUSIVE, &policyInfos) == NO_ERROR) {
+ mmapExclusivePolicy = getAAudioPolicy(policyInfos);
+ }
+ if (mmapExclusivePolicy == AAUDIO_UNSPECIFIED) {
+ mmapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
}
aaudio_sharing_mode_t sharingMode = getSharingMode();
if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
- && (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
+ && (mmapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
sharingMode = AAUDIO_SHARING_MODE_SHARED;
setSharingMode(sharingMode);
@@ -268,8 +309,8 @@
void AudioStreamBuilder::logParameters() const {
// This is very helpful for debugging in the future. Please leave it in.
- ALOGI("rate = %6d, channels = %d, format = %d, sharing = %s, dir = %s",
- getSampleRate(), getSamplesPerFrame(), getFormat(),
+ ALOGI("rate = %6d, channels = %d, channelMask = %#x, format = %d, sharing = %s, dir = %s",
+ getSampleRate(), getSamplesPerFrame(), getChannelMask(), getFormat(),
AAudio_convertSharingModeToShortText(getSharingMode()),
AAudio_convertDirectionToText(getDirection()));
ALOGI("device = %6d, sessionId = %d, perfMode = %d, callback: %s with frames = %d",
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 9f93341..f91c25a 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -31,9 +31,9 @@
*/
class AudioStreamBuilder : public AAudioStreamParameters {
public:
- AudioStreamBuilder();
+ AudioStreamBuilder() = default;
- ~AudioStreamBuilder();
+ ~AudioStreamBuilder() = default;
bool isSharingModeMatchRequired() const {
return mSharingModeMatchRequired;
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 37548f0..7b0aca1 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -38,7 +38,7 @@
class FifoBuffer {
public:
- FifoBuffer(int32_t bytesPerFrame);
+ explicit FifoBuffer(int32_t bytesPerFrame);
virtual ~FifoBuffer() = default;
@@ -162,6 +162,6 @@
uint8_t *mExternalStorage = nullptr;
};
-} // android
+} // namespace android
#endif //FIFO_FIFO_BUFFER_H
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
index 057a94e..e15d444 100644
--- a/media/libaaudio/src/fifo/FifoController.h
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -36,7 +36,7 @@
, mWriteCounter(0)
{}
- virtual ~FifoController() {}
+ virtual ~FifoController() = default;
// TODO review use of memory barriers, probably incorrect
virtual fifo_counter_t getReadCounter() override {
@@ -57,6 +57,6 @@
std::atomic<fifo_counter_t> mWriteCounter;
};
-} // android
+} // namespace android
#endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index 1dece0e..ad6d041 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -29,9 +29,6 @@
{
}
-FifoControllerBase::~FifoControllerBase() {
-}
-
fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
fifo_frames_t temp = 0;
__builtin_sub_overflow(getWriteCounter(), getReadCounter(), &temp);
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
index 1edb8a3..2a6173b 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.h
+++ b/media/libaaudio/src/fifo/FifoControllerBase.h
@@ -43,7 +43,7 @@
*/
FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
- virtual ~FifoControllerBase();
+ virtual ~FifoControllerBase() = default;
// Abstract methods to be implemented in subclasses.
/**
@@ -123,6 +123,6 @@
fifo_frames_t mThreshold;
};
-} // android
+} // namespace android
#endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
index ec48e57..a59225a 100644
--- a/media/libaaudio/src/fifo/FifoControllerIndirect.h
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -44,7 +44,7 @@
setReadCounter(0);
setWriteCounter(0);
}
- virtual ~FifoControllerIndirect() {};
+ virtual ~FifoControllerIndirect() = default;
// TODO review use of memory barriers, probably incorrect
virtual fifo_counter_t getReadCounter() override {
@@ -68,6 +68,6 @@
std::atomic<fifo_counter_t> * mWriteCounterAddress;
};
-} // android
+} // namespace android
#endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp b/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
deleted file mode 100644
index 5667fdb..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <sys/types.h>
-#include "AudioProcessorBase.h"
-
-using namespace flowgraph;
-
-/***************************************************************************/
-int32_t AudioProcessorBase::pullData(int64_t framePosition, int32_t numFrames) {
- if (framePosition > mLastFramePosition) {
- mLastFramePosition = framePosition;
- mFramesValid = onProcess(framePosition, numFrames);
- }
- return mFramesValid;
-}
-
-/***************************************************************************/
-AudioFloatBlockPort::AudioFloatBlockPort(AudioProcessorBase &parent,
- int32_t samplesPerFrame,
- int32_t framesPerBlock)
- : AudioPort(parent, samplesPerFrame)
- , mFramesPerBlock(framesPerBlock)
- , mSampleBlock(NULL) {
- int32_t numFloats = framesPerBlock * getSamplesPerFrame();
- mSampleBlock = new float[numFloats]{0.0f};
-}
-
-AudioFloatBlockPort::~AudioFloatBlockPort() {
- delete[] mSampleBlock;
-}
-
-/***************************************************************************/
-int32_t AudioFloatOutputPort::pullData(int64_t framePosition, int32_t numFrames) {
- numFrames = std::min(getFramesPerBlock(), numFrames);
- return mParent.pullData(framePosition, numFrames);
-}
-
-// These need to be in the .cpp file because of forward cross references.
-void AudioFloatOutputPort::connect(AudioFloatInputPort *port) {
- port->connect(this);
-}
-
-void AudioFloatOutputPort::disconnect(AudioFloatInputPort *port) {
- port->disconnect(this);
-}
-
-/***************************************************************************/
-int32_t AudioFloatInputPort::pullData(int64_t framePosition, int32_t numFrames) {
- return (mConnected == NULL)
- ? std::min(getFramesPerBlock(), numFrames)
- : mConnected->pullData(framePosition, numFrames);
-}
-
-float *AudioFloatInputPort::getBlock() {
- if (mConnected == NULL) {
- return AudioFloatBlockPort::getBlock(); // loaded using setValue()
- } else {
- return mConnected->getBlock();
- }
-}
-
-/***************************************************************************/
-int32_t AudioSink::pull(int32_t numFrames) {
- int32_t actualFrames = input.pullData(mFramePosition, numFrames);
- mFramePosition += actualFrames;
- return actualFrames;
-}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.h b/media/libaaudio/src/flowgraph/AudioProcessorBase.h
deleted file mode 100644
index 972932f..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * AudioProcessorBase.h
- *
- * Audio processing node and ports that can be used in a simple data flow graph.
- */
-
-#ifndef FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-#define FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-
-#include <cassert>
-#include <cstring>
-#include <math.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-// TODO consider publishing all header files under "include/libaaudio/FlowGraph.h"
-
-namespace flowgraph {
-
-// Default block size that can be overridden when the AudioFloatBlockPort is created.
-// If it is too small then we will have too much overhead from switching between nodes.
-// If it is too high then we will thrash the caches.
-constexpr int kDefaultBlockSize = 8; // arbitrary
-
-class AudioFloatInputPort;
-
-/***************************************************************************/
-class AudioProcessorBase {
-public:
- virtual ~AudioProcessorBase() = default;
-
- /**
- * Perform custom function.
- *
- * @param framePosition index of first frame to be processed
- * @param numFrames maximum number of frames requested for processing
- * @return number of frames actually processed
- */
- virtual int32_t onProcess(int64_t framePosition, int32_t numFrames) = 0;
-
- /**
- * If the framePosition is at or after the last frame position then call onProcess().
- * This prevents infinite recursion in case of cyclic graphs.
- * It also prevents nodes upstream from a branch from being executed twice.
- *
- * @param framePosition
- * @param numFrames
- * @return
- */
- int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-protected:
- int64_t mLastFramePosition = -1; // Start at -1 so that the first pull works.
-
-private:
- int32_t mFramesValid = 0; // num valid frames in the block
-};
-
-/***************************************************************************/
-/**
- * This is a connector that allows data to flow between modules.
- */
-class AudioPort {
-public:
- AudioPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
- : mParent(parent)
- , mSamplesPerFrame(samplesPerFrame) {
- }
-
- // Ports are often declared public. So let's make them non-copyable.
- AudioPort(const AudioPort&) = delete;
- AudioPort& operator=(const AudioPort&) = delete;
-
- int32_t getSamplesPerFrame() const {
- return mSamplesPerFrame;
- }
-
-protected:
- AudioProcessorBase &mParent;
-
-private:
- const int32_t mSamplesPerFrame = 1;
-};
-
-/***************************************************************************/
-/**
- * This port contains a float type buffer.
- * The size is framesPerBlock * samplesPerFrame).
- */
-class AudioFloatBlockPort : public AudioPort {
-public:
- AudioFloatBlockPort(AudioProcessorBase &mParent,
- int32_t samplesPerFrame,
- int32_t framesPerBlock = kDefaultBlockSize
- );
-
- virtual ~AudioFloatBlockPort();
-
- int32_t getFramesPerBlock() const {
- return mFramesPerBlock;
- }
-
-protected:
-
- /**
- * @return buffer internal to the port or from a connected port
- */
- virtual float *getBlock() {
- return mSampleBlock;
- }
-
-
-private:
- const int32_t mFramesPerBlock = 1;
- float *mSampleBlock = nullptr; // allocated in constructor
-};
-
-/***************************************************************************/
-/**
- * The results of a module are stored in the buffer of the output ports.
- */
-class AudioFloatOutputPort : public AudioFloatBlockPort {
-public:
- AudioFloatOutputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
- : AudioFloatBlockPort(parent, samplesPerFrame) {
- }
-
- virtual ~AudioFloatOutputPort() = default;
-
- using AudioFloatBlockPort::getBlock;
-
- /**
- * Call the parent module's onProcess() method.
- * That may pull data from its inputs and recursively
- * process the entire graph.
- * @return number of frames actually pulled
- */
- int32_t pullData(int64_t framePosition, int32_t numFrames);
-
- /**
- * Connect to the input of another module.
- * An input port can only have one connection.
- * An output port can have multiple connections.
- * If you connect a second output port to an input port
- * then it overwrites the previous connection.
- *
- * This not thread safe. Do not modify the graph topology form another thread while running.
- */
- void connect(AudioFloatInputPort *port);
-
- /**
- * Disconnect from the input of another module.
- * This not thread safe.
- */
- void disconnect(AudioFloatInputPort *port);
-};
-
-/***************************************************************************/
-class AudioFloatInputPort : public AudioFloatBlockPort {
-public:
- AudioFloatInputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
- : AudioFloatBlockPort(parent, samplesPerFrame) {
- }
-
- virtual ~AudioFloatInputPort() = default;
-
- /**
- * If connected to an output port then this will return
- * that output ports buffers.
- * If not connected then it returns the input ports own buffer
- * which can be loaded using setValue().
- */
- float *getBlock() override;
-
- /**
- * Pull data from any output port that is connected.
- */
- int32_t pullData(int64_t framePosition, int32_t numFrames);
-
- /**
- * Write every value of the float buffer.
- * This value will be ignored if an output port is connected
- * to this port.
- */
- void setValue(float value) {
- int numFloats = kDefaultBlockSize * getSamplesPerFrame();
- float *buffer = getBlock();
- for (int i = 0; i < numFloats; i++) {
- *buffer++ = value;
- }
- }
-
- /**
- * Connect to the output of another module.
- * An input port can only have one connection.
- * An output port can have multiple connections.
- * This not thread safe.
- */
- void connect(AudioFloatOutputPort *port) {
- assert(getSamplesPerFrame() == port->getSamplesPerFrame());
- mConnected = port;
- }
-
- void disconnect(AudioFloatOutputPort *port) {
- assert(mConnected == port);
- (void) port;
- mConnected = nullptr;
- }
-
- void disconnect() {
- mConnected = nullptr;
- }
-
-private:
- AudioFloatOutputPort *mConnected = nullptr;
-};
-
-/***************************************************************************/
-class AudioSource : public AudioProcessorBase {
-public:
- explicit AudioSource(int32_t channelCount)
- : output(*this, channelCount) {
- }
-
- virtual ~AudioSource() = default;
-
- AudioFloatOutputPort output;
-
- void setData(const void *data, int32_t numFrames) {
- mData = data;
- mSizeInFrames = numFrames;
- mFrameIndex = 0;
- }
-
-protected:
- const void *mData = nullptr;
- int32_t mSizeInFrames = 0; // number of frames in mData
- int32_t mFrameIndex = 0; // index of next frame to be processed
-};
-
-/***************************************************************************/
-class AudioSink : public AudioProcessorBase {
-public:
- explicit AudioSink(int32_t channelCount)
- : input(*this, channelCount) {
- }
-
- virtual ~AudioSink() = default;
-
- AudioFloatInputPort input;
-
- /**
- * Do nothing. The work happens in the read() method.
- *
- * @param framePosition index of first frame to be processed
- * @param numFrames
- * @return number of frames actually processed
- */
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override {
- (void) framePosition;
- (void) numFrames;
- return 0;
- };
-
- virtual int32_t read(void *data, int32_t numFrames) = 0;
-
-protected:
- int32_t pull(int32_t numFrames);
-
-private:
- int64_t mFramePosition = 0;
-};
-
-} /* namespace flowgraph */
-
-#endif /* FLOWGRAPH_AUDIO_PROCESSOR_BASE_H */
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
new file mode 100644
index 0000000..351def2
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "ChannelCountConverter.h"
+
+using namespace flowgraph;
+
+ChannelCountConverter::ChannelCountConverter(
+ int32_t inputChannelCount,
+ int32_t outputChannelCount)
+ : input(*this, inputChannelCount)
+ , output(*this, outputChannelCount) {
+}
+
+ChannelCountConverter::~ChannelCountConverter() = default;
+
+int32_t ChannelCountConverter::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
+ int32_t inputChannelCount = input.getSamplesPerFrame();
+ int32_t outputChannelCount = output.getSamplesPerFrame();
+ for (int i = 0; i < numFrames; i++) {
+ int inputChannel = 0;
+ for (int outputChannel = 0; outputChannel < outputChannelCount; outputChannel++) {
+ // Copy input channels to output channels.
+ // Wrap if we run out of inputs.
+ // Discard if we run out of outputs.
+ outputBuffer[outputChannel] = inputBuffer[inputChannel];
+ inputChannel = (inputChannel == inputChannelCount)
+ ? 0 : inputChannel + 1;
+ }
+ inputBuffer += inputChannelCount;
+ outputBuffer += outputChannelCount;
+ }
+ return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.h b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
new file mode 100644
index 0000000..e4b6f4e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+#define FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Change the number of number of channels without mixing.
+ * When increasing the channel count, duplicate input channels.
+ * When decreasing the channel count, drop input channels.
+ */
+ class ChannelCountConverter : public FlowGraphNode {
+ public:
+ explicit ChannelCountConverter(
+ int32_t inputChannelCount,
+ int32_t outputChannelCount);
+
+ virtual ~ChannelCountConverter();
+
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "ChannelCountConverter";
+ }
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
+ };
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.cpp b/media/libaaudio/src/flowgraph/ClipToRange.cpp
index bd9c22a..d2f8a02 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.cpp
+++ b/media/libaaudio/src/flowgraph/ClipToRange.cpp
@@ -16,25 +16,23 @@
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "ClipToRange.h"
using namespace flowgraph;
ClipToRange::ClipToRange(int32_t channelCount)
- : input(*this, channelCount)
- , output(*this, channelCount) {
+ : FlowGraphFilter(channelCount) {
}
-int32_t ClipToRange::onProcess(int64_t framePosition, int32_t numFrames) {
- int32_t framesToProcess = input.pullData(framePosition, numFrames);
- const float *inputBuffer = input.getBlock();
- float *outputBuffer = output.getBlock();
+int32_t ClipToRange::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
- int32_t numSamples = framesToProcess * output.getSamplesPerFrame();
+ int32_t numSamples = numFrames * output.getSamplesPerFrame();
for (int32_t i = 0; i < numSamples; i++) {
*outputBuffer++ = std::min(mMaximum, std::max(mMinimum, *inputBuffer++));
}
- return framesToProcess;
+ return numFrames;
}
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.h b/media/libaaudio/src/flowgraph/ClipToRange.h
index 9eef254..22b7804 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.h
+++ b/media/libaaudio/src/flowgraph/ClipToRange.h
@@ -21,7 +21,7 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
@@ -30,13 +30,13 @@
constexpr float kDefaultMaxHeadroom = 1.41253754f;
constexpr float kDefaultMinHeadroom = -kDefaultMaxHeadroom;
-class ClipToRange : public AudioProcessorBase {
+class ClipToRange : public FlowGraphFilter {
public:
explicit ClipToRange(int32_t channelCount);
virtual ~ClipToRange() = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
void setMinimum(float min) {
mMinimum = min;
@@ -54,8 +54,9 @@
return mMaximum;
}
- AudioFloatInputPort input;
- AudioFloatOutputPort output;
+ const char *getName() override {
+ return "ClipToRange";
+ }
private:
float mMinimum = kDefaultMinHeadroom;
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.cpp b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
new file mode 100644
index 0000000..4c76e77
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stdio.h"
+#include <algorithm>
+#include <sys/types.h>
+#include "FlowGraphNode.h"
+
+using namespace flowgraph;
+
+/***************************************************************************/
+int32_t FlowGraphNode::pullData(int32_t numFrames, int64_t callCount) {
+ int32_t frameCount = numFrames;
+ // Prevent recursion and multiple execution of nodes.
+ if (callCount > mLastCallCount) {
+ mLastCallCount = callCount;
+ if (mDataPulledAutomatically) {
+ // Pull from all the upstream nodes.
+ for (auto &port : mInputPorts) {
+ // TODO fix bug of leaving unused data in some ports if using multiple AudioSource
+ frameCount = port.get().pullData(callCount, frameCount);
+ }
+ }
+ if (frameCount > 0) {
+ frameCount = onProcess(frameCount);
+ }
+ mLastFrameCount = frameCount;
+ } else {
+ frameCount = mLastFrameCount;
+ }
+ return frameCount;
+}
+
+void FlowGraphNode::pullReset() {
+ if (!mBlockRecursion) {
+ mBlockRecursion = true; // for cyclic graphs
+ // Pull reset from all the upstream nodes.
+ for (auto &port : mInputPorts) {
+ port.get().pullReset();
+ }
+ mBlockRecursion = false;
+ reset();
+ }
+}
+
+void FlowGraphNode::reset() {
+ mLastFrameCount = 0;
+ mLastCallCount = kInitialCallCount;
+}
+
+/***************************************************************************/
+FlowGraphPortFloat::FlowGraphPortFloat(FlowGraphNode &parent,
+ int32_t samplesPerFrame,
+ int32_t framesPerBuffer)
+ : FlowGraphPort(parent, samplesPerFrame)
+ , mFramesPerBuffer(framesPerBuffer)
+ , mBuffer(nullptr) {
+ size_t numFloats = framesPerBuffer * getSamplesPerFrame();
+ mBuffer = std::make_unique<float[]>(numFloats);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatOutput::pullData(int64_t callCount, int32_t numFrames) {
+ numFrames = std::min(getFramesPerBuffer(), numFrames);
+ return mContainingNode.pullData(numFrames, callCount);
+}
+
+void FlowGraphPortFloatOutput::pullReset() {
+ mContainingNode.pullReset();
+}
+
+// These need to be in the .cpp file because of forward cross references.
+void FlowGraphPortFloatOutput::connect(FlowGraphPortFloatInput *port) {
+ port->connect(this);
+}
+
+void FlowGraphPortFloatOutput::disconnect(FlowGraphPortFloatInput *port) {
+ port->disconnect(this);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatInput::pullData(int64_t callCount, int32_t numFrames) {
+ return (mConnected == nullptr)
+ ? std::min(getFramesPerBuffer(), numFrames)
+ : mConnected->pullData(callCount, numFrames);
+}
+void FlowGraphPortFloatInput::pullReset() {
+ if (mConnected != nullptr) mConnected->pullReset();
+}
+
+float *FlowGraphPortFloatInput::getBuffer() {
+ if (mConnected == nullptr) {
+ return FlowGraphPortFloat::getBuffer(); // loaded using setValue()
+ } else {
+ return mConnected->getBuffer();
+ }
+}
+
+int32_t FlowGraphSink::pullData(int32_t numFrames) {
+ return FlowGraphNode::pullData(numFrames, getLastCallCount() + 1);
+}
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.h b/media/libaaudio/src/flowgraph/FlowGraphNode.h
new file mode 100644
index 0000000..69c83dd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * FlowGraph.h
+ *
+ * Processing node and ports that can be used in a simple data flow graph.
+ * This was designed to work with audio but could be used for other
+ * types of data.
+ */
+
+#ifndef FLOWGRAPH_FLOW_GRAPH_NODE_H
+#define FLOWGRAPH_FLOW_GRAPH_NODE_H
+
+#include <cassert>
+#include <cstring>
+#include <math.h>
+#include <memory>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <vector>
+
+// TODO Move these classes into separate files.
+// TODO Review use of raw pointers for connect(). Maybe use smart pointers but need to avoid
+// run-time deallocation in audio thread.
+
+// Set this to 1 if using it inside the Android framework.
+// This code is kept here so that it can be moved easily between Oboe and AAudio.
+#ifndef FLOWGRAPH_ANDROID_INTERNAL
+#define FLOWGRAPH_ANDROID_INTERNAL 0
+#endif
+
+namespace flowgraph {
+
+// Default block size that can be overridden when the FlowGraphPortFloat is created.
+// If it is too small then we will have too much overhead from switching between nodes.
+// If it is too high then we will thrash the caches.
+constexpr int kDefaultBufferSize = 8; // arbitrary
+
+class FlowGraphPort;
+class FlowGraphPortFloatInput;
+
+/***************************************************************************/
+/**
+ * Base class for all nodes in the flowgraph.
+ */
+class FlowGraphNode {
+public:
+ FlowGraphNode() = default;
+ virtual ~FlowGraphNode() = default;
+
+ /**
+ * Read from the input ports,
+ * generate multiple frames of data then write the results to the output ports.
+ *
+ * @param numFrames maximum number of frames requested for processing
+ * @return number of frames actually processed
+ */
+ virtual int32_t onProcess(int32_t numFrames) = 0;
+
+ /**
+ * If the callCount is at or after the previous callCount then call
+ * pullData on all of the upstreamNodes.
+ * Then call onProcess().
+ * This prevents infinite recursion in case of cyclic graphs.
+ * It also prevents nodes upstream from a branch from being executed twice.
+ *
+ * @param callCount
+ * @param numFrames
+ * @return number of frames valid
+ */
+ int32_t pullData(int32_t numFrames, int64_t callCount);
+
+ /**
+ * Recursively reset all the nodes in the graph, starting from a Sink.
+ *
+ * This must not be called at the same time as pullData!
+ */
+ void pullReset();
+
+ /**
+ * Reset framePosition counters.
+ */
+ virtual void reset();
+
+ void addInputPort(FlowGraphPort &port) {
+ mInputPorts.emplace_back(port);
+ }
+
+ bool isDataPulledAutomatically() const {
+ return mDataPulledAutomatically;
+ }
+
+ /**
+ * Set true if you want the data pulled through the graph automatically.
+ * This is the default.
+ *
+ * Set false if you want to pull the data from the input ports in the onProcess() method.
+ * You might do this, for example, in a sample rate converting node.
+ *
+ * @param automatic
+ */
+ void setDataPulledAutomatically(bool automatic) {
+ mDataPulledAutomatically = automatic;
+ }
+
+ virtual const char *getName() {
+ return "FlowGraph";
+ }
+
+ int64_t getLastCallCount() {
+ return mLastCallCount;
+ }
+
+protected:
+
+ static constexpr int64_t kInitialCallCount = -1;
+ int64_t mLastCallCount = kInitialCallCount;
+
+ std::vector<std::reference_wrapper<FlowGraphPort>> mInputPorts;
+
+private:
+ bool mDataPulledAutomatically = true;
+ bool mBlockRecursion = false;
+ int32_t mLastFrameCount = 0;
+
+};
+
+/***************************************************************************/
+/**
+ * This is a connector that allows data to flow between modules.
+ *
+ * The ports are the primary means of interacting with a module.
+ * So they are generally declared as public.
+ *
+ */
+class FlowGraphPort {
+public:
+ FlowGraphPort(FlowGraphNode &parent, int32_t samplesPerFrame)
+ : mContainingNode(parent)
+ , mSamplesPerFrame(samplesPerFrame) {
+ }
+
+ virtual ~FlowGraphPort() = default;
+
+ // Ports are often declared public. So let's make them non-copyable.
+ FlowGraphPort(const FlowGraphPort&) = delete;
+ FlowGraphPort& operator=(const FlowGraphPort&) = delete;
+
+ int32_t getSamplesPerFrame() const {
+ return mSamplesPerFrame;
+ }
+
+ virtual int32_t pullData(int64_t framePosition, int32_t numFrames) = 0;
+
+ virtual void pullReset() {}
+
+protected:
+ FlowGraphNode &mContainingNode;
+
+private:
+ const int32_t mSamplesPerFrame = 1;
+};
+
+/***************************************************************************/
+/**
+ * This port contains a 32-bit float buffer that can contain several frames of data.
+ * Processing the data in a block improves performance.
+ *
+ * The size is framesPerBuffer * samplesPerFrame).
+ */
+class FlowGraphPortFloat : public FlowGraphPort {
+public:
+ FlowGraphPortFloat(FlowGraphNode &parent,
+ int32_t samplesPerFrame,
+ int32_t framesPerBuffer = kDefaultBufferSize
+ );
+
+ virtual ~FlowGraphPortFloat() = default;
+
+ int32_t getFramesPerBuffer() const {
+ return mFramesPerBuffer;
+ }
+
+protected:
+
+ /**
+ * @return buffer internal to the port or from a connected port
+ */
+ virtual float *getBuffer() {
+ return mBuffer.get();
+ }
+
+private:
+ const int32_t mFramesPerBuffer = 1;
+ std::unique_ptr<float[]> mBuffer; // allocated in constructor
+};
+
+/***************************************************************************/
+/**
+ * The results of a node's processing are stored in the buffers of the output ports.
+ */
+class FlowGraphPortFloatOutput : public FlowGraphPortFloat {
+public:
+ FlowGraphPortFloatOutput(FlowGraphNode &parent, int32_t samplesPerFrame)
+ : FlowGraphPortFloat(parent, samplesPerFrame) {
+ }
+
+ virtual ~FlowGraphPortFloatOutput() = default;
+
+ using FlowGraphPortFloat::getBuffer;
+
+ /**
+ * Connect to the input of another module.
+ * An input port can only have one connection.
+ * An output port can have multiple connections.
+ * If you connect a second output port to an input port
+ * then it overwrites the previous connection.
+ *
+ * This not thread safe. Do not modify the graph topology from another thread while running.
+ * Also do not delete a module while it is connected to another port if the graph is running.
+ */
+ void connect(FlowGraphPortFloatInput *port);
+
+ /**
+ * Disconnect from the input of another module.
+ * This not thread safe.
+ */
+ void disconnect(FlowGraphPortFloatInput *port);
+
+ /**
+ * Call the parent module's onProcess() method.
+ * That may pull data from its inputs and recursively
+ * process the entire graph.
+ * @return number of frames actually pulled
+ */
+ int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+
+ void pullReset() override;
+
+};
+
+/***************************************************************************/
+
+/**
+ * An input port for streaming audio data.
+ * You can set a value that will be used for processing.
+ * If you connect an output port to this port then its value will be used instead.
+ */
+class FlowGraphPortFloatInput : public FlowGraphPortFloat {
+public:
+ FlowGraphPortFloatInput(FlowGraphNode &parent, int32_t samplesPerFrame)
+ : FlowGraphPortFloat(parent, samplesPerFrame) {
+ // Add to parent so it can pull data from each input.
+ parent.addInputPort(*this);
+ }
+
+ virtual ~FlowGraphPortFloatInput() = default;
+
+ /**
+ * If connected to an output port then this will return
+ * that output ports buffers.
+ * If not connected then it returns the input ports own buffer
+ * which can be loaded using setValue().
+ */
+ float *getBuffer() override;
+
+ /**
+ * Write every value of the float buffer.
+ * This value will be ignored if an output port is connected
+ * to this port.
+ */
+ void setValue(float value) {
+ int numFloats = kDefaultBufferSize * getSamplesPerFrame();
+ float *buffer = getBuffer();
+ for (int i = 0; i < numFloats; i++) {
+ *buffer++ = value;
+ }
+ }
+
+ /**
+ * Connect to the output of another module.
+ * An input port can only have one connection.
+ * An output port can have multiple connections.
+ * This not thread safe.
+ */
+ void connect(FlowGraphPortFloatOutput *port) {
+ assert(getSamplesPerFrame() == port->getSamplesPerFrame());
+ mConnected = port;
+ }
+
+ void disconnect(FlowGraphPortFloatOutput *port) {
+ assert(mConnected == port);
+ (void) port;
+ mConnected = nullptr;
+ }
+
+ void disconnect() {
+ mConnected = nullptr;
+ }
+
+ /**
+ * Pull data from any output port that is connected.
+ */
+ int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+ void pullReset() override;
+
+private:
+ FlowGraphPortFloatOutput *mConnected = nullptr;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for an edge node in a graph that has no upstream nodes.
+ * It outputs data but does not consume data.
+ * By default, it will read its data from an external buffer.
+ */
+class FlowGraphSource : public FlowGraphNode {
+public:
+ explicit FlowGraphSource(int32_t channelCount)
+ : output(*this, channelCount) {
+ }
+
+ virtual ~FlowGraphSource() = default;
+
+ FlowGraphPortFloatOutput output;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for an edge node in a graph that has no upstream nodes.
+ * It outputs data but does not consume data.
+ * By default, it will read its data from an external buffer.
+ */
+class FlowGraphSourceBuffered : public FlowGraphSource {
+public:
+ explicit FlowGraphSourceBuffered(int32_t channelCount)
+ : FlowGraphSource(channelCount) {}
+
+ virtual ~FlowGraphSourceBuffered() = default;
+
+ /**
+ * Specify buffer that the node will read from.
+ *
+ * @param data TODO Consider using std::shared_ptr.
+ * @param numFrames
+ */
+ void setData(const void *data, int32_t numFrames) {
+ mData = data;
+ mSizeInFrames = numFrames;
+ mFrameIndex = 0;
+ }
+
+protected:
+ const void *mData = nullptr;
+ int32_t mSizeInFrames = 0; // number of frames in mData
+ int32_t mFrameIndex = 0; // index of next frame to be processed
+};
+
+/***************************************************************************/
+/**
+ * Base class for an edge node in a graph that has no downstream nodes.
+ * It consumes data but does not output data.
+ * This graph will be executed when data is read() from this node
+ * by pulling data from upstream nodes.
+ */
+class FlowGraphSink : public FlowGraphNode {
+public:
+ explicit FlowGraphSink(int32_t channelCount)
+ : input(*this, channelCount) {
+ }
+
+ virtual ~FlowGraphSink() = default;
+
+ FlowGraphPortFloatInput input;
+
+ /**
+ * Do nothing. The work happens in the read() method.
+ *
+ * @param numFrames
+ * @return number of frames actually processed
+ */
+ int32_t onProcess(int32_t numFrames) override {
+ return numFrames;
+ }
+
+ virtual int32_t read(void *data, int32_t numFrames) = 0;
+
+protected:
+ /**
+ * Pull data through the graph using this nodes last callCount.
+ * @param numFrames
+ * @return
+ */
+ int32_t pullData(int32_t numFrames);
+};
+
+/***************************************************************************/
+/**
+ * Base class for a node that has an input and an output with the same number of channels.
+ * This may include traditional filters, eg. FIR, but also include
+ * any processing node that converts input to output.
+ */
+class FlowGraphFilter : public FlowGraphNode {
+public:
+ explicit FlowGraphFilter(int32_t channelCount)
+ : input(*this, channelCount)
+ , output(*this, channelCount) {
+ }
+
+ virtual ~FlowGraphFilter() = default;
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif /* FLOWGRAPH_FLOW_GRAPH_NODE_H */
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
new file mode 100644
index 0000000..879685e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "ManyToMultiConverter.h"
+
+using namespace flowgraph;
+
+ManyToMultiConverter::ManyToMultiConverter(int32_t channelCount)
+ : inputs(channelCount)
+ , output(*this, channelCount) {
+ for (int i = 0; i < channelCount; i++) {
+ inputs[i] = std::make_unique<FlowGraphPortFloatInput>(*this, 1);
+ }
+}
+
+int32_t ManyToMultiConverter::onProcess(int32_t numFrames) {
+ int32_t channelCount = output.getSamplesPerFrame();
+
+ for (int ch = 0; ch < channelCount; ch++) {
+ const float *inputBuffer = inputs[ch]->getBuffer();
+ float *outputBuffer = output.getBuffer() + ch;
+
+ for (int i = 0; i < numFrames; i++) {
+ // read one, write into the proper interleaved output channel
+ float sample = *inputBuffer++;
+ *outputBuffer = sample;
+ outputBuffer += channelCount; // advance to next multichannel frame
+ }
+ }
+ return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.h b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
new file mode 100644
index 0000000..c7460ff
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+#define FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine multiple mono inputs into one interleaved multi-channel output.
+ */
+class ManyToMultiConverter : public flowgraph::FlowGraphNode {
+public:
+ explicit ManyToMultiConverter(int32_t channelCount);
+
+ virtual ~ManyToMultiConverter() = default;
+
+ int32_t onProcess(int numFrames) override;
+
+ void setEnabled(bool /*enabled*/) {}
+
+ std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatInput>> inputs;
+ flowgraph::FlowGraphPortFloatOutput output;
+
+ const char *getName() override {
+ return "ManyToMultiConverter";
+ }
+
+private:
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
index 78aad52..c8d60b9 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
@@ -14,34 +14,28 @@
* limitations under the License.
*/
-
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "MonoToMultiConverter.h"
using namespace flowgraph;
-MonoToMultiConverter::MonoToMultiConverter(int32_t channelCount)
+MonoToMultiConverter::MonoToMultiConverter(int32_t outputChannelCount)
: input(*this, 1)
- , output(*this, channelCount) {
+ , output(*this, outputChannelCount) {
}
-MonoToMultiConverter::~MonoToMultiConverter() { }
-
-int32_t MonoToMultiConverter::onProcess(int64_t framePosition, int32_t numFrames) {
- int32_t framesToProcess = input.pullData(framePosition, numFrames);
-
- const float *inputBuffer = input.getBlock();
- float *outputBuffer = output.getBlock();
+int32_t MonoToMultiConverter::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
- // TODO maybe move to audio_util as audio_mono_to_multi()
- for (int i = 0; i < framesToProcess; i++) {
+ for (int i = 0; i < numFrames; i++) {
// read one, write many
float sample = *inputBuffer++;
for (int channel = 0; channel < channelCount; channel++) {
*outputBuffer++ = sample;
}
}
- return framesToProcess;
+ return numFrames;
}
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
index 34d53c7..6e87ccb 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
@@ -14,27 +14,34 @@
* limitations under the License.
*/
-
#ifndef FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
#define FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class MonoToMultiConverter : public AudioProcessorBase {
+/**
+ * Convert a monophonic stream to a multi-channel interleaved stream
+ * with the same signal on each channel.
+ */
+class MonoToMultiConverter : public FlowGraphNode {
public:
- explicit MonoToMultiConverter(int32_t channelCount);
+ explicit MonoToMultiConverter(int32_t outputChannelCount);
- virtual ~MonoToMultiConverter();
+ virtual ~MonoToMultiConverter() = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
- AudioFloatInputPort input;
- AudioFloatOutputPort output;
+ const char *getName() override {
+ return "MonoToMultiConverter";
+ }
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
new file mode 100644
index 0000000..c745108
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToMonoConverter.h"
+
+using namespace flowgraph;
+
+MultiToMonoConverter::MultiToMonoConverter(int32_t inputChannelCount)
+ : input(*this, inputChannelCount)
+ , output(*this, 1) {
+}
+
+MultiToMonoConverter::~MultiToMonoConverter() = default;
+
+int32_t MultiToMonoConverter::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
+ int32_t channelCount = input.getSamplesPerFrame();
+ for (int i = 0; i < numFrames; i++) {
+ // read first channel of multi stream, write many
+ *outputBuffer++ = *inputBuffer;
+ inputBuffer += channelCount;
+ }
+ return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.h b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
new file mode 100644
index 0000000..37c53bd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to a monophonic stream
+ * by extracting channel[0].
+ */
+ class MultiToMonoConverter : public FlowGraphNode {
+ public:
+ explicit MultiToMonoConverter(int32_t inputChannelCount);
+
+ virtual ~MultiToMonoConverter();
+
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "MultiToMonoConverter";
+ }
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
+ };
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/RampLinear.cpp b/media/libaaudio/src/flowgraph/RampLinear.cpp
index a260828..905ae07 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.cpp
+++ b/media/libaaudio/src/flowgraph/RampLinear.cpp
@@ -14,20 +14,15 @@
* limitations under the License.
*/
-#define LOG_TAG "RampLinear"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "RampLinear.h"
using namespace flowgraph;
RampLinear::RampLinear(int32_t channelCount)
- : input(*this, channelCount)
- , output(*this, channelCount) {
+ : FlowGraphFilter(channelCount) {
mTarget.store(1.0f);
}
@@ -37,16 +32,19 @@
void RampLinear::setTarget(float target) {
mTarget.store(target);
+ // If the ramp has not been used then start immediately at this level.
+ if (mLastCallCount == kInitialCallCount) {
+ forceCurrent(target);
+ }
}
float RampLinear::interpolateCurrent() {
return mLevelTo - (mRemaining * mScaler);
}
-int32_t RampLinear::onProcess(int64_t framePosition, int32_t numFrames) {
- int32_t framesToProcess = input.pullData(framePosition, numFrames);
- const float *inputBuffer = input.getBlock();
- float *outputBuffer = output.getBlock();
+int32_t RampLinear::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
float target = getTarget();
@@ -55,12 +53,10 @@
mLevelFrom = interpolateCurrent();
mLevelTo = target;
mRemaining = mLengthInFrames;
- ALOGV("%s() mLevelFrom = %f, mLevelTo = %f, mRemaining = %d, mScaler = %f",
- __func__, mLevelFrom, mLevelTo, mRemaining, mScaler);
mScaler = (mLevelTo - mLevelFrom) / mLengthInFrames; // for interpolation
}
- int32_t framesLeft = framesToProcess;
+ int32_t framesLeft = numFrames;
if (mRemaining > 0) { // Ramping? This doesn't happen very often.
int32_t framesToRamp = std::min(framesLeft, mRemaining);
@@ -81,5 +77,5 @@
*outputBuffer++ = *inputBuffer++ * mLevelTo;
}
- return framesToProcess;
+ return numFrames;
}
diff --git a/media/libaaudio/src/flowgraph/RampLinear.h b/media/libaaudio/src/flowgraph/RampLinear.h
index bdc8f41..f285704 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.h
+++ b/media/libaaudio/src/flowgraph/RampLinear.h
@@ -21,17 +21,25 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class RampLinear : public AudioProcessorBase {
+/**
+ * When the target is modified then the output will ramp smoothly
+ * between the original and the new target value.
+ * This can be used to smooth out control values and reduce pops.
+ *
+ * The target may be updated while a ramp is in progress, which will trigger
+ * a new ramp from the current value.
+ */
+class RampLinear : public FlowGraphFilter {
public:
explicit RampLinear(int32_t channelCount);
virtual ~RampLinear() = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
/**
* This is used for the next ramp.
@@ -66,8 +74,9 @@
mLevelTo = level;
}
- AudioFloatInputPort input;
- AudioFloatOutputPort output;
+ const char *getName() override {
+ return "RampLinear";
+ }
private:
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.cpp b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
new file mode 100644
index 0000000..5c3ed1f
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SampleRateConverter.h"
+
+using namespace flowgraph;
+using namespace resampler;
+
+SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResampler &resampler)
+ : FlowGraphFilter(channelCount)
+ , mResampler(resampler) {
+ setDataPulledAutomatically(false);
+}
+
+void SampleRateConverter::reset() {
+ FlowGraphNode::reset();
+ mInputCursor = kInitialCallCount;
+}
+
+// Return true if there is a sample available.
+bool SampleRateConverter::isInputAvailable() {
+ // If we have consumed all of the input data then go out and get some more.
+ if (mInputCursor >= mNumValidInputFrames) {
+ mInputCallCount++;
+ mNumValidInputFrames = input.pullData(mInputCallCount, input.getFramesPerBuffer());
+ mInputCursor = 0;
+ }
+ return (mInputCursor < mNumValidInputFrames);
+}
+
+const float *SampleRateConverter::getNextInputFrame() {
+ const float *inputBuffer = input.getBuffer();
+ return &inputBuffer[mInputCursor++ * input.getSamplesPerFrame()];
+}
+
+int32_t SampleRateConverter::onProcess(int32_t numFrames) {
+ float *outputBuffer = output.getBuffer();
+ int32_t channelCount = output.getSamplesPerFrame();
+ int framesLeft = numFrames;
+ while (framesLeft > 0) {
+ // Gather input samples as needed.
+ if(mResampler.isWriteNeeded()) {
+ if (isInputAvailable()) {
+ const float *frame = getNextInputFrame();
+ mResampler.writeNextFrame(frame);
+ } else {
+ break;
+ }
+ } else {
+ // Output frame is interpolated from input samples.
+ mResampler.readNextFrame(outputBuffer);
+ outputBuffer += channelCount;
+ framesLeft--;
+ }
+ }
+ return numFrames - framesLeft;
+}
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.h b/media/libaaudio/src/flowgraph/SampleRateConverter.h
new file mode 100644
index 0000000..57d76a4
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SAMPLE_RATE_CONVERTER_H
+#define OBOE_SAMPLE_RATE_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+#include "resampler/MultiChannelResampler.h"
+
+namespace flowgraph {
+
+class SampleRateConverter : public FlowGraphFilter {
+public:
+ explicit SampleRateConverter(int32_t channelCount,
+ resampler::MultiChannelResampler &mResampler);
+
+ virtual ~SampleRateConverter() = default;
+
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SampleRateConverter";
+ }
+
+ void reset() override;
+
+private:
+
+ // Return true if there is a sample available.
+ bool isInputAvailable();
+
+ // This assumes data is available. Only call after calling isInputAvailable().
+ const float *getNextInputFrame();
+
+ resampler::MultiChannelResampler &mResampler;
+
+ int32_t mInputCursor = 0; // offset into the input port buffer
+ int32_t mNumValidInputFrames = 0; // number of valid frames currently in the input port buffer
+ // We need our own callCount for upstream calls because calls occur at a different rate.
+ // This means we cannot have cyclic graphs or merges that contain an SRC.
+ int64_t mInputCallCount = 0;
+
+};
+
+} /* namespace flowgraph */
+
+#endif //OBOE_SAMPLE_RATE_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.cpp b/media/libaaudio/src/flowgraph/SinkFloat.cpp
index fb3dcbc..0588848 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SinkFloat.cpp
@@ -16,31 +16,31 @@
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SinkFloat.h"
using namespace flowgraph;
SinkFloat::SinkFloat(int32_t channelCount)
- : AudioSink(channelCount) {
+ : FlowGraphSink(channelCount) {
}
int32_t SinkFloat::read(void *data, int32_t numFrames) {
float *floatData = (float *) data;
- int32_t channelCount = input.getSamplesPerFrame();
+ const int32_t channelCount = input.getSamplesPerFrame();
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
- if (framesRead <= 0) {
+ int32_t framesPulled = pullData(framesLeft);
+ if (framesPulled <= 0) {
break;
}
- const float *signal = input.getBlock();
- int32_t numSamples = framesRead * channelCount;
+ const float *signal = input.getBuffer();
+ int32_t numSamples = framesPulled * channelCount;
memcpy(floatData, signal, numSamples * sizeof(float));
floatData += numSamples;
- framesLeft -= framesRead;
+ framesLeft -= framesPulled;
}
return numFrames - framesLeft;
}
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.h b/media/libaaudio/src/flowgraph/SinkFloat.h
index 7775c08..c812373 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.h
+++ b/media/libaaudio/src/flowgraph/SinkFloat.h
@@ -21,16 +21,23 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkFloat : public AudioSink {
+/**
+ * AudioSink that lets you read data as 32-bit floats.
+ */
+class SinkFloat : public FlowGraphSink {
public:
explicit SinkFloat(int32_t channelCount);
+ ~SinkFloat() override = default;
int32_t read(void *data, int32_t numFrames) override;
+ const char *getName() override {
+ return "SinkFloat";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI16.cpp b/media/libaaudio/src/flowgraph/SinkI16.cpp
index ffec8f5..da7fd6b 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI16.cpp
@@ -17,17 +17,16 @@
#include <algorithm>
#include <unistd.h>
-#ifdef __ANDROID__
+#include "SinkI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
-#include "SinkI16.h"
-
using namespace flowgraph;
SinkI16::SinkI16(int32_t channelCount)
- : AudioSink(channelCount) {}
+ : FlowGraphSink(channelCount) {}
int32_t SinkI16::read(void *data, int32_t numFrames) {
int16_t *shortData = (int16_t *) data;
@@ -36,13 +35,13 @@
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
+ int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
- const float *signal = input.getBlock();
+ const float *signal = input.getBuffer();
int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_i16_from_float(shortData, signal, numSamples);
shortData += numSamples;
signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI16.h b/media/libaaudio/src/flowgraph/SinkI16.h
index 6d86266..1e1ce3a 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.h
+++ b/media/libaaudio/src/flowgraph/SinkI16.h
@@ -20,15 +20,22 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkI16 : public AudioSink {
+/**
+ * AudioSink that lets you read data as 16-bit signed integers.
+ */
+class SinkI16 : public FlowGraphSink {
public:
explicit SinkI16(int32_t channelCount);
int32_t read(void *data, int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SinkI16";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI24.cpp b/media/libaaudio/src/flowgraph/SinkI24.cpp
index 0cb077d..a9fb5d2 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI24.cpp
@@ -15,19 +15,20 @@
*/
#include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
-#ifdef __ANDROID__
+
+#include "FlowGraphNode.h"
+#include "SinkI24.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
-#include "SinkI24.h"
-
using namespace flowgraph;
SinkI24::SinkI24(int32_t channelCount)
- : AudioSink(channelCount) {}
+ : FlowGraphSink(channelCount) {}
int32_t SinkI24::read(void *data, int32_t numFrames) {
uint8_t *byteData = (uint8_t *) data;
@@ -36,13 +37,13 @@
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
+ int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
- const float *floatData = input.getBlock();
+ const float *floatData = input.getBuffer();
int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_p24_from_float(byteData, floatData, numSamples);
static const int kBytesPerI24Packed = 3;
byteData += numSamples * kBytesPerI24Packed;
diff --git a/media/libaaudio/src/flowgraph/SinkI24.h b/media/libaaudio/src/flowgraph/SinkI24.h
index 5b9b505..44078a9 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.h
+++ b/media/libaaudio/src/flowgraph/SinkI24.h
@@ -20,15 +20,23 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkI24 : public AudioSink {
+/**
+ * AudioSink that lets you read data as packed 24-bit signed integers.
+ * The sample size is 3 bytes.
+ */
+class SinkI24 : public FlowGraphSink {
public:
explicit SinkI24(int32_t channelCount);
int32_t read(void *data, int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SinkI24";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI32.cpp b/media/libaaudio/src/flowgraph/SinkI32.cpp
index eab863d..9fd4e96 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI32.cpp
@@ -14,18 +14,18 @@
* limitations under the License.
*/
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "FlowgraphUtilities.h"
#include "SinkI32.h"
using namespace flowgraph;
SinkI32::SinkI32(int32_t channelCount)
- : AudioSink(channelCount) {}
+ : FlowGraphSink(channelCount) {}
int32_t SinkI32::read(void *data, int32_t numFrames) {
int32_t *intData = (int32_t *) data;
@@ -34,13 +34,13 @@
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
+ int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
- const float *signal = input.getBlock();
+ const float *signal = input.getBuffer();
int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_i32_from_float(intData, signal, numSamples);
intData += numSamples;
signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI32.h b/media/libaaudio/src/flowgraph/SinkI32.h
index 09d23b7..7456d5f 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.h
+++ b/media/libaaudio/src/flowgraph/SinkI32.h
@@ -19,16 +19,20 @@
#include <stdint.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkI32 : public AudioSink {
+class SinkI32 : public FlowGraphSink {
public:
explicit SinkI32(int32_t channelCount);
~SinkI32() override = default;
int32_t read(void *data, int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SinkI32";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.cpp b/media/libaaudio/src/flowgraph/SourceFloat.cpp
index 4bb674f..1b3daf1 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SourceFloat.cpp
@@ -16,23 +16,22 @@
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SourceFloat.h"
using namespace flowgraph;
SourceFloat::SourceFloat(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceFloat::onProcess(int64_t framePosition, int32_t numFrames) {
+int32_t SourceFloat::onProcess(int32_t numFrames) {
+ float *outputBuffer = output.getBuffer();
+ const int32_t channelCount = output.getSamplesPerFrame();
- float *outputBuffer = output.getBlock();
- int32_t channelCount = output.getSamplesPerFrame();
-
- int32_t framesLeft = mSizeInFrames - mFrameIndex;
- int32_t framesToProcess = std::min(numFrames, framesLeft);
- int32_t numSamples = framesToProcess * channelCount;
+ const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+ const int32_t framesToProcess = std::min(numFrames, framesLeft);
+ const int32_t numSamples = framesToProcess * channelCount;
const float *floatBase = (float *) mData;
const float *floatData = &floatBase[mFrameIndex * channelCount];
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.h b/media/libaaudio/src/flowgraph/SourceFloat.h
index e6eed9f..4719669 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.h
+++ b/media/libaaudio/src/flowgraph/SourceFloat.h
@@ -20,15 +20,23 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SourceFloat : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined float data.
+ */
+class SourceFloat : public FlowGraphSourceBuffered {
public:
explicit SourceFloat(int32_t channelCount);
+ ~SourceFloat() override = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SourceFloat";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI16.cpp b/media/libaaudio/src/flowgraph/SourceI16.cpp
index c3fcec2..8813023 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI16.cpp
@@ -17,21 +17,21 @@
#include <algorithm>
#include <unistd.h>
-#ifdef __ANDROID__
+#include "FlowGraphNode.h"
+#include "SourceI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
-#include "SourceI16.h"
-
using namespace flowgraph;
SourceI16::SourceI16(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceI16::onProcess(int64_t framePosition, int32_t numFrames) {
- float *floatData = output.getBlock();
+int32_t SourceI16::onProcess(int32_t numFrames) {
+ float *floatData = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -41,7 +41,7 @@
const int16_t *shortBase = static_cast<const int16_t *>(mData);
const int16_t *shortData = &shortBase[mFrameIndex * channelCount];
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_float_from_i16(floatData, shortData, numSamples);
#else
for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI16.h b/media/libaaudio/src/flowgraph/SourceI16.h
index 2b116cf..fe440b2 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.h
+++ b/media/libaaudio/src/flowgraph/SourceI16.h
@@ -20,15 +20,21 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-
-class SourceI16 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 16-bit integer data.
+ */
+class SourceI16 : public FlowGraphSourceBuffered {
public:
explicit SourceI16(int32_t channelCount);
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SourceI16";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI24.cpp b/media/libaaudio/src/flowgraph/SourceI24.cpp
index 097954e..1975878 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI24.cpp
@@ -15,13 +15,13 @@
*/
#include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SourceI24.h"
using namespace flowgraph;
@@ -29,11 +29,11 @@
constexpr int kBytesPerI24Packed = 3;
SourceI24::SourceI24(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceI24::onProcess(int64_t framePosition, int32_t numFrames) {
- float *floatData = output.getBlock();
+int32_t SourceI24::onProcess(int32_t numFrames) {
+ float *floatData = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -43,7 +43,7 @@
const uint8_t *byteBase = (uint8_t *) mData;
const uint8_t *byteData = &byteBase[mFrameIndex * channelCount * kBytesPerI24Packed];
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_float_from_p24(floatData, byteData, numSamples);
#else
static const float scale = 1. / (float)(1UL << 31);
diff --git a/media/libaaudio/src/flowgraph/SourceI24.h b/media/libaaudio/src/flowgraph/SourceI24.h
index 2ed6f18..3779534 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.h
+++ b/media/libaaudio/src/flowgraph/SourceI24.h
@@ -17,17 +17,25 @@
#ifndef FLOWGRAPH_SOURCE_I24_H
#define FLOWGRAPH_SOURCE_I24_H
-#include <stdint.h>
+#include <unistd.h>
+#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SourceI24 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 24-bit packed integer data.
+ */
+class SourceI24 : public FlowGraphSourceBuffered {
public:
explicit SourceI24(int32_t channelCount);
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SourceI24";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI32.cpp b/media/libaaudio/src/flowgraph/SourceI32.cpp
index e8177ad..4b2e8c4 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI32.cpp
@@ -17,31 +17,31 @@
#include <algorithm>
#include <unistd.h>
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SourceI32.h"
using namespace flowgraph;
SourceI32::SourceI32(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceI32::onProcess(int64_t framePosition, int32_t numFrames) {
- float *floatData = output.getBlock();
- int32_t channelCount = output.getSamplesPerFrame();
+int32_t SourceI32::onProcess(int32_t numFrames) {
+ float *floatData = output.getBuffer();
+ const int32_t channelCount = output.getSamplesPerFrame();
- int32_t framesLeft = mSizeInFrames - mFrameIndex;
- int32_t framesToProcess = std::min(numFrames, framesLeft);
- int32_t numSamples = framesToProcess * channelCount;
+ const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+ const int32_t framesToProcess = std::min(numFrames, framesLeft);
+ const int32_t numSamples = framesToProcess * channelCount;
const int32_t *intBase = static_cast<const int32_t *>(mData);
const int32_t *intData = &intBase[mFrameIndex * channelCount];
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_float_from_i32(floatData, intData, numSamples);
#else
for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI32.h b/media/libaaudio/src/flowgraph/SourceI32.h
index e50f9be..b4e0d7b 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.h
+++ b/media/libaaudio/src/flowgraph/SourceI32.h
@@ -19,17 +19,20 @@
#include <stdint.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SourceI32 : public AudioSource {
+class SourceI32 : public FlowGraphSourceBuffered {
public:
explicit SourceI32(int32_t channelCount);
~SourceI32() override = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+ const char *getName() override {
+ return "SourceI32";
+ }
private:
static constexpr float kScale = 1.0 / (1UL << 31);
};
diff --git a/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
new file mode 100644
index 0000000..f6479ae
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+#define RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a HyperbolicCosineWindow window centered at 0.
+ * This can be used in place of a Kaiser window.
+ *
+ * The code is based on an anonymous contribution by "a concerned citizen":
+ * https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+ */
+class HyperbolicCosineWindow {
+public:
+ HyperbolicCosineWindow() {
+ setStopBandAttenuation(60);
+ }
+
+ /**
+ * @param attenuation typical values range from 30 to 90 dB
+ * @return beta
+ */
+ double setStopBandAttenuation(double attenuation) {
+ double alpha = ((-325.1e-6 * attenuation + 0.1677) * attenuation) - 3.149;
+ setAlpha(alpha);
+ return alpha;
+ }
+
+ void setAlpha(double alpha) {
+ mAlpha = alpha;
+ mInverseCoshAlpha = 1.0 / cosh(alpha);
+ }
+
+ /**
+ * @param x ranges from -1.0 to +1.0
+ */
+ double operator()(double x) {
+ double x2 = x * x;
+ if (x2 >= 1.0) return 0.0;
+ double w = mAlpha * sqrt(1.0 - x2);
+ return cosh(w) * mInverseCoshAlpha;
+ }
+
+private:
+ double mAlpha = 0.0;
+ double mInverseCoshAlpha = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
new file mode 100644
index 0000000..4bd75b3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IntegerRatio.h"
+
+using namespace resampler;
+
+// Enough primes to cover the common sample rates.
+static const int kPrimes[] = {
+ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+ 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
+ 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+ 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199};
+
+void IntegerRatio::reduce() {
+ for (int prime : kPrimes) {
+ if (mNumerator < prime || mDenominator < prime) {
+ break;
+ }
+
+ // Find biggest prime factor for numerator.
+ while (true) {
+ int top = mNumerator / prime;
+ int bottom = mDenominator / prime;
+ if ((top >= 1)
+ && (bottom >= 1)
+ && (top * prime == mNumerator) // divided evenly?
+ && (bottom * prime == mDenominator)) {
+ mNumerator = top;
+ mDenominator = bottom;
+ } else {
+ break;
+ }
+ }
+
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
new file mode 100644
index 0000000..8c044d8
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_INTEGER_RATIO_H
+#define OBOE_INTEGER_RATIO_H
+
+#include <sys/types.h>
+
+namespace resampler {
+
+/**
+ * Represent the ratio of two integers.
+ */
+class IntegerRatio {
+public:
+ IntegerRatio(int32_t numerator, int32_t denominator)
+ : mNumerator(numerator), mDenominator(denominator) {}
+
+ /**
+ * Reduce by removing common prime factors.
+ */
+ void reduce();
+
+ int32_t getNumerator() {
+ return mNumerator;
+ }
+
+ int32_t getDenominator() {
+ return mDenominator;
+ }
+
+private:
+ int32_t mNumerator;
+ int32_t mDenominator;
+};
+
+} // namespace resampler
+
+#endif //OBOE_INTEGER_RATIO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
new file mode 100644
index 0000000..73dbc41
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_KAISER_WINDOW_H
+#define RESAMPLER_KAISER_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a Kaiser window centered at 0.
+ */
+class KaiserWindow {
+public:
+ KaiserWindow() {
+ setStopBandAttenuation(60);
+ }
+
+ /**
+ * @param attenuation typical values range from 30 to 90 dB
+ * @return beta
+ */
+ double setStopBandAttenuation(double attenuation) {
+ double beta = 0.0;
+ if (attenuation > 50) {
+ beta = 0.1102 * (attenuation - 8.7);
+ } else if (attenuation >= 21) {
+ double a21 = attenuation - 21;
+ beta = 0.5842 * pow(a21, 0.4) + (0.07886 * a21);
+ }
+ setBeta(beta);
+ return beta;
+ }
+
+ void setBeta(double beta) {
+ mBeta = beta;
+ mInverseBesselBeta = 1.0 / bessel(beta);
+ }
+
+ /**
+ * @param x ranges from -1.0 to +1.0
+ */
+ double operator()(double x) {
+ double x2 = x * x;
+ if (x2 >= 1.0) return 0.0;
+ double w = mBeta * sqrt(1.0 - x2);
+ return bessel(w) * mInverseBesselBeta;
+ }
+
+ // Approximation of a
+ // modified zero order Bessel function of the first kind.
+ // Based on a discussion at:
+ // https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+ static double bessel(double x) {
+ double y = cosh(0.970941817426052 * x);
+ y += cosh(0.8854560256532099 * x);
+ y += cosh(0.7485107481711011 * x);
+ y += cosh(0.5680647467311558 * x);
+ y += cosh(0.3546048870425356 * x);
+ y += cosh(0.120536680255323 * x);
+ y *= 2;
+ y += cosh(x);
+ y /= 13;
+ return y;
+ }
+
+private:
+ double mBeta = 0.0;
+ double mInverseBesselBeta = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_KAISER_WINDOW_H
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
new file mode 100644
index 0000000..a7748c1
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LinearResampler.h"
+
+using namespace resampler;
+
+LinearResampler::LinearResampler(const MultiChannelResampler::Builder &builder)
+ : MultiChannelResampler(builder) {
+ mPreviousFrame = std::make_unique<float[]>(getChannelCount());
+ mCurrentFrame = std::make_unique<float[]>(getChannelCount());
+}
+
+void LinearResampler::writeFrame(const float *frame) {
+ memcpy(mPreviousFrame.get(), mCurrentFrame.get(), sizeof(float) * getChannelCount());
+ memcpy(mCurrentFrame.get(), frame, sizeof(float) * getChannelCount());
+}
+
+void LinearResampler::readFrame(float *frame) {
+ float *previous = mPreviousFrame.get();
+ float *current = mCurrentFrame.get();
+ float phase = (float) getIntegerPhase() / mDenominator;
+ // iterate across samples in the frame
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float f0 = *previous++;
+ float f1 = *current++;
+ *frame++ = f0 + (phase * (f1 - f0));
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.h b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
new file mode 100644
index 0000000..6bde81d
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_LINEAR_RESAMPLER_H
+#define OBOE_LINEAR_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Simple resampler that uses bi-linear interpolation.
+ */
+class LinearResampler : public MultiChannelResampler {
+public:
+ explicit LinearResampler(const MultiChannelResampler::Builder &builder);
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+
+private:
+ std::unique_ptr<float[]> mPreviousFrame;
+ std::unique_ptr<float[]> mCurrentFrame;
+};
+
+} // namespace resampler
+#endif //OBOE_LINEAR_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
new file mode 100644
index 0000000..d630520
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+
+#include "IntegerRatio.h"
+#include "LinearResampler.h"
+#include "MultiChannelResampler.h"
+#include "PolyphaseResampler.h"
+#include "PolyphaseResamplerMono.h"
+#include "PolyphaseResamplerStereo.h"
+#include "SincResampler.h"
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+MultiChannelResampler::MultiChannelResampler(const MultiChannelResampler::Builder &builder)
+ : mNumTaps(builder.getNumTaps())
+ , mX(builder.getChannelCount() * builder.getNumTaps() * 2)
+ , mSingleFrame(builder.getChannelCount())
+ , mChannelCount(builder.getChannelCount())
+ {
+ // Reduce sample rates to the smallest ratio.
+ // For example 44100/48000 would become 147/160.
+ IntegerRatio ratio(builder.getInputRate(), builder.getOutputRate());
+ ratio.reduce();
+ mNumerator = ratio.getNumerator();
+ mDenominator = ratio.getDenominator();
+ mIntegerPhase = mDenominator;
+}
+
+// static factory method
+MultiChannelResampler *MultiChannelResampler::make(int32_t channelCount,
+ int32_t inputRate,
+ int32_t outputRate,
+ Quality quality) {
+ Builder builder;
+ builder.setInputRate(inputRate);
+ builder.setOutputRate(outputRate);
+ builder.setChannelCount(channelCount);
+
+ switch (quality) {
+ case Quality::Fastest:
+ builder.setNumTaps(2);
+ break;
+ case Quality::Low:
+ builder.setNumTaps(4);
+ break;
+ case Quality::Medium:
+ default:
+ builder.setNumTaps(8);
+ break;
+ case Quality::High:
+ builder.setNumTaps(16);
+ break;
+ case Quality::Best:
+ builder.setNumTaps(32);
+ break;
+ }
+
+ // Set the cutoff frequency so that we do not get aliasing when down-sampling.
+ if (inputRate > outputRate) {
+ builder.setNormalizedCutoff(kDefaultNormalizedCutoff);
+ }
+ return builder.build();
+}
+
+MultiChannelResampler *MultiChannelResampler::Builder::build() {
+ if (getNumTaps() == 2) {
+ // Note that this does not do low pass filteringh.
+ return new LinearResampler(*this);
+ }
+ IntegerRatio ratio(getInputRate(), getOutputRate());
+ ratio.reduce();
+ bool usePolyphase = (getNumTaps() * ratio.getDenominator()) <= kMaxCoefficients;
+ if (usePolyphase) {
+ if (getChannelCount() == 1) {
+ return new PolyphaseResamplerMono(*this);
+ } else if (getChannelCount() == 2) {
+ return new PolyphaseResamplerStereo(*this);
+ } else {
+ return new PolyphaseResampler(*this);
+ }
+ } else {
+ // Use less optimized resampler that uses a float phaseIncrement.
+ // TODO mono resampler
+ if (getChannelCount() == 2) {
+ return new SincResamplerStereo(*this);
+ } else {
+ return new SincResampler(*this);
+ }
+ }
+}
+
+void MultiChannelResampler::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * getChannelCount()];
+ int offset = getNumTaps() * getChannelCount();
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ // Write twice so we avoid having to wrap when reading.
+ dest[channel] = dest[channel + offset] = frame[channel];
+ }
+}
+
+float MultiChannelResampler::sinc(float radians) {
+ if (abs(radians) < 1.0e-9) return 1.0f; // avoid divide by zero
+ return sinf(radians) / radians; // Sinc function
+}
+
+// Generate coefficients in the order they will be used by readFrame().
+// This is more complicated but readFrame() is called repeatedly and should be optimized.
+void MultiChannelResampler::generateCoefficients(int32_t inputRate,
+ int32_t outputRate,
+ int32_t numRows,
+ double phaseIncrement,
+ float normalizedCutoff) {
+ mCoefficients.resize(getNumTaps() * numRows);
+ int coefficientIndex = 0;
+ double phase = 0.0; // ranges from 0.0 to 1.0, fraction between samples
+ // Stretch the sinc function for low pass filtering.
+ const float cutoffScaler = normalizedCutoff *
+ ((outputRate < inputRate)
+ ? ((float)outputRate / inputRate)
+ : ((float)inputRate / outputRate));
+ const int numTapsHalf = getNumTaps() / 2; // numTaps must be even.
+ const float numTapsHalfInverse = 1.0f / numTapsHalf;
+ for (int i = 0; i < numRows; i++) {
+ float tapPhase = phase - numTapsHalf;
+ float gain = 0.0; // sum of raw coefficients
+ int gainCursor = coefficientIndex;
+ for (int tap = 0; tap < getNumTaps(); tap++) {
+ float radians = tapPhase * M_PI;
+
+#if MCR_USE_KAISER
+ float window = mKaiserWindow(tapPhase * numTapsHalfInverse);
+#else
+ float window = mCoshWindow(tapPhase * numTapsHalfInverse);
+#endif
+ float coefficient = sinc(radians * cutoffScaler) * window;
+ mCoefficients.at(coefficientIndex++) = coefficient;
+ gain += coefficient;
+ tapPhase += 1.0;
+ }
+ phase += phaseIncrement;
+ while (phase >= 1.0) {
+ phase -= 1.0;
+ }
+
+ // Correct for gain variations.
+ float gainCorrection = 1.0 / gain; // normalize the gain
+ for (int tap = 0; tap < getNumTaps(); tap++) {
+ mCoefficients.at(gainCursor + tap) *= gainCorrection;
+ }
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
new file mode 100644
index 0000000..da79cad
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_MULTICHANNEL_RESAMPLER_H
+#define OBOE_MULTICHANNEL_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef MCR_USE_KAISER
+// It appears from the spectrogram that the HyperbolicCosine window leads to fewer artifacts.
+// And it is faster to calculate.
+#define MCR_USE_KAISER 0
+#endif
+
+#if MCR_USE_KAISER
+#include "KaiserWindow.h"
+#else
+#include "HyperbolicCosineWindow.h"
+#endif
+
+namespace resampler {
+
+class MultiChannelResampler {
+
+public:
+
+ enum class Quality : int32_t {
+ Fastest,
+ Low,
+ Medium,
+ High,
+ Best,
+ };
+
+ class Builder {
+ public:
+ /**
+ * Construct an optimal resampler based on the specified parameters.
+ * @return address of a resampler
+ */
+ MultiChannelResampler *build();
+
+ /**
+ * The number of taps in the resampling filter.
+ * More taps gives better quality but uses more CPU time.
+ * This typically ranges from 4 to 64. Default is 16.
+ *
+ * For polyphase filters, numTaps must be a multiple of four for loop unrolling.
+ * @param numTaps number of taps for the filter
+ * @return address of this builder for chaining calls
+ */
+ Builder *setNumTaps(int32_t numTaps) {
+ mNumTaps = numTaps;
+ return this;
+ }
+
+ /**
+ * Use 1 for mono, 2 for stereo, etc. Default is 1.
+ *
+ * @param channelCount number of channels
+ * @return address of this builder for chaining calls
+ */
+ Builder *setChannelCount(int32_t channelCount) {
+ mChannelCount = channelCount;
+ return this;
+ }
+
+ /**
+ * Default is 48000.
+ *
+ * @param inputRate sample rate of the input stream
+ * @return address of this builder for chaining calls
+ */
+ Builder *setInputRate(int32_t inputRate) {
+ mInputRate = inputRate;
+ return this;
+ }
+
+ /**
+ * Default is 48000.
+ *
+ * @param outputRate sample rate of the output stream
+ * @return address of this builder for chaining calls
+ */
+ Builder *setOutputRate(int32_t outputRate) {
+ mOutputRate = outputRate;
+ return this;
+ }
+
+ /**
+ * Set cutoff frequency relative to the Nyquist rate of the output sample rate.
+ * Set to 1.0 to match the Nyquist frequency.
+ * Set lower to reduce aliasing.
+ * Default is 0.70.
+ *
+ * @param normalizedCutoff anti-aliasing filter cutoff
+ * @return address of this builder for chaining calls
+ */
+ Builder *setNormalizedCutoff(float normalizedCutoff) {
+ mNormalizedCutoff = normalizedCutoff;
+ return this;
+ }
+
+ int32_t getNumTaps() const {
+ return mNumTaps;
+ }
+
+ int32_t getChannelCount() const {
+ return mChannelCount;
+ }
+
+ int32_t getInputRate() const {
+ return mInputRate;
+ }
+
+ int32_t getOutputRate() const {
+ return mOutputRate;
+ }
+
+ float getNormalizedCutoff() const {
+ return mNormalizedCutoff;
+ }
+
+ protected:
+ int32_t mChannelCount = 1;
+ int32_t mNumTaps = 16;
+ int32_t mInputRate = 48000;
+ int32_t mOutputRate = 48000;
+ float mNormalizedCutoff = kDefaultNormalizedCutoff;
+ };
+
+ virtual ~MultiChannelResampler() = default;
+
+ /**
+ * Factory method for making a resampler that is optimal for the given inputs.
+ *
+ * @param channelCount number of channels, 2 for stereo
+ * @param inputRate sample rate of the input stream
+ * @param outputRate sample rate of the output stream
+ * @param quality higher quality sounds better but uses more CPU
+ * @return an optimal resampler
+ */
+ static MultiChannelResampler *make(int32_t channelCount,
+ int32_t inputRate,
+ int32_t outputRate,
+ Quality quality);
+
+ bool isWriteNeeded() const {
+ return mIntegerPhase >= mDenominator;
+ }
+
+ /**
+ * Write a frame containing N samples.
+ *
+ * @param frame pointer to the first sample in a frame
+ */
+ void writeNextFrame(const float *frame) {
+ writeFrame(frame);
+ advanceWrite();
+ }
+
+ /**
+ * Read a frame containing N samples.
+ *
+ * @param frame pointer to the first sample in a frame
+ */
+ void readNextFrame(float *frame) {
+ readFrame(frame);
+ advanceRead();
+ }
+
+ int getNumTaps() const {
+ return mNumTaps;
+ }
+
+ int getChannelCount() const {
+ return mChannelCount;
+ }
+
+ static float hammingWindow(float radians, float spread);
+
+ static float sinc(float radians);
+
+protected:
+
+ explicit MultiChannelResampler(const MultiChannelResampler::Builder &builder);
+
+ /**
+ * Write a frame containing N samples.
+ * Call advanceWrite() after calling this.
+ * @param frame pointer to the first sample in a frame
+ */
+ virtual void writeFrame(const float *frame);
+
+ /**
+ * Read a frame containing N samples using interpolation.
+ * Call advanceRead() after calling this.
+ * @param frame pointer to the first sample in a frame
+ */
+ virtual void readFrame(float *frame) = 0;
+
+ void advanceWrite() {
+ mIntegerPhase -= mDenominator;
+ }
+
+ void advanceRead() {
+ mIntegerPhase += mNumerator;
+ }
+
+ /**
+ * Generate the filter coefficients in optimal order.
+ * @param inputRate sample rate of the input stream
+ * @param outputRate sample rate of the output stream
+ * @param numRows number of rows in the array that contain a set of tap coefficients
+ * @param phaseIncrement how much to increment the phase between rows
+ * @param normalizedCutoff filter cutoff frequency normalized to Nyquist rate of output
+ */
+ void generateCoefficients(int32_t inputRate,
+ int32_t outputRate,
+ int32_t numRows,
+ double phaseIncrement,
+ float normalizedCutoff);
+
+
+ int32_t getIntegerPhase() {
+ return mIntegerPhase;
+ }
+
+ static constexpr int kMaxCoefficients = 8 * 1024;
+ std::vector<float> mCoefficients;
+
+ const int mNumTaps;
+ int mCursor = 0;
+ std::vector<float> mX; // delayed input values for the FIR
+ std::vector<float> mSingleFrame; // one frame for temporary use
+ int32_t mIntegerPhase = 0;
+ int32_t mNumerator = 0;
+ int32_t mDenominator = 0;
+
+
+private:
+
+#if MCR_USE_KAISER
+ KaiserWindow mKaiserWindow;
+#else
+ HyperbolicCosineWindow mCoshWindow;
+#endif
+
+ static constexpr float kDefaultNormalizedCutoff = 0.70f;
+
+ const int mChannelCount;
+};
+
+} // namespace resampler
+#endif //OBOE_MULTICHANNEL_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
new file mode 100644
index 0000000..aa4ffd9
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "IntegerRatio.h"
+#include "PolyphaseResampler.h"
+
+using namespace resampler;
+
+PolyphaseResampler::PolyphaseResampler(const MultiChannelResampler::Builder &builder)
+ : MultiChannelResampler(builder)
+ {
+ assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+
+ int32_t inputRate = builder.getInputRate();
+ int32_t outputRate = builder.getOutputRate();
+
+ int32_t numRows = mDenominator;
+ double phaseIncrement = (double) inputRate / (double) outputRate;
+ generateCoefficients(inputRate, outputRate,
+ numRows, phaseIncrement,
+ builder.getNormalizedCutoff());
+}
+
+void PolyphaseResampler::readFrame(float *frame) {
+ // Clear accumulator for mixing.
+ std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+
+ // Multiply input times windowed sinc function.
+ float *coefficients = &mCoefficients[mCoefficientCursor];
+ float *xFrame = &mX[mCursor * getChannelCount()];
+ for (int i = 0; i < mNumTaps; i++) {
+ float coefficient = *coefficients++;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ mSingleFrame[channel] += *xFrame++ * coefficient;
+ }
+ }
+
+ // Advance and wrap through coefficients.
+ mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+ // Copy accumulator to output.
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ frame[channel] = mSingleFrame[channel];
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
new file mode 100644
index 0000000..1aeb680
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_H
+#define OBOE_POLYPHASE_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+/**
+ * Resampler that is optimized for a reduced ratio of sample rates.
+ * All of the coefficients for each possible phase value are pre-calculated.
+ */
+class PolyphaseResampler : public MultiChannelResampler {
+public:
+ /**
+ *
+ * @param builder containing lots of parameters
+ */
+ explicit PolyphaseResampler(const MultiChannelResampler::Builder &builder);
+
+ virtual ~PolyphaseResampler() = default;
+
+ void readFrame(float *frame) override;
+
+protected:
+
+ int32_t mCoefficientCursor = 0;
+
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
new file mode 100644
index 0000000..c0e29b7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerMono.h"
+
+using namespace resampler;
+
+#define MONO 1
+
+PolyphaseResamplerMono::PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder)
+ : PolyphaseResampler(builder) {
+ assert(builder.getChannelCount() == MONO);
+}
+
+void PolyphaseResamplerMono::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * MONO];
+ const int offset = mNumTaps * MONO;
+ // Write each channel twice so we avoid having to wrap when running the FIR.
+ const float sample = frame[0];
+ // Put ordered writes together.
+ dest[0] = sample;
+ dest[offset] = sample;
+}
+
+void PolyphaseResamplerMono::readFrame(float *frame) {
+ // Clear accumulator.
+ float sum = 0.0;
+
+ // Multiply input times precomputed windowed sinc function.
+ const float *coefficients = &mCoefficients[mCoefficientCursor];
+ float *xFrame = &mX[mCursor * MONO];
+ const int numLoops = mNumTaps >> 2; // n/4
+ for (int i = 0; i < numLoops; i++) {
+ // Manual loop unrolling, might get converted to SIMD.
+ sum += *xFrame++ * *coefficients++;
+ sum += *xFrame++ * *coefficients++;
+ sum += *xFrame++ * *coefficients++;
+ sum += *xFrame++ * *coefficients++;
+ }
+
+ mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+ // Copy accumulator to output.
+ frame[0] = sum;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
new file mode 100644
index 0000000..0a691a3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_MONO_H
+#define OBOE_POLYPHASE_RESAMPLER_MONO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerMono : public PolyphaseResampler {
+public:
+ explicit PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder);
+
+ virtual ~PolyphaseResamplerMono() = default;
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_MONO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
new file mode 100644
index 0000000..e4bef74
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO 2
+
+PolyphaseResamplerStereo::PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder)
+ : PolyphaseResampler(builder) {
+ assert(builder.getChannelCount() == STEREO);
+}
+
+void PolyphaseResamplerStereo::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * STEREO];
+ const int offset = mNumTaps * STEREO;
+ // Write each channel twice so we avoid having to wrap when running the FIR.
+ const float left = frame[0];
+ const float right = frame[1];
+ // Put ordered writes together.
+ dest[0] = left;
+ dest[1] = right;
+ dest[offset] = left;
+ dest[1 + offset] = right;
+}
+
+void PolyphaseResamplerStereo::readFrame(float *frame) {
+ // Clear accumulators.
+ float left = 0.0;
+ float right = 0.0;
+
+ // Multiply input times precomputed windowed sinc function.
+ const float *coefficients = &mCoefficients[mCoefficientCursor];
+ float *xFrame = &mX[mCursor * STEREO];
+ const int numLoops = mNumTaps >> 2; // n/4
+ for (int i = 0; i < numLoops; i++) {
+ // Manual loop unrolling, might get converted to SIMD.
+ float coefficient = *coefficients++;
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+
+ coefficient = *coefficients++; // next tap
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+
+ coefficient = *coefficients++; // next tap
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+
+ coefficient = *coefficients++; // next tap
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+ }
+
+ mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+ // Copy accumulators to output.
+ frame[0] = left;
+ frame[1] = right;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
new file mode 100644
index 0000000..e608483
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_STEREO_H
+#define OBOE_POLYPHASE_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerStereo : public PolyphaseResampler {
+public:
+ explicit PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+ virtual ~PolyphaseResamplerStereo() = default;
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/README.md b/media/libaaudio/src/flowgraph/resampler/README.md
new file mode 100644
index 0000000..05d8a89
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/README.md
@@ -0,0 +1,91 @@
+# Sample Rate Converter
+
+This folder contains a sample rate converter, or "resampler".
+
+The converter is based on a sinc function that has been windowed by a hyperbolic cosine.
+We found this had fewer artifacts than the more traditional Kaiser window.
+
+## Creating a Resampler
+
+Include the [main header](MultiChannelResampler.h) for the resampler.
+
+ #include "resampler/MultiChannelResampler.h"
+
+Here is an example of creating a stereo resampler that will convert from 44100 to 48000 Hz.
+Only do this once, when you open your stream. Then use the same resampler to process multiple buffers.
+
+ MultiChannelResampler *resampler = MultiChannelResampler::make(
+ 2, // channel count
+ 44100, // input sampleRate
+ 48000, // output sampleRate
+ MultiChannelResampler::Quality::Medium); // conversion quality
+
+Possible values for quality include { Fastest, Low, Medium, High, Best }.
+Higher quality levels will sound better but consume more CPU because they have more taps in the filter.
+
+## Fractional Frame Counts
+
+Note that the number of output frames generated for a given number of input frames can vary.
+
+For example, suppose you are converting from 44100 Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output frames you get:
+
+    960 * 48000 / 44100 = 1044.897959...
+
+You cannot generate a fractional number of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. On average it will generate 1044.897959 frames. The resampler stores the fraction internally and keeps track of when to consume or generate a frame.
+
+You can either use a fixed number of input frames or a fixed number of output frames. The other frame count will vary.
+
+## Calling the Resampler with a fixed number of OUTPUT frames
+
+In this example, suppose we have a fixed number of output frames and a variable number of input frames.
+
+Assume you start with these variables and a method that returns the next input frame:
+
+ float *outputBuffer; // multi-channel buffer to be filled
+ int numOutputFrames; // number of frames of output
+
+The resampler has a method isWriteNeeded() that tells you whether to write to or read from the resampler.
+
+ int outputFramesLeft = numOutputFrames;
+ while (outputFramesLeft > 0) {
+ if(resampler->isWriteNeeded()) {
+ const float *frame = getNextInputFrame(); // you provide this
+ resampler->writeNextFrame(frame);
+ } else {
+ resampler->readNextFrame(outputBuffer);
+ outputBuffer += channelCount;
+ outputFramesLeft--;
+ }
+ }
+
+## Calling the Resampler with a fixed number of INPUT frames
+
+In this example, suppose we have a fixed number of input frames and a variable number of output frames.
+
+Assume you start with these variables:
+
+ float *inputBuffer; // multi-channel buffer to be consumed
+ float *outputBuffer; // multi-channel buffer to be filled
+ int numInputFrames; // number of frames of input
+ int numOutputFrames = 0;
+ int channelCount; // 1 for mono, 2 for stereo
+
+ int inputFramesLeft = numInputFrames;
+ while (inputFramesLeft > 0) {
+ if(resampler->isWriteNeeded()) {
+ resampler->writeNextFrame(inputBuffer);
+ inputBuffer += channelCount;
+ inputFramesLeft--;
+ } else {
+ resampler->readNextFrame(outputBuffer);
+ outputBuffer += channelCount;
+ numOutputFrames++;
+ }
+ }
+
+## Deleting the Resampler
+
+When you are done, you should delete the Resampler to avoid a memory leak.
+
+ delete resampler;
+
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
new file mode 100644
index 0000000..5e8a9e0
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "SincResampler.h"
+
+using namespace resampler;
+
+SincResampler::SincResampler(const MultiChannelResampler::Builder &builder)
+ : MultiChannelResampler(builder)
+ , mSingleFrame2(builder.getChannelCount()) {
+ assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+ mNumRows = kMaxCoefficients / getNumTaps(); // no guard row needed
+ mPhaseScaler = (double) mNumRows / mDenominator;
+ double phaseIncrement = 1.0 / mNumRows;
+ generateCoefficients(builder.getInputRate(),
+ builder.getOutputRate(),
+ mNumRows,
+ phaseIncrement,
+ builder.getNormalizedCutoff());
+}
+
+void SincResampler::readFrame(float *frame) {
+ // Clear accumulator for mixing.
+ std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+ std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+ // Determine indices into coefficients table.
+ double tablePhase = getIntegerPhase() * mPhaseScaler;
+ int index1 = static_cast<int>(floor(tablePhase));
+ if (index1 >= mNumRows) { // no guard row needed because we wrap the indices
+ tablePhase -= mNumRows;
+ index1 -= mNumRows;
+ }
+
+ int index2 = index1 + 1;
+ if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+ index2 -= mNumRows;
+ }
+
+ float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+ float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+
+ float *xFrame = &mX[mCursor * getChannelCount()];
+ for (int i = 0; i < mNumTaps; i++) {
+ float coefficient1 = *coefficients1++;
+ float coefficient2 = *coefficients2++;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float sample = *xFrame++;
+ mSingleFrame[channel] += sample * coefficient1;
+ mSingleFrame2[channel] += sample * coefficient2;
+ }
+ }
+
+ // Interpolate and copy to output.
+ float fraction = tablePhase - index1;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float low = mSingleFrame[channel];
+ float high = mSingleFrame2[channel];
+ frame[channel] = low + (fraction * (high - low));
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.h b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
new file mode 100644
index 0000000..b235188
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_H
+#define OBOE_SINC_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Resampler that can interpolate between coefficients.
+ * This can be used to support arbitrary ratios.
+ */
+class SincResampler : public MultiChannelResampler {
+public:
+ explicit SincResampler(const MultiChannelResampler::Builder &builder);
+
+ virtual ~SincResampler() = default;
+
+ void readFrame(float *frame) override;
+
+protected:
+
+ std::vector<float> mSingleFrame2; // for interpolation
+ int32_t mNumRows = 0;
+ double mPhaseScaler = 1.0;
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
new file mode 100644
index 0000000..ce00302
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO 2
+
+SincResamplerStereo::SincResamplerStereo(const MultiChannelResampler::Builder &builder)
+ : SincResampler(builder) {
+ assert(builder.getChannelCount() == STEREO);
+}
+
+void SincResamplerStereo::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * STEREO];
+ const int offset = mNumTaps * STEREO;
+ // Write each channel twice so we avoid having to wrap when running the FIR.
+ const float left = frame[0];
+ const float right = frame[1];
+ // Put ordered writes together.
+ dest[0] = left;
+ dest[1] = right;
+ dest[offset] = left;
+ dest[1 + offset] = right;
+}
+
+// Multiply input times windowed sinc function.
+void SincResamplerStereo::readFrame(float *frame) {
+ // Clear accumulator for mixing.
+ std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+ std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+ // Determine indices into coefficients table.
+ double tablePhase = getIntegerPhase() * mPhaseScaler;
+ int index1 = static_cast<int>(floor(tablePhase));
+ float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+ int index2 = (index1 + 1);
+ if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+ index2 = 0;
+ }
+ float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+ float *xFrame = &mX[mCursor * getChannelCount()];
+ for (int i = 0; i < mNumTaps; i++) {
+ float coefficient1 = *coefficients1++;
+ float coefficient2 = *coefficients2++;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float sample = *xFrame++;
+ mSingleFrame[channel] += sample * coefficient1;
+ mSingleFrame2[channel] += sample * coefficient2;
+ }
+ }
+
+ // Interpolate and copy to output.
+ float fraction = tablePhase - index1;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float low = mSingleFrame[channel];
+ float high = mSingleFrame2[channel];
+ frame[channel] = low + (fraction * (high - low));
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
new file mode 100644
index 0000000..7d49ec7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_STEREO_H
+#define OBOE_SINC_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "SincResampler.h"
+
+namespace resampler {
+
+class SincResamplerStereo : public SincResampler {
+public:
+ explicit SincResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+ virtual ~SincResamplerStereo() = default;
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index e96e134..38f3c24 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -37,18 +37,6 @@
: AudioStream() {
}
-AudioStreamLegacy::~AudioStreamLegacy() {
-}
-
-// Called from AudioTrack.cpp or AudioRecord.cpp
-static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
- AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
- streamLegacy->processCallback(event, info);
-}
-
-aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
- return AudioStreamLegacy_callback;
-}
aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer,
int32_t numFrames) {
@@ -76,84 +64,77 @@
return (int32_t) callDataCallbackFrames(buffer, numFrames);
}
-void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
- aaudio_data_callback_result_t callbackResult;
+
+void AudioStreamLegacy::onNewIAudioTrack() {
+ ALOGD("%s stream disconnected", __func__);
+ forceDisconnect();
+ mCallbackEnabled.store(false);
+}
+
+size_t AudioStreamLegacy::onMoreData(const android::AudioTrack::Buffer& buffer) {
// This illegal size can be used to tell AudioRecord or AudioTrack to stop calling us.
// This takes advantage of them killing the stream when they see a size out of range.
// That is an undocumented behavior.
// TODO add to API in AudioRecord and AudioTrack
const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
+ aaudio_data_callback_result_t callbackResult;
+ (void) checkForDisconnectRequest(true);
- switch (opcode) {
- case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
- (void) checkForDisconnectRequest(true);
-
- // Note that this code assumes an AudioTrack::Buffer is the same as
- // AudioRecord::Buffer
- // TODO define our own AudioBuffer and pass it from the subclasses.
- AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
- if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
- ALOGW("processCallbackCommon() data, stream disconnected");
- // This will kill the stream and prevent it from being restarted.
- // That is OK because the stream is disconnected.
- audioBuffer->size = SIZE_STOP_CALLBACKS;
- } else if (!mCallbackEnabled.load()) {
- ALOGW("processCallbackCommon() no data because callback disabled, set size=0");
- // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
- // prevent it from being restarted. This can occur because of a race condition
- // caused by Legacy callbacks running after the track is "stopped".
- audioBuffer->size = 0;
- } else {
- if (audioBuffer->frameCount == 0) {
- ALOGW("processCallbackCommon() data, frameCount is zero");
- return;
- }
-
- // If the caller specified an exact size then use a block size adapter.
- if (mBlockAdapter != nullptr) {
- int32_t byteCount = audioBuffer->frameCount * getBytesPerDeviceFrame();
- callbackResult = mBlockAdapter->processVariableBlock(
- (uint8_t *) audioBuffer->raw, byteCount);
- } else {
- // Call using the AAudio callback interface.
- callbackResult = callDataCallbackFrames((uint8_t *)audioBuffer->raw,
- audioBuffer->frameCount);
- }
- if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
- } else {
- if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
- ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
- } else {
- ALOGW("%s() callback returned invalid result = %d",
- __func__, callbackResult);
- }
- audioBuffer->size = 0;
- systemStopInternal();
- // Disable the callback just in case the system keeps trying to call us.
- mCallbackEnabled.store(false);
- }
-
- if (updateStateMachine() != AAUDIO_OK) {
- forceDisconnect();
- mCallbackEnabled.store(false);
- }
- }
+ // Note that this code assumes an AudioTrack::Buffer is the same as
+ // AudioRecord::Buffer
+ // TODO define our own AudioBuffer and pass it from the subclasses.
+ size_t written = buffer.size;
+ if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ ALOGW("%s() data, stream disconnected", __func__);
+ // This will kill the stream and prevent it from being restarted.
+ // That is OK because the stream is disconnected.
+ written = SIZE_STOP_CALLBACKS;
+ } else if (!mCallbackEnabled.load()) {
+ ALOGW("%s() no data because callback disabled, set size=0", __func__);
+ // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
+ // prevent it from being restarted. This can occur because of a race condition
+ // caused by Legacy callbacks running after the track is "stopped".
+ written = 0;
+ } else {
+ if (buffer.frameCount == 0) {
+ ALOGW("%s() data, frameCount is zero", __func__);
+ return written;
}
- break;
- // Stream got rerouted so we disconnect.
- case AAUDIO_CALLBACK_OPERATION_DISCONNECTED:
- ALOGD("processCallbackCommon() stream disconnected");
+ // If the caller specified an exact size then use a block size adapter.
+ if (mBlockAdapter != nullptr) {
+ int32_t byteCount = buffer.frameCount * getBytesPerDeviceFrame();
+ callbackResult = mBlockAdapter->processVariableBlock(
+ static_cast<uint8_t*>(buffer.raw), byteCount);
+ } else {
+ // Call using the AAudio callback interface.
+ callbackResult = callDataCallbackFrames(static_cast<uint8_t *>(buffer.raw),
+ buffer.frameCount);
+ }
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ written = buffer.frameCount * getBytesPerDeviceFrame();
+ } else {
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+ } else {
+ ALOGW("%s() callback returned invalid result = %d",
+ __func__, callbackResult);
+ }
+ written = 0;
+ systemStopInternal();
+ // Disable the callback just in case the system keeps trying to call us.
+ mCallbackEnabled.store(false);
+ }
+
+ if (updateStateMachine() != AAUDIO_OK) {
forceDisconnect();
mCallbackEnabled.store(false);
- break;
-
- default:
- break;
+ }
}
+ return written;
}
+
aaudio_result_t AudioStreamLegacy::checkForDisconnectRequest(bool errorCallbackEnabled) {
if (mRequestDisconnect.isRequested()) {
ALOGD("checkForDisconnectRequest() mRequestDisconnect acknowledged");
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 88ef270..c54d7e2 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -18,6 +18,7 @@
#define LEGACY_AUDIO_STREAM_LEGACY_H
#include <media/AudioTimestamp.h>
+#include <media/AudioTrack.h>
#include <media/AudioSystem.h>
#include <aaudio/AAudio.h>
@@ -30,8 +31,6 @@
namespace aaudio {
-typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
-
enum {
/**
* Request that the callback function should fill the data buffer of an output stream,
@@ -56,21 +55,17 @@
typedef int32_t aaudio_callback_operation_t;
-class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+class AudioStreamLegacy : public AudioStream,
+ public FixedBlockProcessor,
+ protected android::AudioTrack::IAudioTrackCallback {
public:
AudioStreamLegacy();
- virtual ~AudioStreamLegacy();
+ virtual ~AudioStreamLegacy() = default;
- aaudio_legacy_callback_t getLegacyCallback();
int32_t callDataCallbackFrames(uint8_t *buffer, int32_t numFrames);
- // This is public so it can be called from the C callback function.
- // This is called from the AudioTrack/AudioRecord client.
- virtual void processCallback(int event, void *info) = 0;
-
- void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
// Implement FixedBlockProcessor
int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
@@ -86,7 +81,8 @@
}
protected:
-
+ size_t onMoreData(const android::AudioTrack::Buffer& buffer) override;
+ void onNewIAudioTrack() override;
aaudio_result_t getBestTimestamp(clockid_t clockId,
int64_t *framePosition,
int64_t *timeNanoseconds,
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index dc66742..df7d4cf 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -37,6 +37,10 @@
using namespace android;
using namespace aaudio;
+static void sCallbackWrapper(int event, void* userData, void* info) {
+ static_cast<AudioStreamRecord*>(userData)->processCallback(event, info);
+}
+
AudioStreamRecord::AudioStreamRecord()
: AudioStreamLegacy()
, mFixedBlockWriter(*this)
@@ -65,11 +69,8 @@
const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
// TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
- audio_channel_in_mask_from_count(samplesPerFrame) :
- audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+ audio_channel_mask_t channelMask =
+ AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), true /*isInput*/);
size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
: builder.getBufferCapacity();
@@ -115,7 +116,7 @@
constexpr int32_t kMostLikelySampleRateForFast = 48000;
if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
&& perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
- && (samplesPerFrame <= 2) // FAST only for mono and stereo
+ && (audio_channel_count_from_in_mask(channelMask) <= 2) // FAST only for mono and stereo
&& (getSampleRate() == kMostLikelySampleRateForFast
|| getSampleRate() == AAUDIO_UNSPECIFIED)) {
setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
@@ -127,12 +128,12 @@
uint32_t notificationFrames = 0;
// Setup the callback if there is one.
- AudioRecord::callback_t callback = nullptr;
+ AudioRecord::legacy_callback_t callback = nullptr;
void *callbackData = nullptr;
AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
if (builder.getDataCallbackProc() != nullptr) {
streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
- callback = getLegacyCallback();
+ callback = sCallbackWrapper;
callbackData = this;
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
@@ -228,7 +229,9 @@
.set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(requestedFormat).c_str()).record();
// Get the actual values from the AudioRecord.
- setSamplesPerFrame(mAudioRecord->channelCount());
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ mAudioRecord->channelMask(), true /*isInput*/,
+ AAudio_isChannelIndexMask(getChannelMask())));
setSampleRate(mAudioRecord->getSampleRate());
setBufferCapacity(getBufferCapacityFromDevice());
setFramesPerBurst(getFramesPerBurstFromDevice());
@@ -354,14 +357,15 @@
void AudioStreamRecord::processCallback(int event, void *info) {
switch (event) {
case AudioRecord::EVENT_MORE_DATA:
- processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+ {
+ AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+ audioBuffer->size = onMoreData(*audioBuffer);
break;
-
+ }
// Stream got rerouted so we disconnect.
case AudioRecord::EVENT_NEW_IAUDIORECORD:
- processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+ onNewIAudioTrack();
break;
-
default:
break;
}
@@ -505,7 +509,7 @@
return (aaudio_result_t) framesRead;
}
-aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
+aaudio_result_t AudioStreamRecord::setBufferSize(int32_t /*requestedFrames*/)
{
return getBufferSize();
}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 692651d..5ce73f9 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -65,7 +65,9 @@
}
// This is public so it can be called from the C callback function.
- void processCallback(int event, void *info) override;
+ void processCallback(int event, void *info);
+
+ void processCallbackRecord(aaudio_callback_operation_t opcode, void *info);
int64_t incrementClientFrameCounter(int32_t frames) override {
return incrementFramesRead(frames);
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1d412c0..17a6d0c 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -66,13 +66,8 @@
const aaudio_session_id_t requestedSessionId = builder.getSessionId();
const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
- // Try to create an AudioTrack
- // Use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
- audio_channel_out_mask_from_count(samplesPerFrame) :
- audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+ audio_channel_mask_t channelMask =
+ AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), false /*isInput*/);
audio_output_flags_t flags;
aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -108,14 +103,12 @@
: getFormat();
// Setup the callback if there is one.
- AudioTrack::callback_t callback = nullptr;
- void *callbackData = nullptr;
+ wp<AudioTrack::IAudioTrackCallback> callback;
// Note that TRANSFER_SYNC does not allow FAST track
AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
if (builder.getDataCallbackProc() != nullptr) {
streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
- callback = getLegacyCallback();
- callbackData = this;
+ callback = wp<AudioTrack::IAudioTrackCallback>::fromExisting(this);
// If the total buffer size is unspecified then base the size on the burst size.
if (frameCount == 0
@@ -140,7 +133,9 @@
const audio_usage_t usage =
AAudioConvert_usageToInternal(builder.getUsage());
const audio_flags_mask_t attributesFlags =
- AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy());
+ AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy(),
+ builder.getSpatializationBehavior(),
+ builder.isContentSpatialized());
const audio_attributes_t attributes = {
.content_type = contentType,
@@ -160,13 +155,12 @@
frameCount,
flags,
callback,
- callbackData,
notificationFrames,
- 0, // DEFAULT sharedBuffer*/,
+ nullptr, // DEFAULT sharedBuffer*/,
false, // DEFAULT threadCanCallJava
sessionId,
streamTransferType,
- NULL, // DEFAULT audio_offload_info_t
+ nullptr, // DEFAULT audio_offload_info_t
AttributionSourceState(), // DEFAULT uid and pid
&attributes,
// WARNING - If doNotReconnect set true then audio stops after plugging and unplugging
@@ -199,7 +193,9 @@
doSetVolume();
// Get the actual values from the AudioTrack.
- setSamplesPerFrame(mAudioTrack->channelCount());
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ mAudioTrack->channelMask(), false /*isInput*/,
+ AAudio_isChannelIndexMask(getChannelMask())));
setFormat(mAudioTrack->format());
setDeviceFormat(mAudioTrack->format());
setSampleRate(mAudioTrack->getSampleRate());
@@ -218,7 +214,6 @@
mBlockAdapter = nullptr;
}
- setState(AAUDIO_STREAM_STATE_OPEN);
setDeviceId(mAudioTrack->getRoutedDeviceId());
aaudio_session_id_t actualSessionId =
@@ -251,6 +246,19 @@
"open() perfMode changed from %d to %d",
perfMode, actualPerformanceMode);
+ if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
+ ALOGE("%s - Open canceled since state = %d", __func__, getState());
+ if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED)
+ {
+ ALOGE("%s - Opening while state is disconnected", __func__);
+ safeReleaseClose();
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
+ safeReleaseClose();
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ setState(AAUDIO_STREAM_STATE_OPEN);
return AAUDIO_OK;
}
@@ -282,31 +290,19 @@
AudioStream::close_l();
}
-void AudioStreamTrack::processCallback(int event, void *info) {
- switch (event) {
- case AudioTrack::EVENT_MORE_DATA:
- processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
- break;
-
- // Stream got rerouted so we disconnect.
- case AudioTrack::EVENT_NEW_IAUDIOTRACK:
- // request stream disconnect if the restored AudioTrack has properties not matching
- // what was requested initially
- if (mAudioTrack->channelCount() != getSamplesPerFrame()
- || mAudioTrack->format() != getFormat()
- || mAudioTrack->getSampleRate() != getSampleRate()
- || mAudioTrack->getRoutedDeviceId() != getDeviceId()
- || getBufferCapacityFromDevice() != getBufferCapacity()
- || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
- processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
- }
- break;
-
- default:
- break;
+void AudioStreamTrack::onNewIAudioTrack() {
+ // Stream got rerouted so we disconnect.
+ // request stream disconnect if the restored AudioTrack has properties not matching
+ // what was requested initially
+ if (mAudioTrack->channelCount() != getSamplesPerFrame()
+ || mAudioTrack->format() != getFormat()
+ || mAudioTrack->getSampleRate() != getSampleRate()
+ || mAudioTrack->getRoutedDeviceId() != getDeviceId()
+ || getBufferCapacityFromDevice() != getBufferCapacity()
+ || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
+ AudioStreamLegacy::onNewIAudioTrack();
}
- return;
}
aaudio_result_t AudioStreamTrack::requestStart_l() {
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index f604871..0f4d72b 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -81,9 +81,6 @@
aaudio_result_t updateStateMachine() override;
- // This is public so it can be called from the C callback function.
- void processCallback(int event, void *info) override;
-
int64_t incrementClientFrameCounter(int32_t frames) override {
return incrementFramesWritten(frames);
}
@@ -100,6 +97,7 @@
int32_t getFramesPerBurstFromDevice() const override;
int32_t getBufferCapacityFromDevice() const override;
+ void onNewIAudioTrack() override;
private:
diff --git a/media/libaaudio/src/libaaudio.map.txt b/media/libaaudio/src/libaaudio.map.txt
index 1dd44d1..f45b816 100644
--- a/media/libaaudio/src/libaaudio.map.txt
+++ b/media/libaaudio/src/libaaudio.map.txt
@@ -25,6 +25,9 @@
AAudioStreamBuilder_setPrivacySensitive; # introduced=30
AAudioStreamBuilder_setPackageName; # introduced=31
AAudioStreamBuilder_setAttributionTag; # introduced=31
+ AAudioStreamBuilder_setChannelMask; # introduced=32
+ AAudioStreamBuilder_setSpatializationBehavior; # introduced=32
+ AAudioStreamBuilder_setIsContentSpatialized; # introduced=32
AAudioStreamBuilder_openStream;
AAudioStreamBuilder_delete;
AAudioStream_close;
@@ -61,6 +64,9 @@
AAudioStream_isMMapUsed;
AAudioStream_isPrivacySensitive; # introduced=30
AAudioStream_release; # introduced=30
+ AAudioStream_getChannelMask; # introduced=32
+ AAudioStream_getSpatializationBehavior; # introduced=32
+ AAudioStream_isContentSpatialized; # introduced=32
local:
*;
};
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index d795725..a0952fe 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -235,20 +235,46 @@
}
audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
- aaudio_allowed_capture_policy_t policy) {
+ aaudio_allowed_capture_policy_t policy,
+ aaudio_spatialization_behavior_t spatializationBehavior,
+ bool isContentSpatialized) {
+ audio_flags_mask_t flagsMask = AUDIO_FLAG_NONE;
switch (policy) {
case AAUDIO_UNSPECIFIED:
case AAUDIO_ALLOW_CAPTURE_BY_ALL:
- return AUDIO_FLAG_NONE;
+ // flagsMask is not modified
+ break;
case AAUDIO_ALLOW_CAPTURE_BY_SYSTEM:
- return AUDIO_FLAG_NO_MEDIA_PROJECTION;
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NO_MEDIA_PROJECTION);
+ break;
case AAUDIO_ALLOW_CAPTURE_BY_NONE:
- return static_cast<audio_flags_mask_t>(
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask |
AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NO_SYSTEM_CAPTURE);
+ break;
default:
- ALOGE("%s() 0x%08X unrecognized", __func__, policy);
- return AUDIO_FLAG_NONE; //
+ ALOGE("%s() 0x%08X unrecognized capture policy", __func__, policy);
+ // flagsMask is not modified
}
+
+ switch (spatializationBehavior) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+ // flagsMask is not modified
+ break;
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NEVER_SPATIALIZE);
+ break;
+ default:
+ ALOGE("%s() 0x%08X unrecognized spatialization behavior",
+ __func__, spatializationBehavior);
+ // flagsMask is not modified
+ }
+
+ if (isContentSpatialized) {
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_CONTENT_SPATIALIZED);
+ }
+
+ return flagsMask;
}
audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
@@ -256,6 +282,248 @@
return privacySensitive ? AUDIO_FLAG_CAPTURE_PRIVATE : AUDIO_FLAG_NONE;
}
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+ aaudio_channel_mask_t channelMask, bool isInput) {
+ if (isInput) {
+ switch (channelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ return AUDIO_CHANNEL_IN_MONO;
+ case AAUDIO_CHANNEL_STEREO:
+ return AUDIO_CHANNEL_IN_STEREO;
+ case AAUDIO_CHANNEL_FRONT_BACK:
+ return AUDIO_CHANNEL_IN_FRONT_BACK;
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ return AUDIO_CHANNEL_IN_2POINT0POINT2;
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ return AUDIO_CHANNEL_IN_2POINT1POINT2;
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ return AUDIO_CHANNEL_IN_3POINT0POINT2;
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ return AUDIO_CHANNEL_IN_3POINT1POINT2;
+ case AAUDIO_CHANNEL_5POINT1:
+ return AUDIO_CHANNEL_IN_5POINT1;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ } else {
+ switch (channelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ return AUDIO_CHANNEL_OUT_MONO;
+ case AAUDIO_CHANNEL_STEREO:
+ return AUDIO_CHANNEL_OUT_STEREO;
+ case AAUDIO_CHANNEL_2POINT1:
+ return AUDIO_CHANNEL_OUT_2POINT1;
+ case AAUDIO_CHANNEL_TRI:
+ return AUDIO_CHANNEL_OUT_TRI;
+ case AAUDIO_CHANNEL_TRI_BACK:
+ return AUDIO_CHANNEL_OUT_TRI_BACK;
+ case AAUDIO_CHANNEL_3POINT1:
+ return AUDIO_CHANNEL_OUT_3POINT1;
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ return AUDIO_CHANNEL_OUT_2POINT0POINT2;
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_2POINT1POINT2;
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ return AUDIO_CHANNEL_OUT_3POINT0POINT2;
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_3POINT1POINT2;
+ case AAUDIO_CHANNEL_QUAD:
+ return AUDIO_CHANNEL_OUT_QUAD;
+ case AAUDIO_CHANNEL_QUAD_SIDE:
+ return AUDIO_CHANNEL_OUT_QUAD_SIDE;
+ case AAUDIO_CHANNEL_SURROUND:
+ return AUDIO_CHANNEL_OUT_SURROUND;
+ case AAUDIO_CHANNEL_PENTA:
+ return AUDIO_CHANNEL_OUT_PENTA;
+ case AAUDIO_CHANNEL_5POINT1:
+ return AUDIO_CHANNEL_OUT_5POINT1;
+ case AAUDIO_CHANNEL_5POINT1_SIDE:
+ return AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+ case AAUDIO_CHANNEL_5POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_5POINT1POINT2;
+ case AAUDIO_CHANNEL_5POINT1POINT4:
+ return AUDIO_CHANNEL_OUT_5POINT1POINT4;
+ case AAUDIO_CHANNEL_6POINT1:
+ return AUDIO_CHANNEL_OUT_6POINT1;
+ case AAUDIO_CHANNEL_7POINT1:
+ return AUDIO_CHANNEL_OUT_7POINT1;
+ case AAUDIO_CHANNEL_7POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_7POINT1POINT2;
+ case AAUDIO_CHANNEL_7POINT1POINT4:
+ return AUDIO_CHANNEL_OUT_7POINT1POINT4;
+ // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+ // case AAUDIO_CHANNEL_9POINT1POINT4:
+ // return AUDIO_CHANNEL_OUT_9POINT1POINT4;
+ // case AAUDIO_CHANNEL_9POINT1POINT6:
+ // return AUDIO_CHANNEL_OUT_9POINT1POINT6;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ }
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+ audio_channel_mask_t channelMask, bool isInput) {
+ if (isInput) {
+ switch (channelMask) {
+ case AUDIO_CHANNEL_IN_MONO:
+ return AAUDIO_CHANNEL_MONO;
+ case AUDIO_CHANNEL_IN_STEREO:
+ return AAUDIO_CHANNEL_STEREO;
+ case AUDIO_CHANNEL_IN_FRONT_BACK:
+ return AAUDIO_CHANNEL_FRONT_BACK;
+ case AUDIO_CHANNEL_IN_2POINT0POINT2:
+ return AAUDIO_CHANNEL_2POINT0POINT2;
+ case AUDIO_CHANNEL_IN_2POINT1POINT2:
+ return AAUDIO_CHANNEL_2POINT1POINT2;
+ case AUDIO_CHANNEL_IN_3POINT0POINT2:
+ return AAUDIO_CHANNEL_3POINT0POINT2;
+ case AUDIO_CHANNEL_IN_3POINT1POINT2:
+ return AAUDIO_CHANNEL_3POINT1POINT2;
+ case AUDIO_CHANNEL_IN_5POINT1:
+ return AAUDIO_CHANNEL_5POINT1;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ } else {
+ switch (channelMask) {
+ case AUDIO_CHANNEL_OUT_MONO:
+ return AAUDIO_CHANNEL_MONO;
+ case AUDIO_CHANNEL_OUT_STEREO:
+ return AAUDIO_CHANNEL_STEREO;
+ case AUDIO_CHANNEL_OUT_2POINT1:
+ return AAUDIO_CHANNEL_2POINT1;
+ case AUDIO_CHANNEL_OUT_TRI:
+ return AAUDIO_CHANNEL_TRI;
+ case AUDIO_CHANNEL_OUT_TRI_BACK:
+ return AAUDIO_CHANNEL_TRI_BACK;
+ case AUDIO_CHANNEL_OUT_3POINT1:
+ return AAUDIO_CHANNEL_3POINT1;
+ case AUDIO_CHANNEL_OUT_2POINT0POINT2:
+ return AAUDIO_CHANNEL_2POINT0POINT2;
+ case AUDIO_CHANNEL_OUT_2POINT1POINT2:
+ return AAUDIO_CHANNEL_2POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_3POINT0POINT2:
+ return AAUDIO_CHANNEL_3POINT0POINT2;
+ case AUDIO_CHANNEL_OUT_3POINT1POINT2:
+ return AAUDIO_CHANNEL_3POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_QUAD:
+ return AAUDIO_CHANNEL_QUAD;
+ case AUDIO_CHANNEL_OUT_QUAD_SIDE:
+ return AAUDIO_CHANNEL_QUAD_SIDE;
+ case AUDIO_CHANNEL_OUT_SURROUND:
+ return AAUDIO_CHANNEL_SURROUND;
+ case AUDIO_CHANNEL_OUT_PENTA:
+ return AAUDIO_CHANNEL_PENTA;
+ case AUDIO_CHANNEL_OUT_5POINT1:
+ return AAUDIO_CHANNEL_5POINT1;
+ case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
+ return AAUDIO_CHANNEL_5POINT1_SIDE;
+ case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+ return AAUDIO_CHANNEL_5POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+ return AAUDIO_CHANNEL_5POINT1POINT4;
+ case AUDIO_CHANNEL_OUT_6POINT1:
+ return AAUDIO_CHANNEL_6POINT1;
+ case AUDIO_CHANNEL_OUT_7POINT1:
+ return AAUDIO_CHANNEL_7POINT1;
+ case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+ return AAUDIO_CHANNEL_7POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+ return AAUDIO_CHANNEL_7POINT1POINT4;
+ // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+ // case AUDIO_CHANNEL_OUT_9POINT1POINT4:
+ // return AAUDIO_CHANNEL_9POINT1POINT4;
+ // case AUDIO_CHANNEL_OUT_9POINT1POINT6:
+ // return AAUDIO_CHANNEL_9POINT1POINT6;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ }
+}
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask) {
+ return __builtin_popcount(channelMask & ~AAUDIO_CHANNEL_BIT_INDEX);
+}
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount) {
+ if (channelCount < 0 || channelCount > AUDIO_CHANNEL_COUNT_MAX) {
+ return AAUDIO_CHANNEL_INVALID;
+ }
+
+ if (channelCount == 0) {
+ return AAUDIO_UNSPECIFIED;
+ }
+
+ // Return index mask if the channel count is greater than 2.
+ return AAUDIO_CHANNEL_BIT_INDEX | ((1 << channelCount) - 1);
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+ audio_channel_mask_t channelMask) {
+ if (audio_channel_mask_get_representation(channelMask) != AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ ALOGE("%s() %#x not an index mask", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ return (channelMask & ~AUDIO_CHANNEL_INDEX_HDR) | AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+ aaudio_channel_mask_t channelMask) {
+ if (!AAudio_isChannelIndexMask(channelMask)) {
+ ALOGE("%s() %#x not an index mask", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ return audio_channel_mask_for_index_assignment_from_count(
+ AAudioConvert_channelMaskToCount(channelMask));
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+ audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired) {
+ if (audio_channel_mask_get_representation(channelMask) == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ return AAudioConvert_androidToAAudioChannelIndexMask(channelMask);
+ }
+ if (indexMaskRequired) {
+ // Require index mask, `channelMask` here is a position mask.
+ const int channelCount = isInput ? audio_channel_count_from_in_mask(channelMask)
+ : audio_channel_count_from_out_mask(channelMask);
+ return AAudioConvert_channelCountToMask(channelCount);
+ }
+ return AAudioConvert_androidToAAudioChannelLayoutMask(channelMask, isInput);
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+ aaudio_channel_mask_t channelMask, bool isInput) {
+ return AAudio_isChannelIndexMask(channelMask)
+ ? AAudioConvert_aaudioToAndroidChannelIndexMask(channelMask)
+ : AAudioConvert_aaudioToAndroidChannelLayoutMask(channelMask, isInput);
+}
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask) {
+ return (channelMask & AAUDIO_CHANNEL_BIT_INDEX) == AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+ aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput) {
+ if (channelMask != AAUDIO_UNSPECIFIED) {
+ if (AAudio_isChannelIndexMask(channelMask) && samplesPerFrame <= 2) {
+ // When it is index mask and the count is less than 3, use position mask
+ // instead of index mask for opening a stream. This may need to be revisited
+ // when making channel index mask public.
+ return isInput ? audio_channel_in_mask_from_count(samplesPerFrame)
+ : audio_channel_out_mask_from_count(samplesPerFrame);
+ }
+ return AAudioConvert_aaudioToAndroidChannelMask(channelMask, isInput);
+ }
+
+ // Return stereo when unspecified.
+ return isInput ? AUDIO_CHANNEL_IN_STEREO : AUDIO_CHANNEL_OUT_STEREO;
+}
+
int32_t AAudioConvert_framesToBytes(int32_t numFrames,
int32_t bytesPerFrame,
int32_t *sizeInBytes) {
@@ -276,45 +544,6 @@
return AAUDIO_OK;
}
-static int32_t AAudioProperty_getMMapProperty(const char *propName,
- int32_t defaultValue,
- const char * caller) {
- int32_t prop = property_get_int32(propName, defaultValue);
- switch (prop) {
- case AAUDIO_UNSPECIFIED:
- case AAUDIO_POLICY_NEVER:
- case AAUDIO_POLICY_ALWAYS:
- case AAUDIO_POLICY_AUTO:
- break;
- default:
- ALOGE("%s: invalid = %d", caller, prop);
- prop = defaultValue;
- break;
- }
- return prop;
-}
-
-int32_t AAudioProperty_getMMapPolicy() {
- return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
- AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMMapExclusivePolicy() {
- return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
- AAUDIO_UNSPECIFIED, __func__);
-}
-
-int32_t AAudioProperty_getMixerBursts() {
- const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
- const int32_t maxBursts = 1024; // arbitrary
- int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
- if (prop < 1 || prop > maxBursts) {
- ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
- prop = defaultBursts;
- }
- return prop;
-}
-
int32_t AAudioProperty_getWakeupDelayMicros() {
const int32_t minMicros = 0; // arbitrary
const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
@@ -345,18 +574,6 @@
return prop;
}
-int32_t AAudioProperty_getHardwareBurstMinMicros() {
- const int32_t defaultMicros = 1000; // arbitrary
- const int32_t maxMicros = 1000 * 1000; // arbitrary
- int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
- if (prop < 1 || prop > maxMicros) {
- ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
- prop, defaultMicros);
- prop = defaultMicros;
- }
- return prop;
-}
-
static int32_t AAudioProperty_getMMapOffsetMicros(const char *functionName,
const char *propertyName) {
const int32_t minMicros = -20000; // arbitrary
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 82eb77d..b59ce1c 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -91,35 +91,43 @@
* @return internal audio flags mask
*/
audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
- aaudio_allowed_capture_policy_t policy);
+ aaudio_allowed_capture_policy_t policy,
+ aaudio_spatialization_behavior_t spatializationBehavior,
+ bool isContentSpatialized);
audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
bool privacySensitive);
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+ aaudio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+ audio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+ audio_channel_mask_t channelMask);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+ aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+ audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+ aaudio_channel_mask_t channelMask, bool isInput);
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask);
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount);
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+ aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput);
+
// Note that this code may be replaced by Settings or by some other system configuration tool.
/**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapPolicy();
-#define AAUDIO_PROP_MMAP_POLICY "aaudio.mmap_policy"
-
-/**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
-int32_t AAudioProperty_getMMapExclusivePolicy();
-#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
-
-/**
- * Read system property.
- * @return number of bursts per AAudio service mixer cycle
- */
-int32_t AAudioProperty_getMixerBursts();
-#define AAUDIO_PROP_MIXER_BURSTS "aaudio.mixer_bursts"
-
-/**
* Read a system property that specifies the number of extra microseconds that a thread
* should sleep when waiting for another thread to service a FIFO. This is used
* to avoid the waking thread from being overly optimistic about the other threads
@@ -140,19 +148,6 @@
#define AAUDIO_PROP_MINIMUM_SLEEP_USEC "aaudio.minimum_sleep_usec"
/**
- * Read system property.
- * This is handy in case the DMA is bursting too quickly for the CPU to keep up.
- * For example, there may be a DMA burst every 100 usec but you only
- * want to feed the MMAP buffer every 2000 usec.
- *
- * This will affect the framesPerBurst for an MMAP stream.
- *
- * @return minimum number of microseconds for a MMAP HW burst
- */
-int32_t AAudioProperty_getHardwareBurstMinMicros();
-#define AAUDIO_PROP_HW_BURST_MIN_USEC "aaudio.hw_burst_min_usec"
-
-/**
* Read a system property that specifies an offset that will be added to MMAP timestamps.
* This can be used to correct bias in the timestamp.
* It can also be used to analyze the time distribution of the timestamp
@@ -198,7 +193,7 @@
* @return true if f() eventually returns true.
*/
static inline bool AAudio_tryUntilTrue(
- std::function<bool()> f, int times, int sleepMs) {
+ const std::function<bool()>& f, int times, int sleepMs) {
static const useconds_t US_PER_MS = 1000;
sleepMs = std::max(sleepMs, 0);
@@ -270,9 +265,7 @@
class Timestamp {
public:
- Timestamp()
- : mPosition(0)
- , mNanoseconds(0) {}
+ Timestamp() = default;
Timestamp(int64_t position, int64_t nanoseconds)
: mPosition(position)
, mNanoseconds(nanoseconds) {}
@@ -283,8 +276,8 @@
private:
// These cannot be const because we need to implement the copy assignment operator.
- int64_t mPosition;
- int64_t mNanoseconds;
+ int64_t mPosition{0};
+ int64_t mNanoseconds{0};
};
@@ -318,4 +311,36 @@
std::atomic<int> mRequested{0};
std::atomic<int> mAcknowledged{0};
};
+
+enum {
+ /**
+ * Audio channel index mask, only used internally.
+ */
+ AAUDIO_CHANNEL_BIT_INDEX = 0x80000000,
+ AAUDIO_CHANNEL_INDEX_MASK_1 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 1) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_2 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 2) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_3 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 3) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_4 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 4) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_5 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 5) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_6 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 6) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_7 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 7) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_8 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 8) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_9 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 9) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_10 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 10) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_11 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 11) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_12 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 12) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_13 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 13) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_14 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 14) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_15 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 15) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_16 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 16) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_17 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 17) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_18 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 18) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_19 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 19) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_20 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 20) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_21 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 21) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_22 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 22) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_23 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 23) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_24 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 24) - 1,
+};
+
#endif //UTILITY_AAUDIO_UTILITIES_H
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
index 4dc7e68..290e473 100644
--- a/media/libaaudio/src/utility/FixedBlockAdapter.h
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -35,7 +35,7 @@
class FixedBlockAdapter
{
public:
- FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+ explicit FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
: mFixedBlockProcessor(fixedBlockProcessor) {}
virtual ~FixedBlockAdapter() = default;
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
index 128dd52..dc82416 100644
--- a/media/libaaudio/src/utility/FixedBlockReader.h
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -30,7 +30,7 @@
class FixedBlockReader : public FixedBlockAdapter
{
public:
- FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+ explicit FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
virtual ~FixedBlockReader() = default;
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
index f1d917c..3e89b5d 100644
--- a/media/libaaudio/src/utility/FixedBlockWriter.h
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -28,7 +28,7 @@
class FixedBlockWriter : public FixedBlockAdapter
{
public:
- FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+ explicit FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
virtual ~FixedBlockWriter() = default;
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 63add4e..313ccbd 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -30,8 +30,8 @@
class MonotonicCounter {
public:
- MonotonicCounter() {};
- virtual ~MonotonicCounter() {};
+ MonotonicCounter() = default;
+ virtual ~MonotonicCounter() = default;
/**
* @return current value of the counter
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 98e9727..ea00a5a 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -48,7 +48,7 @@
shared_libs: ["libaaudio_internal"],
}
-cc_test {
+cc_binary {
name: "test_timestamps",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_timestamps.cpp"],
@@ -60,121 +60,71 @@
name: "test_open_params",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_open_params.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_no_close",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_no_close.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_aaudio_recovery",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_recovery.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_n_streams",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_n_streams.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_bad_disconnect",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_bad_disconnect.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_various",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_various.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_session_id",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_session_id.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_aaudio_monkey",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_aaudio_monkey.cpp"],
header_libs: ["libaaudio_example_utils"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_attributes",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_attributes.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_interference",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_interference.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
@@ -196,28 +146,18 @@
],
}
-cc_test {
+cc_binary {
name: "test_return_stop",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_return_stop.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_callback_race",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_callback_race.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
@@ -238,7 +178,7 @@
],
}
-cc_test {
+cc_binary {
name: "test_steal_exclusive",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_steal_exclusive.cpp"],
@@ -251,15 +191,9 @@
],
}
-
-cc_test {
+cc_binary {
name: "test_disconnect_race",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_disconnect_race.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
index d540866..b88d562 100644
--- a/media/libaaudio/tests/test_attributes.cpp
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -16,6 +16,10 @@
// Test AAudio attributes such as Usage, ContentType and InputPreset.
+// TODO Many of these tests are duplicates of CTS tests in
+// "test_aaudio_attributes.cpp". That other file is more current.
+// So these tests could be deleted.
+
#include <stdio.h>
#include <unistd.h>
@@ -91,7 +95,7 @@
aaudio_allowed_capture_policy_t expectedCapturePolicy =
(capturePolicy == DONT_SET || capturePolicy == AAUDIO_UNSPECIFIED)
? AAUDIO_ALLOW_CAPTURE_BY_ALL // default
- : preset;
+ : capturePolicy;
EXPECT_EQ(expectedCapturePolicy, AAudioStream_getAllowedCapturePolicy(aaudioStream));
bool expectedPrivacyMode =
@@ -132,10 +136,7 @@
AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
AAUDIO_USAGE_GAME,
AAUDIO_USAGE_ASSISTANT,
- AAUDIO_SYSTEM_USAGE_EMERGENCY,
- AAUDIO_SYSTEM_USAGE_SAFETY,
- AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS,
- AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT
+ // Note that the AAUDIO_SYSTEM_USAGE_* values requires special permission.
};
static const aaudio_content_type_t sContentypes[] = {
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index d563a7e..611cbf7 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -76,31 +76,40 @@
}
TEST(test_flowgraph, module_ramp_linear) {
+ constexpr int singleNumOutput = 1;
constexpr int rampSize = 5;
constexpr int numOutput = 100;
constexpr float value = 1.0f;
- constexpr float target = 100.0f;
+ constexpr float initialTarget = 10.0f;
+ constexpr float finalTarget = 100.0f;
+ constexpr float tolerance = 0.0001f; // arbitrary
float output[numOutput] = {};
RampLinear rampLinear{1};
SinkFloat sinkFloat{1};
rampLinear.input.setValue(value);
rampLinear.setLengthInFrames(rampSize);
- rampLinear.setTarget(target);
- rampLinear.forceCurrent(0.0f);
-
rampLinear.output.connect(&sinkFloat.input);
+ // Check that the values go to the initial target instantly.
+ rampLinear.setTarget(initialTarget);
+ int32_t singleNumRead = sinkFloat.read(output, singleNumOutput);
+ ASSERT_EQ(singleNumRead, singleNumOutput);
+ EXPECT_NEAR(value * initialTarget, output[0], tolerance);
+
+ // Now set target and check that the linear ramp works as expected.
+ rampLinear.setTarget(finalTarget);
int32_t numRead = sinkFloat.read(output, numOutput);
+ const float incrementSize = (finalTarget - initialTarget) / rampSize;
ASSERT_EQ(numOutput, numRead);
- constexpr float tolerance = 0.0001f; // arbitrary
+
int i = 0;
for (; i < rampSize; i++) {
- float expected = i * value * target / rampSize;
+ float expected = value * (initialTarget + i * incrementSize);
EXPECT_NEAR(expected, output[i], tolerance);
}
for (; i < numOutput; i++) {
- float expected = value * target;
+ float expected = value * finalTarget;
EXPECT_NEAR(expected, output[i], tolerance);
}
}
diff --git a/media/libaaudio/tests/test_steal_exclusive.cpp b/media/libaaudio/tests/test_steal_exclusive.cpp
index 05c560d..5cb005c 100644
--- a/media/libaaudio/tests/test_steal_exclusive.cpp
+++ b/media/libaaudio/tests/test_steal_exclusive.cpp
@@ -110,6 +110,10 @@
mOpenDelayMillis = openDelayMillis;
}
+ void setCloseEnabled(bool enabled) {
+ mCloseEnabled = enabled;
+ }
+
void restartStream() {
int retriesLeft = mMaxRetries;
aaudio_result_t result;
@@ -189,10 +193,12 @@
std::lock_guard<std::mutex> lock(mLock);
aaudio_result_t result = AAUDIO_OK;
if (mStream != nullptr) {
- result = AAudioStream_close(mStream);
- if (result != AAUDIO_OK) {
- printf("AAudioStream_close returned %s\n",
- AAudio_convertResultToText(result));
+ if (mCloseEnabled) {
+ result = AAudioStream_close(mStream);
+ printf("AAudioStream_close() returned %s\n",
+ AAudio_convertResultToText(result));
+ } else {
+ printf("AAudioStream_close() DISABLED!\n");
}
mStream = nullptr;
}
@@ -287,6 +293,7 @@
std::string mName;
int mMaxRetries = 1;
int mOpenDelayMillis = 0;
+ bool mCloseEnabled = true;
};
// Callback function that fills the audio output buffer.
@@ -319,11 +326,12 @@
}
static void s_usage() {
- printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] -s\n");
+ printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] -s -c{flag}\n");
printf(" -i direction INPUT, otherwise OUTPUT\n");
- printf(" -d delay open by milliseconds, default = 0\n");
- printf(" -r max retries in the error callback, default = 1\n");
+ printf(" -d Delay open by milliseconds, default = 0\n");
+ printf(" -r max Retries in the error callback, default = 1\n");
printf(" -s try to open in SHARED mode\n");
+ printf(" -c enable or disabling Closing of the stream with 0/1, default = 1\n");
}
int main(int argc, char ** argv) {
@@ -334,6 +342,7 @@
int errorCount = 0;
int maxRetries = 1;
int openDelayMillis = 0;
+ bool closeEnabled = true;
aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
// Make printf print immediately so that debug info is not stuck
@@ -348,6 +357,9 @@
if (arg[0] == '-') {
char option = arg[1];
switch (option) {
+ case 'c':
+ closeEnabled = atoi(&arg[2]) != 0;
+ break;
case 'd':
openDelayMillis = atoi(&arg[2]);
break;
@@ -376,6 +388,8 @@
thief.setOpenDelayMillis(openDelayMillis);
victim.setMaxRetries(maxRetries);
thief.setMaxRetries(maxRetries);
+ victim.setCloseEnabled(closeEnabled);
+ thief.setCloseEnabled(closeEnabled);
result = victim.openAudioStream(direction, requestedSharingMode);
if (result != AAUDIO_OK) {
@@ -442,7 +456,7 @@
}
LOGI("Both streams running. Ask user to plug in headset. ====");
- printf("\n====\nPlease PLUG IN A HEADSET now!\n====\n\n");
+ printf("\n====\nPlease PLUG IN A HEADSET now! - OPTIONAL\n====\n\n");
if (result == AAUDIO_OK) {
const int watchLoops = DUET_DURATION_MSEC / SLEEP_DURATION_MSEC;
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
index cbf863f..b68fc7b 100644
--- a/media/libaaudio/tests/test_various.cpp
+++ b/media/libaaudio/tests/test_various.cpp
@@ -25,6 +25,7 @@
#include <gtest/gtest.h>
#include <unistd.h>
+#include <thread>
// Callback function that does nothing.
aaudio_data_callback_result_t NoopDataCallbackProc(
@@ -51,6 +52,7 @@
}
constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
+constexpr int64_t MICROS_PER_MILLISECOND = 1000;
void checkReleaseThenClose(aaudio_performance_mode_t perfMode,
aaudio_sharing_mode_t sharingMode,
@@ -762,6 +764,58 @@
checkCallbackOnce(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
}
+void waitForStateChangeToClosingorClosed(AAudioStream **stream, std::atomic<bool>* isReady)
+{
+ *isReady = true;
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(*stream,
+ AAUDIO_STREAM_STATE_OPEN, &state,
+ 10000 * NANOS_PER_MILLISECOND));
+ if ((state != AAUDIO_STREAM_STATE_CLOSING) && (state != AAUDIO_STREAM_STATE_CLOSED)){
+ FAIL() << "ERROR - State not closing or closed. Current state: " <<
+ AAudio_convertStreamStateToText(state);
+ }
+}
+
+void testWaitForStateChangeClose(aaudio_performance_mode_t perfMode) {
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+ // Verify Open State
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+ std::atomic<bool> isWaitThreadReady{false};
+
+ // Spawn a new thread to wait for the state change
+ std::thread waitThread (waitForStateChangeToClosingorClosed, &aaudioStream,
+ &isWaitThreadReady);
+
+ // Wait for worker thread to be ready
+ while (!isWaitThreadReady) {
+ usleep(MICROS_PER_MILLISECOND);
+ }
+ // Sleep an additional millisecond to make sure waitForAudioThread is called
+ usleep(MICROS_PER_MILLISECOND);
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+ waitThread.join();
+}
+
+TEST(test_various, wait_for_state_change_close_none) {
+ testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_various, wait_for_state_change_close_lowlat) {
+ testWaitForStateChangeClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
// ************************************************************
struct WakeUpCallbackData {
void wakeOther() {
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 321e7f9..f81aa87 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -14,6 +14,11 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
#define LOG_TAG "AidlConversion"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -21,6 +26,7 @@
#include "media/AidlConversion.h"
#include <media/ShmemCompat.h>
+#include <media/stagefright/foundation/MediaDefs.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
// Utilities
@@ -28,6 +34,40 @@
namespace android {
using base::unexpected;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioContentType;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioEncapsulationMetadataType;
+using media::audio::common::AudioEncapsulationMode;
+using media::audio::common::AudioEncapsulationType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::AudioGain;
+using media::audio::common::AudioGainConfig;
+using media::audio::common::AudioGainMode;
+using media::audio::common::AudioInputFlags;
+using media::audio::common::AudioIoFlags;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioOutputFlags;
+using media::audio::common::AudioPortDeviceExt;
+using media::audio::common::AudioPortExt;
+using media::audio::common::AudioPortMixExt;
+using media::audio::common::AudioPortMixExtUseCase;
+using media::audio::common::AudioProfile;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStandard;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::ExtraAudioDescriptor;
+using media::audio::common::Int;
+using media::audio::common::PcmType;
namespace {
@@ -219,75 +259,7 @@
return std::string(legacy.c_str());
}
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
- media::AudioPortConfigType aidl) {
- switch (aidl) {
- case media::AudioPortConfigType::SAMPLE_RATE:
- return AUDIO_PORT_CONFIG_SAMPLE_RATE;
- case media::AudioPortConfigType::CHANNEL_MASK:
- return AUDIO_PORT_CONFIG_CHANNEL_MASK;
- case media::AudioPortConfigType::FORMAT:
- return AUDIO_PORT_CONFIG_FORMAT;
- case media::AudioPortConfigType::GAIN:
- return AUDIO_PORT_CONFIG_GAIN;
- case media::AudioPortConfigType::FLAGS:
- return AUDIO_PORT_CONFIG_FLAGS;
- }
- return unexpected(BAD_VALUE);
-}
-
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
- int32_t legacy) {
- switch (legacy) {
- case AUDIO_PORT_CONFIG_SAMPLE_RATE:
- return media::AudioPortConfigType::SAMPLE_RATE;
- case AUDIO_PORT_CONFIG_CHANNEL_MASK:
- return media::AudioPortConfigType::CHANNEL_MASK;
- case AUDIO_PORT_CONFIG_FORMAT:
- return media::AudioPortConfigType::FORMAT;
- case AUDIO_PORT_CONFIG_GAIN:
- return media::AudioPortConfigType::GAIN;
- case AUDIO_PORT_CONFIG_FLAGS:
- return media::AudioPortConfigType::FLAGS;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl) {
- return convertBitmask<unsigned int, int32_t, int, media::AudioPortConfigType>(
- aidl, aidl2legacy_AudioPortConfigType_int32_t,
- // AudioPortConfigType enum is index-based.
- indexToEnum_index<media::AudioPortConfigType>,
- // AUDIO_PORT_CONFIG_* flags are mask-based.
- enumToMask_bitmask<unsigned int, int>);
-}
-
-ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy) {
- return convertBitmask<int32_t, unsigned int, media::AudioPortConfigType, int>(
- legacy, legacy2aidl_int32_t_AudioPortConfigType,
- // AUDIO_PORT_CONFIG_* flags are mask-based.
- indexToEnum_bitmask<unsigned>,
- // AudioPortConfigType enum is index-based.
- enumToMask_index<int32_t, media::AudioPortConfigType>);
-}
-
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl) {
- // TODO(ytai): should we convert bit-by-bit?
- // One problem here is that the representation is both opaque and is different based on the
- // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
- return convertReinterpret<audio_channel_mask_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy) {
- // TODO(ytai): should we convert bit-by-bit?
- // One problem here is that the representation is both opaque and is different based on the
- // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
media::AudioIoConfigEvent aidl) {
switch (aidl) {
case media::AudioIoConfigEvent::OUTPUT_REGISTERED:
@@ -312,8 +284,8 @@
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
- audio_io_config_event legacy) {
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+ audio_io_config_event_t legacy) {
switch (legacy) {
case AUDIO_OUTPUT_REGISTERED:
return media::AudioIoConfigEvent::OUTPUT_REGISTERED;
@@ -393,81 +365,1024 @@
return unexpected(BAD_VALUE);
}
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
- media::audio::common::AudioFormat aidl) {
- // This relies on AudioFormat being kept in sync with audio_format_t.
- static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
- return static_cast<audio_format_t>(aidl);
+namespace {
+
+namespace detail {
+using AudioChannelBitPair = std::pair<audio_channel_mask_t, int>;
+using AudioChannelBitPairs = std::vector<AudioChannelBitPair>;
+using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
+using AudioChannelPairs = std::vector<AudioChannelPair>;
+using AudioDevicePair = std::pair<audio_devices_t, AudioDeviceDescription>;
+using AudioDevicePairs = std::vector<AudioDevicePair>;
+using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
+using AudioFormatPairs = std::vector<AudioFormatPair>;
}
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
+const detail::AudioChannelBitPairs& getInAudioChannelBits() {
+ static const detail::AudioChannelBitPairs pairs = {
+ { AUDIO_CHANNEL_IN_LEFT, AudioChannelLayout::CHANNEL_FRONT_LEFT },
+ { AUDIO_CHANNEL_IN_RIGHT, AudioChannelLayout::CHANNEL_FRONT_RIGHT },
+ // AUDIO_CHANNEL_IN_FRONT is at the end
+ { AUDIO_CHANNEL_IN_BACK, AudioChannelLayout::CHANNEL_BACK_CENTER },
+ // AUDIO_CHANNEL_IN_*_PROCESSED not supported
+ // AUDIO_CHANNEL_IN_PRESSURE not supported
+ // AUDIO_CHANNEL_IN_*_AXIS not supported
+ // AUDIO_CHANNEL_IN_VOICE_* not supported
+ { AUDIO_CHANNEL_IN_BACK_LEFT, AudioChannelLayout::CHANNEL_BACK_LEFT },
+ { AUDIO_CHANNEL_IN_BACK_RIGHT, AudioChannelLayout::CHANNEL_BACK_RIGHT },
+ { AUDIO_CHANNEL_IN_CENTER, AudioChannelLayout::CHANNEL_FRONT_CENTER },
+ { AUDIO_CHANNEL_IN_LOW_FREQUENCY, AudioChannelLayout::CHANNEL_LOW_FREQUENCY },
+ { AUDIO_CHANNEL_IN_TOP_LEFT, AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT },
+ { AUDIO_CHANNEL_IN_TOP_RIGHT, AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT },
+ // When going from aidl to legacy, IN_CENTER is used
+ { AUDIO_CHANNEL_IN_FRONT, AudioChannelLayout::CHANNEL_FRONT_CENTER }
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getInAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_INPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
+ }
+
+ DEFINE_INPUT_LAYOUT(MONO),
+ DEFINE_INPUT_LAYOUT(STEREO),
+ DEFINE_INPUT_LAYOUT(FRONT_BACK),
+ // AUDIO_CHANNEL_IN_6 not supported
+ DEFINE_INPUT_LAYOUT(2POINT0POINT2),
+ DEFINE_INPUT_LAYOUT(2POINT1POINT2),
+ DEFINE_INPUT_LAYOUT(3POINT0POINT2),
+ DEFINE_INPUT_LAYOUT(3POINT1POINT2),
+ DEFINE_INPUT_LAYOUT(5POINT1)
+#undef DEFINE_INPUT_LAYOUT
+ };
+ return pairs;
+}
+
+const detail::AudioChannelBitPairs& getOutAudioChannelBits() {
+ static const detail::AudioChannelBitPairs pairs = {
+#define DEFINE_OUTPUT_BITS(n) \
+ { AUDIO_CHANNEL_OUT_##n, AudioChannelLayout::CHANNEL_##n }
+
+ DEFINE_OUTPUT_BITS(FRONT_LEFT),
+ DEFINE_OUTPUT_BITS(FRONT_RIGHT),
+ DEFINE_OUTPUT_BITS(FRONT_CENTER),
+ DEFINE_OUTPUT_BITS(LOW_FREQUENCY),
+ DEFINE_OUTPUT_BITS(BACK_LEFT),
+ DEFINE_OUTPUT_BITS(BACK_RIGHT),
+ DEFINE_OUTPUT_BITS(FRONT_LEFT_OF_CENTER),
+ DEFINE_OUTPUT_BITS(FRONT_RIGHT_OF_CENTER),
+ DEFINE_OUTPUT_BITS(BACK_CENTER),
+ DEFINE_OUTPUT_BITS(SIDE_LEFT),
+ DEFINE_OUTPUT_BITS(SIDE_RIGHT),
+ DEFINE_OUTPUT_BITS(TOP_CENTER),
+ DEFINE_OUTPUT_BITS(TOP_FRONT_LEFT),
+ DEFINE_OUTPUT_BITS(TOP_FRONT_CENTER),
+ DEFINE_OUTPUT_BITS(TOP_FRONT_RIGHT),
+ DEFINE_OUTPUT_BITS(TOP_BACK_LEFT),
+ DEFINE_OUTPUT_BITS(TOP_BACK_CENTER),
+ DEFINE_OUTPUT_BITS(TOP_BACK_RIGHT),
+ DEFINE_OUTPUT_BITS(TOP_SIDE_LEFT),
+ DEFINE_OUTPUT_BITS(TOP_SIDE_RIGHT),
+ DEFINE_OUTPUT_BITS(BOTTOM_FRONT_LEFT),
+ DEFINE_OUTPUT_BITS(BOTTOM_FRONT_CENTER),
+ DEFINE_OUTPUT_BITS(BOTTOM_FRONT_RIGHT),
+ DEFINE_OUTPUT_BITS(LOW_FREQUENCY_2),
+ DEFINE_OUTPUT_BITS(FRONT_WIDE_LEFT),
+ DEFINE_OUTPUT_BITS(FRONT_WIDE_RIGHT),
+#undef DEFINE_OUTPUT_BITS
+ { AUDIO_CHANNEL_OUT_HAPTIC_A, AudioChannelLayout::CHANNEL_HAPTIC_A },
+ { AUDIO_CHANNEL_OUT_HAPTIC_B, AudioChannelLayout::CHANNEL_HAPTIC_B }
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getOutAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_OUTPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_OUT_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
+ }
+
+ DEFINE_OUTPUT_LAYOUT(MONO),
+ DEFINE_OUTPUT_LAYOUT(STEREO),
+ DEFINE_OUTPUT_LAYOUT(2POINT1),
+ DEFINE_OUTPUT_LAYOUT(TRI),
+ DEFINE_OUTPUT_LAYOUT(TRI_BACK),
+ DEFINE_OUTPUT_LAYOUT(3POINT1),
+ DEFINE_OUTPUT_LAYOUT(2POINT0POINT2),
+ DEFINE_OUTPUT_LAYOUT(2POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(3POINT0POINT2),
+ DEFINE_OUTPUT_LAYOUT(3POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(QUAD),
+ DEFINE_OUTPUT_LAYOUT(QUAD_SIDE),
+ DEFINE_OUTPUT_LAYOUT(SURROUND),
+ DEFINE_OUTPUT_LAYOUT(PENTA),
+ DEFINE_OUTPUT_LAYOUT(5POINT1),
+ DEFINE_OUTPUT_LAYOUT(5POINT1_SIDE),
+ DEFINE_OUTPUT_LAYOUT(5POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(5POINT1POINT4),
+ DEFINE_OUTPUT_LAYOUT(6POINT1),
+ DEFINE_OUTPUT_LAYOUT(7POINT1),
+ DEFINE_OUTPUT_LAYOUT(7POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(7POINT1POINT4),
+ DEFINE_OUTPUT_LAYOUT(13POINT_360RA),
+ DEFINE_OUTPUT_LAYOUT(22POINT2),
+ DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_A),
+ DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_A),
+ DEFINE_OUTPUT_LAYOUT(HAPTIC_AB),
+ DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_AB),
+ DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_AB)
+#undef DEFINE_OUTPUT_LAYOUT
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_VOICE_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_VOICE_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>( \
+ AudioChannelLayout::VOICE_##n) \
+ }
+ DEFINE_VOICE_LAYOUT(UPLINK_MONO),
+ DEFINE_VOICE_LAYOUT(DNLINK_MONO),
+ DEFINE_VOICE_LAYOUT(CALL_MONO)
+#undef DEFINE_VOICE_LAYOUT
+ };
+ return pairs;
+}
+
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
+ const std::string& connection = "") {
+ AudioDeviceDescription result;
+ result.type = type;
+ result.connection = connection;
+ return result;
+}
+
+void append_AudioDeviceDescription(detail::AudioDevicePairs& pairs,
+ audio_devices_t inputType, audio_devices_t outputType,
+ AudioDeviceType inType, AudioDeviceType outType,
+ const std::string& connection = "") {
+ pairs.push_back(std::make_pair(inputType, make_AudioDeviceDescription(inType, connection)));
+ pairs.push_back(std::make_pair(outputType, make_AudioDeviceDescription(outType, connection)));
+}
+
+const detail::AudioDevicePairs& getAudioDevicePairs() {
+ static const detail::AudioDevicePairs pairs = []() {
+ detail::AudioDevicePairs pairs = {{
+ {
+ AUDIO_DEVICE_NONE, AudioDeviceDescription{}
+ },
+ {
+ AUDIO_DEVICE_OUT_EARPIECE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER_EARPIECE)
+ },
+ {
+ AUDIO_DEVICE_OUT_SPEAKER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER)
+ },
+ {
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_HEADPHONE,
+ AudioDeviceDescription::CONNECTION_ANALOG())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_BT_SCO())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_CARKIT,
+ AudioDeviceDescription::CONNECTION_BT_SCO())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_HEADPHONE,
+ AudioDeviceDescription::CONNECTION_BT_A2DP())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER,
+ AudioDeviceDescription::CONNECTION_BT_A2DP())
+ },
+ {
+ AUDIO_DEVICE_OUT_TELEPHONY_TX, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_TELEPHONY_TX)
+ },
+ {
+ AUDIO_DEVICE_OUT_AUX_LINE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_LINE_AUX)
+ },
+ {
+ AUDIO_DEVICE_OUT_SPEAKER_SAFE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER_SAFE)
+ },
+ {
+ AUDIO_DEVICE_OUT_HEARING_AID, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_HEARING_AID,
+ AudioDeviceDescription::CONNECTION_WIRELESS())
+ },
+ {
+ AUDIO_DEVICE_OUT_ECHO_CANCELLER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_ECHO_CANCELLER)
+ },
+ {
+ AUDIO_DEVICE_OUT_BLE_SPEAKER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER,
+ AudioDeviceDescription::CONNECTION_BT_LE())
+ },
+ // AUDIO_DEVICE_IN_AMBIENT and IN_COMMUNICATION are removed since they were deprecated.
+ {
+ AUDIO_DEVICE_IN_BUILTIN_MIC, make_AudioDeviceDescription(
+ AudioDeviceType::IN_MICROPHONE)
+ },
+ {
+ AUDIO_DEVICE_IN_BACK_MIC, make_AudioDeviceDescription(
+ AudioDeviceType::IN_MICROPHONE_BACK)
+ },
+ {
+ AUDIO_DEVICE_IN_TELEPHONY_RX, make_AudioDeviceDescription(
+ AudioDeviceType::IN_TELEPHONY_RX)
+ },
+ {
+ AUDIO_DEVICE_IN_TV_TUNER, make_AudioDeviceDescription(
+ AudioDeviceType::IN_TV_TUNER)
+ },
+ {
+ AUDIO_DEVICE_IN_LOOPBACK, make_AudioDeviceDescription(
+ AudioDeviceType::IN_LOOPBACK)
+ },
+ {
+ AUDIO_DEVICE_IN_BLUETOOTH_BLE, make_AudioDeviceDescription(
+ AudioDeviceType::IN_DEVICE,
+ AudioDeviceDescription::CONNECTION_BT_LE())
+ },
+ {
+ AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
+ AudioDeviceType::IN_ECHO_REFERENCE)
+ }
+ }};
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_DEFAULT, AUDIO_DEVICE_OUT_DEFAULT,
+ AudioDeviceType::IN_DEFAULT, AudioDeviceType::OUT_DEFAULT);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_ANALOG());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_BT_SCO());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI, AUDIO_DEVICE_OUT_HDMI,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_HDMI());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AudioDeviceType::IN_SUBMIX, AudioDeviceType::OUT_SUBMIX);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
+ AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+ AudioDeviceDescription::CONNECTION_ANALOG());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
+ AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+ AudioDeviceDescription::CONNECTION_USB());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_ACCESSORY,
+ AudioDeviceType::IN_ACCESSORY, AudioDeviceType::OUT_ACCESSORY,
+ AudioDeviceDescription::CONNECTION_USB());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_OUT_USB_DEVICE,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_USB());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_FM_TUNER, AUDIO_DEVICE_OUT_FM,
+ AudioDeviceType::IN_FM_TUNER, AudioDeviceType::OUT_FM);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_LINE, AUDIO_DEVICE_OUT_LINE,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_ANALOG());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_SPDIF, AUDIO_DEVICE_OUT_SPDIF,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_SPDIF());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_BT_A2DP());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_IP, AUDIO_DEVICE_OUT_IP,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_IP_V4());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_OUT_BUS,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_BUS());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_PROXY, AUDIO_DEVICE_OUT_PROXY,
+ AudioDeviceType::IN_AFE_PROXY, AudioDeviceType::OUT_AFE_PROXY);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_USB());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_ARC,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_HDMI_ARC());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI_EARC, AUDIO_DEVICE_OUT_HDMI_EARC,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ AudioDeviceDescription::CONNECTION_HDMI_EARC());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_BT_LE());
+ return pairs;
+ }();
+ return pairs;
+}
+
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+ AudioFormatDescription result;
+ result.type = type;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+ auto result = make_AudioFormatDescription(AudioFormatType::PCM);
+ result.pcm = pcm;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ AudioFormatDescription result;
+ result.encoding = encoding;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
+ const std::string& encoding) {
+ auto result = make_AudioFormatDescription(encoding);
+ result.pcm = transport;
+ return result;
+}
+
+const detail::AudioFormatPairs& getAudioFormatPairs() {
+ static const detail::AudioFormatPairs pairs = {{
+ {
+ AUDIO_FORMAT_INVALID,
+ make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)
+ },
+ {
+ AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}
+ },
+ {
+ AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)
+ },
+ {
+ AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)
+ },
+ {
+ AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
+ },
+ {
+ AUDIO_FORMAT_AMR_NB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_NB)
+ },
+ {
+ AUDIO_FORMAT_AMR_WB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB)
+ },
+ {
+ AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MP4)
+ },
+ {
+ AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MAIN)
+ },
+ {
+ AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LC)
+ },
+ {
+ AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SSR)
+ },
+ {
+ AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LTP)
+ },
+ {
+ AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V1)
+ },
+ {
+ AUDIO_FORMAT_AAC_SCALABLE,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE)
+ },
+ {
+ AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ERLC)
+ },
+ {
+ AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LD)
+ },
+ {
+ AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V2)
+ },
+ {
+ AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ELD)
+ },
+ {
+ AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_XHE)
+ },
+ // AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are removed since they were deprecated long time
+ // ago.
+ {
+ AUDIO_FORMAT_VORBIS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_VORBIS)
+ },
+ {
+ AUDIO_FORMAT_OPUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_OPUS)
+ },
+ {
+ AUDIO_FORMAT_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC3)
+ },
+ {
+ AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3)
+ },
+ {
+ AUDIO_FORMAT_E_AC3_JOC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3_JOC)
+ },
+ {
+ AUDIO_FORMAT_DTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS)
+ },
+ {
+ AUDIO_FORMAT_DTS_HD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_HD)
+ },
+ // In the future, we would like to represent encapsulated bitstreams as
+ // nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
+ // specify the format of the encapsulated bitstream.
+ {
+ AUDIO_FORMAT_IEC61937,
+ make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
+ },
+ {
+ AUDIO_FORMAT_DOLBY_TRUEHD,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD)
+ },
+ {
+ AUDIO_FORMAT_EVRC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRC)
+ },
+ {
+ AUDIO_FORMAT_EVRCB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCB)
+ },
+ {
+ AUDIO_FORMAT_EVRCWB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCWB)
+ },
+ {
+ AUDIO_FORMAT_EVRCNW, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCNW)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADIF)
+ },
+ {
+ AUDIO_FORMAT_WMA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_WMA)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_WMA_PRO, make_AudioFormatDescription("audio/x-ms-wma.pro")
+ },
+ {
+ AUDIO_FORMAT_AMR_WB_PLUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)
+ },
+ {
+ AUDIO_FORMAT_MP2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)
+ },
+ {
+ AUDIO_FORMAT_QCELP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_QCELP)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_DSD, make_AudioFormatDescription("audio/vnd.sony.dsd")
+ },
+ {
+ AUDIO_FORMAT_FLAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_FLAC)
+ },
+ {
+ AUDIO_FORMAT_ALAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_ALAC)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_MAIN,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_SSR,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_LTP,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_HE_V1,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_SCALABLE,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_ERLC,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_HE_V2,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_ELD,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD)
+ },
+ {
+ AUDIO_FORMAT_AAC_ADTS_XHE,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE)
+ },
+ {
+ // Note: not in the IANA registry. "vnd.octel.sbc" is not BT SBC.
+ AUDIO_FORMAT_SBC, make_AudioFormatDescription("audio/x-sbc")
+ },
+ {
+ AUDIO_FORMAT_APTX, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_APTX)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")
+ },
+ {
+ AUDIO_FORMAT_AC4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC4)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")
+ },
+ {
+ AUDIO_FORMAT_MAT, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_1_0,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".1.0"))
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_2_0,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.0"))
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_2_1,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.1"))
+ },
+ {
+ AUDIO_FORMAT_AAC_LATM, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC)
+ },
+ {
+ AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC)
+ },
+ {
+ AUDIO_FORMAT_AAC_LATM_HE_V1,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1)
+ },
+ {
+ AUDIO_FORMAT_AAC_LATM_HE_V2,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_CELT, make_AudioFormatDescription("audio/x-celt")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_ADAPTIVE, make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LHDC, make_AudioFormatDescription("audio/vnd.savitech.lhdc")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LHDC_LL, make_AudioFormatDescription("audio/vnd.savitech.lhdc.ll")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_TWSP, make_AudioFormatDescription("audio/vnd.qcom.aptx.twsp")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")
+ },
+ {
+ AUDIO_FORMAT_MPEGH, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1)
+ },
+ {
+ AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3)
+ },
+ {
+ AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4)
+ },
+ {
+ AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3)
+ },
+ {
+ AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4)
+ },
+ {
+ AUDIO_FORMAT_IEC60958,
+ make_AudioFormatDescription(PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
+ },
+ {
+ AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
+ },
+ {
+ AUDIO_FORMAT_DRA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DRA)
+ },
+ }};
+ return pairs;
+}
+
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(const std::vector<std::pair<S, T>>& v) {
+ std::unordered_map<S, T> result(v.begin(), v.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+ return result;
+}
+
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(
+ const std::vector<std::pair<S, T>>& v1, const std::vector<std::pair<S, T>>& v2) {
+ std::unordered_map<S, T> result(v1.begin(), v1.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v1.size(), "Duplicate key elements detected in v1");
+ result.insert(v2.begin(), v2.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v1.size() + v2.size(),
+ "Duplicate key elements detected in v1+v2");
+ return result;
+}
+
+template<typename S, typename T>
+std::unordered_map<T, S> make_ReverseMap(const std::vector<std::pair<S, T>>& v) {
+ std::unordered_map<T, S> result;
+ std::transform(v.begin(), v.end(), std::inserter(result, result.begin()),
+ [](const std::pair<S, T>& p) {
+ return std::make_pair(p.second, p.first);
+ });
+ LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+ return result;
+}
+
+} // namespace
+
+audio_channel_mask_t aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+ int aidlLayout, bool isInput) {
+ auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+ const int aidlLayoutInitial = aidlLayout; // for error message
+ audio_channel_mask_t legacy = AUDIO_CHANNEL_NONE;
+ for (const auto& bitPair : bitMapping) {
+ if ((aidlLayout & bitPair.second) == bitPair.second) {
+ legacy = static_cast<audio_channel_mask_t>(legacy | bitPair.first);
+ aidlLayout &= ~bitPair.second;
+ if (aidlLayout == 0) {
+ return legacy;
+ }
+ }
+ }
+ ALOGE("%s: aidl layout 0x%x contains bits 0x%x that have no match to legacy %s bits",
+ __func__, aidlLayoutInitial, aidlLayout, isInput ? "input" : "output");
+ return AUDIO_CHANNEL_NONE;
+}
+
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ const AudioChannelLayout& aidl, bool isInput) {
+ using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
+ using Tag = AudioChannelLayout::Tag;
+ static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
+ static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
+ static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
+
+ auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
+ const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGW("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
+ aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+ };
+
+ switch (aidl.getTag()) {
+ case Tag::none:
+ return AUDIO_CHANNEL_NONE;
+ case Tag::invalid:
+ return AUDIO_CHANNEL_INVALID;
+ case Tag::indexMask:
+ // Index masks do not have pre-defined values.
+ if (const int bits = aidl.get<Tag::indexMask>();
+ __builtin_popcount(bits) != 0 &&
+ __builtin_popcount(bits) <= AUDIO_CHANNEL_COUNT_MAX) {
+ return audio_channel_mask_from_representation_and_bits(
+ AUDIO_CHANNEL_REPRESENTATION_INDEX, bits);
+ } else {
+ ALOGE("%s: invalid indexMask value 0x%x in %s",
+ __func__, bits, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+ case Tag::layoutMask:
+ // The fast path is to find a direct match for some known layout mask.
+ if (const auto layoutMatch = convert(aidl, isInput ? mIn : mOut, __func__,
+ isInput ? "input" : "output");
+ layoutMatch.ok()) {
+ return layoutMatch;
+ }
+ // If a match for a predefined layout wasn't found, make a custom one from bits.
+ if (audio_channel_mask_t bitMask =
+ aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+ aidl.get<Tag::layoutMask>(), isInput);
+ bitMask != AUDIO_CHANNEL_NONE) {
+ return bitMask;
+ }
+ return unexpected(BAD_VALUE);
+ case Tag::voiceMask:
+ return convert(aidl, mVoice, __func__, "voice");
+ }
+ ALOGE("%s: unexpected tag value %d", __func__, aidl.getTag());
+ return unexpected(BAD_VALUE);
+}
+
+int legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+ audio_channel_mask_t legacy, bool isInput) {
+ auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+ const int legacyInitial = legacy; // for error message
+ int aidlLayout = 0;
+ for (const auto& bitPair : bitMapping) {
+ if ((legacy & bitPair.first) == bitPair.first) {
+ aidlLayout |= bitPair.second;
+ legacy = static_cast<audio_channel_mask_t>(legacy & ~bitPair.first);
+ if (legacy == 0) {
+ return aidlLayout;
+ }
+ }
+ }
+ ALOGE("%s: legacy %s audio_channel_mask_t 0x%x contains unrecognized bits 0x%x",
+ __func__, isInput ? "input" : "output", legacyInitial, legacy);
+ return 0;
+}
+
+ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ audio_channel_mask_t legacy, bool isInput) {
+ using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
+ using Tag = AudioChannelLayout::Tag;
+ static const DirectMap mInAndVoice = make_DirectMap(
+ getInAudioChannelPairs(), getVoiceAudioChannelPairs());
+ static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
+
+ auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
+ const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGW("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
+ func, type, legacy);
+ return unexpected(BAD_VALUE);
+ }
+ };
+
+ if (legacy == AUDIO_CHANNEL_NONE) {
+ return AudioChannelLayout{};
+ } else if (legacy == AUDIO_CHANNEL_INVALID) {
+ return AudioChannelLayout::make<Tag::invalid>(0);
+ }
+
+ const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
+ if (repr == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ if (audio_channel_mask_is_valid(legacy)) {
+ const int indexMask = VALUE_OR_RETURN(
+ convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
+ return AudioChannelLayout::make<Tag::indexMask>(indexMask);
+ } else {
+ ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
+ } else if (repr == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+ // The fast path is to find a direct match for some known layout mask.
+ if (const auto layoutMatch = convert(legacy, isInput ? mInAndVoice : mOut, __func__,
+ isInput ? "input / voice" : "output");
+ layoutMatch.ok()) {
+ return layoutMatch;
+ }
+ // If a match for a predefined layout wasn't found, make a custom one from bits,
+ // rejecting those with voice channel bits.
+ if (!isInput ||
+ (legacy & (AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK)) == 0) {
+ if (int bitMaskLayout =
+ legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+ legacy, isInput);
+ bitMaskLayout != 0) {
+ return AudioChannelLayout::make<Tag::layoutMask>(bitMaskLayout);
+ }
+ } else {
+ ALOGE("%s: legacy audio_channel_mask_t value 0x%x contains voice bits",
+ __func__, legacy);
+ }
+ return unexpected(BAD_VALUE);
+ }
+
+ ALOGE("%s: unknown representation %d in audio_channel_mask_t value 0x%x",
+ __func__, repr, legacy);
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+ const AudioDeviceDescription& aidl) {
+ static const std::unordered_map<AudioDeviceDescription, audio_devices_t> m =
+ make_ReverseMap(getAudioDevicePairs());
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no legacy audio_devices_t found for %s", __func__, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
+ audio_devices_t legacy) {
+ static const std::unordered_map<audio_devices_t, AudioDeviceDescription> m =
+ make_DirectMap(getAudioDevicePairs());
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no AudioDeviceDescription found for legacy audio_devices_t value 0x%x",
+ __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+ const AudioDevice& aidl,
+ audio_devices_t* legacyType, char* legacyAddress) {
+ *legacyType = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+ return aidl2legacy_string(
+ aidl.address.get<AudioDeviceAddress::id>(),
+ legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN);
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+ const AudioDevice& aidl,
+ audio_devices_t* legacyType, String8* legacyAddress) {
+ *legacyType = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+ *legacyAddress = VALUE_OR_RETURN_STATUS(aidl2legacy_string_view_String8(
+ aidl.address.get<AudioDeviceAddress::id>()));
+ return OK;
+}
+
+status_t aidl2legacy_AudioDevice_audio_device(
+ const AudioDevice& aidl,
+ audio_devices_t* legacyType, std::string* legacyAddress) {
+ *legacyType = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+ *legacyAddress = aidl.address.get<AudioDeviceAddress::id>();
+ return OK;
+}
+
+ConversionResult<AudioDevice> legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const char* legacyAddress) {
+ AudioDevice aidl;
+ aidl.type = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+ const std::string aidl_id = VALUE_OR_RETURN(
+ legacy2aidl_string(legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+ aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+ return aidl;
+}
+
+ConversionResult<AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const String8& legacyAddress) {
+ AudioDevice aidl;
+ aidl.type = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+ const std::string aidl_id = VALUE_OR_RETURN(
+ legacy2aidl_String8_string(legacyAddress));
+ aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+ return aidl;
+}
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+ const AudioFormatDescription& aidl) {
+ static const std::unordered_map<AudioFormatDescription, audio_format_t> m =
+ make_ReverseMap(getAudioFormatPairs());
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no legacy audio_format_t found for %s", __func__, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
audio_format_t legacy) {
- // This relies on AudioFormat being kept in sync with audio_format_t.
- static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
- return static_cast<media::audio::common::AudioFormat>(legacy);
+ static const std::unordered_map<audio_format_t, AudioFormatDescription> m =
+ make_DirectMap(getAudioFormatPairs());
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no AudioFormatDescription found for legacy audio_format_t value 0x%x",
+ __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
}
-ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl) {
+ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(
+ AudioGainMode aidl) {
switch (aidl) {
- case media::AudioGainMode::JOINT:
+ case AudioGainMode::JOINT:
return AUDIO_GAIN_MODE_JOINT;
- case media::AudioGainMode::CHANNELS:
+ case AudioGainMode::CHANNELS:
return AUDIO_GAIN_MODE_CHANNELS;
- case media::AudioGainMode::RAMP:
+ case AudioGainMode::RAMP:
return AUDIO_GAIN_MODE_RAMP;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy) {
+ConversionResult<AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(
+ audio_gain_mode_t legacy) {
switch (legacy) {
case AUDIO_GAIN_MODE_JOINT:
- return media::AudioGainMode::JOINT;
+ return AudioGainMode::JOINT;
case AUDIO_GAIN_MODE_CHANNELS:
- return media::AudioGainMode::CHANNELS;
+ return AudioGainMode::CHANNELS;
case AUDIO_GAIN_MODE_RAMP:
- return media::AudioGainMode::RAMP;
+ return AudioGainMode::RAMP;
}
return unexpected(BAD_VALUE);
}
ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
- return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, media::AudioGainMode>(
+ return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, AudioGainMode>(
aidl, aidl2legacy_AudioGainMode_audio_gain_mode_t,
// AudioGainMode is index-based.
- indexToEnum_index<media::AudioGainMode>,
+ indexToEnum_index<AudioGainMode>,
// AUDIO_GAIN_MODE_* constants are mask-based.
enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
}
ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
- return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, audio_gain_mode_t>(
+ return convertBitmask<int32_t, audio_gain_mode_t, AudioGainMode, audio_gain_mode_t>(
legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
// AUDIO_GAIN_MODE_* constants are mask-based.
indexToEnum_bitmask<audio_gain_mode_t>,
// AudioGainMode is index-based.
- enumToMask_index<int32_t, media::AudioGainMode>);
-}
-
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl) {
- // TODO(ytai): bitfield?
- return convertReinterpret<audio_devices_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy) {
- // TODO(ytai): bitfield?
- return convertReinterpret<int32_t>(legacy);
+ enumToMask_index<int32_t, AudioGainMode>);
}
ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
- const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type) {
+ const AudioGainConfig& aidl, bool isInput) {
audio_gain_config legacy;
legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
- legacy.channel_mask =
- VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
- const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
- const bool isJoint = bitmaskIsSet(aidl.mode, media::AudioGainMode::JOINT);
+ legacy.channel_mask = VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ const bool isJoint = bitmaskIsSet(aidl.mode, AudioGainMode::JOINT);
size_t numValues = isJoint ? 1
: isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
: audio_channel_count_from_out_mask(legacy.channel_mask);
@@ -481,14 +1396,13 @@
return legacy;
}
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
- const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type) {
- media::AudioGainConfig aidl;
+ConversionResult<AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
+ const audio_gain_config& legacy, bool isInput) {
+ AudioGainConfig aidl;
aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
- aidl.channelMask =
- VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
- const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
const bool isJoint = (legacy.mode & AUDIO_GAIN_MODE_JOINT) != 0;
size_t numValues = isJoint ? 1
: isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
@@ -502,129 +1416,129 @@
}
ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
- media::AudioInputFlags aidl) {
+ AudioInputFlags aidl) {
switch (aidl) {
- case media::AudioInputFlags::FAST:
+ case AudioInputFlags::FAST:
return AUDIO_INPUT_FLAG_FAST;
- case media::AudioInputFlags::HW_HOTWORD:
+ case AudioInputFlags::HW_HOTWORD:
return AUDIO_INPUT_FLAG_HW_HOTWORD;
- case media::AudioInputFlags::RAW:
+ case AudioInputFlags::RAW:
return AUDIO_INPUT_FLAG_RAW;
- case media::AudioInputFlags::SYNC:
+ case AudioInputFlags::SYNC:
return AUDIO_INPUT_FLAG_SYNC;
- case media::AudioInputFlags::MMAP_NOIRQ:
+ case AudioInputFlags::MMAP_NOIRQ:
return AUDIO_INPUT_FLAG_MMAP_NOIRQ;
- case media::AudioInputFlags::VOIP_TX:
+ case AudioInputFlags::VOIP_TX:
return AUDIO_INPUT_FLAG_VOIP_TX;
- case media::AudioInputFlags::HW_AV_SYNC:
+ case AudioInputFlags::HW_AV_SYNC:
return AUDIO_INPUT_FLAG_HW_AV_SYNC;
- case media::AudioInputFlags::DIRECT:
+ case AudioInputFlags::DIRECT:
return AUDIO_INPUT_FLAG_DIRECT;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
+ConversionResult<AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
audio_input_flags_t legacy) {
switch (legacy) {
case AUDIO_INPUT_FLAG_NONE:
break; // shouldn't get here. must be listed -Werror,-Wswitch
case AUDIO_INPUT_FLAG_FAST:
- return media::AudioInputFlags::FAST;
+ return AudioInputFlags::FAST;
case AUDIO_INPUT_FLAG_HW_HOTWORD:
- return media::AudioInputFlags::HW_HOTWORD;
+ return AudioInputFlags::HW_HOTWORD;
case AUDIO_INPUT_FLAG_RAW:
- return media::AudioInputFlags::RAW;
+ return AudioInputFlags::RAW;
case AUDIO_INPUT_FLAG_SYNC:
- return media::AudioInputFlags::SYNC;
+ return AudioInputFlags::SYNC;
case AUDIO_INPUT_FLAG_MMAP_NOIRQ:
- return media::AudioInputFlags::MMAP_NOIRQ;
+ return AudioInputFlags::MMAP_NOIRQ;
case AUDIO_INPUT_FLAG_VOIP_TX:
- return media::AudioInputFlags::VOIP_TX;
+ return AudioInputFlags::VOIP_TX;
case AUDIO_INPUT_FLAG_HW_AV_SYNC:
- return media::AudioInputFlags::HW_AV_SYNC;
+ return AudioInputFlags::HW_AV_SYNC;
case AUDIO_INPUT_FLAG_DIRECT:
- return media::AudioInputFlags::DIRECT;
+ return AudioInputFlags::DIRECT;
}
return unexpected(BAD_VALUE);
}
ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
- media::AudioOutputFlags aidl) {
+ AudioOutputFlags aidl) {
switch (aidl) {
- case media::AudioOutputFlags::DIRECT:
+ case AudioOutputFlags::DIRECT:
return AUDIO_OUTPUT_FLAG_DIRECT;
- case media::AudioOutputFlags::PRIMARY:
+ case AudioOutputFlags::PRIMARY:
return AUDIO_OUTPUT_FLAG_PRIMARY;
- case media::AudioOutputFlags::FAST:
+ case AudioOutputFlags::FAST:
return AUDIO_OUTPUT_FLAG_FAST;
- case media::AudioOutputFlags::DEEP_BUFFER:
+ case AudioOutputFlags::DEEP_BUFFER:
return AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
- case media::AudioOutputFlags::COMPRESS_OFFLOAD:
+ case AudioOutputFlags::COMPRESS_OFFLOAD:
return AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
- case media::AudioOutputFlags::NON_BLOCKING:
+ case AudioOutputFlags::NON_BLOCKING:
return AUDIO_OUTPUT_FLAG_NON_BLOCKING;
- case media::AudioOutputFlags::HW_AV_SYNC:
+ case AudioOutputFlags::HW_AV_SYNC:
return AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
- case media::AudioOutputFlags::TTS:
+ case AudioOutputFlags::TTS:
return AUDIO_OUTPUT_FLAG_TTS;
- case media::AudioOutputFlags::RAW:
+ case AudioOutputFlags::RAW:
return AUDIO_OUTPUT_FLAG_RAW;
- case media::AudioOutputFlags::SYNC:
+ case AudioOutputFlags::SYNC:
return AUDIO_OUTPUT_FLAG_SYNC;
- case media::AudioOutputFlags::IEC958_NONAUDIO:
+ case AudioOutputFlags::IEC958_NONAUDIO:
return AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
- case media::AudioOutputFlags::DIRECT_PCM:
+ case AudioOutputFlags::DIRECT_PCM:
return AUDIO_OUTPUT_FLAG_DIRECT_PCM;
- case media::AudioOutputFlags::MMAP_NOIRQ:
+ case AudioOutputFlags::MMAP_NOIRQ:
return AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
- case media::AudioOutputFlags::VOIP_RX:
+ case AudioOutputFlags::VOIP_RX:
return AUDIO_OUTPUT_FLAG_VOIP_RX;
- case media::AudioOutputFlags::INCALL_MUSIC:
+ case AudioOutputFlags::INCALL_MUSIC:
return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
- case media::AudioOutputFlags::GAPLESS_OFFLOAD:
+ case AudioOutputFlags::GAPLESS_OFFLOAD:
return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
+ConversionResult<AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
audio_output_flags_t legacy) {
switch (legacy) {
case AUDIO_OUTPUT_FLAG_NONE:
break; // shouldn't get here. must be listed -Werror,-Wswitch
case AUDIO_OUTPUT_FLAG_DIRECT:
- return media::AudioOutputFlags::DIRECT;
+ return AudioOutputFlags::DIRECT;
case AUDIO_OUTPUT_FLAG_PRIMARY:
- return media::AudioOutputFlags::PRIMARY;
+ return AudioOutputFlags::PRIMARY;
case AUDIO_OUTPUT_FLAG_FAST:
- return media::AudioOutputFlags::FAST;
+ return AudioOutputFlags::FAST;
case AUDIO_OUTPUT_FLAG_DEEP_BUFFER:
- return media::AudioOutputFlags::DEEP_BUFFER;
+ return AudioOutputFlags::DEEP_BUFFER;
case AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD:
- return media::AudioOutputFlags::COMPRESS_OFFLOAD;
+ return AudioOutputFlags::COMPRESS_OFFLOAD;
case AUDIO_OUTPUT_FLAG_NON_BLOCKING:
- return media::AudioOutputFlags::NON_BLOCKING;
+ return AudioOutputFlags::NON_BLOCKING;
case AUDIO_OUTPUT_FLAG_HW_AV_SYNC:
- return media::AudioOutputFlags::HW_AV_SYNC;
+ return AudioOutputFlags::HW_AV_SYNC;
case AUDIO_OUTPUT_FLAG_TTS:
- return media::AudioOutputFlags::TTS;
+ return AudioOutputFlags::TTS;
case AUDIO_OUTPUT_FLAG_RAW:
- return media::AudioOutputFlags::RAW;
+ return AudioOutputFlags::RAW;
case AUDIO_OUTPUT_FLAG_SYNC:
- return media::AudioOutputFlags::SYNC;
+ return AudioOutputFlags::SYNC;
case AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO:
- return media::AudioOutputFlags::IEC958_NONAUDIO;
+ return AudioOutputFlags::IEC958_NONAUDIO;
case AUDIO_OUTPUT_FLAG_DIRECT_PCM:
- return media::AudioOutputFlags::DIRECT_PCM;
+ return AudioOutputFlags::DIRECT_PCM;
case AUDIO_OUTPUT_FLAG_MMAP_NOIRQ:
- return media::AudioOutputFlags::MMAP_NOIRQ;
+ return AudioOutputFlags::MMAP_NOIRQ;
case AUDIO_OUTPUT_FLAG_VOIP_RX:
- return media::AudioOutputFlags::VOIP_RX;
+ return AudioOutputFlags::VOIP_RX;
case AUDIO_OUTPUT_FLAG_INCALL_MUSIC:
- return media::AudioOutputFlags::INCALL_MUSIC;
+ return AudioOutputFlags::INCALL_MUSIC;
case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
- return media::AudioOutputFlags::GAPLESS_OFFLOAD;
+ return AudioOutputFlags::GAPLESS_OFFLOAD;
}
return unexpected(BAD_VALUE);
}
@@ -634,9 +1548,9 @@
using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
LegacyMask converted = VALUE_OR_RETURN(
- (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, media::AudioInputFlags>(
+ (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, AudioInputFlags>(
aidl, aidl2legacy_AudioInputFlags_audio_input_flags_t,
- indexToEnum_index<media::AudioInputFlags>,
+ indexToEnum_index<AudioInputFlags>,
enumToMask_bitmask<LegacyMask, audio_input_flags_t>)));
return static_cast<audio_input_flags_t>(converted);
}
@@ -646,10 +1560,10 @@
using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
- return convertBitmask<int32_t, LegacyMask, media::AudioInputFlags, audio_input_flags_t>(
+ return convertBitmask<int32_t, LegacyMask, AudioInputFlags, audio_input_flags_t>(
legacyMask, legacy2aidl_audio_input_flags_t_AudioInputFlags,
indexToEnum_bitmask<audio_input_flags_t>,
- enumToMask_index<int32_t, media::AudioInputFlags>);
+ enumToMask_index<int32_t, AudioInputFlags>);
}
ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
@@ -657,9 +1571,9 @@
return convertBitmask<audio_output_flags_t,
int32_t,
audio_output_flags_t,
- media::AudioOutputFlags>(
+ AudioOutputFlags>(
aidl, aidl2legacy_AudioOutputFlags_audio_output_flags_t,
- indexToEnum_index<media::AudioOutputFlags>,
+ indexToEnum_index<AudioOutputFlags>,
enumToMask_bitmask<audio_output_flags_t, audio_output_flags_t>);
}
@@ -668,225 +1582,211 @@
using LegacyMask = std::underlying_type_t<audio_output_flags_t>;
LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
- return convertBitmask<int32_t, LegacyMask, media::AudioOutputFlags, audio_output_flags_t>(
+ return convertBitmask<int32_t, LegacyMask, AudioOutputFlags, audio_output_flags_t>(
legacyMask, legacy2aidl_audio_output_flags_t_AudioOutputFlags,
indexToEnum_bitmask<audio_output_flags_t>,
- enumToMask_index<int32_t, media::AudioOutputFlags>);
+ enumToMask_index<int32_t, AudioOutputFlags>);
}
ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
- const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type) {
+ const AudioIoFlags& aidl, bool isInput) {
audio_io_flags legacy;
- Direction dir = VALUE_OR_RETURN(direction(role, type));
- switch (dir) {
- case Direction::INPUT: {
- legacy.input = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_input_flags_t_mask(
- VALUE_OR_RETURN(UNION_GET(aidl, input))));
- }
- break;
-
- case Direction::OUTPUT: {
- legacy.output = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_output_flags_t_mask(
- VALUE_OR_RETURN(UNION_GET(aidl, output))));
- }
- break;
+ if (isInput) {
+ legacy.input = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_input_flags_t_mask(
+ VALUE_OR_RETURN(UNION_GET(aidl, input))));
+ } else {
+ legacy.output = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_output_flags_t_mask(
+ VALUE_OR_RETURN(UNION_GET(aidl, output))));
}
-
return legacy;
}
-ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
- const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type) {
- media::AudioIoFlags aidl;
-
- Direction dir = VALUE_OR_RETURN(direction(role, type));
- switch (dir) {
- case Direction::INPUT:
- UNION_SET(aidl, input,
- VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(
- legacy.input)));
- break;
- case Direction::OUTPUT:
- UNION_SET(aidl, output,
- VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(
- legacy.output)));
- break;
+ConversionResult<AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+ const audio_io_flags& legacy, bool isInput) {
+ AudioIoFlags aidl;
+ if (isInput) {
+ UNION_SET(aidl, input,
+ VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(legacy.input)));
+ } else {
+ UNION_SET(aidl, output,
+ VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(legacy.output)));
}
return aidl;
}
ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
- const media::AudioPortConfigDeviceExt& aidl) {
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+ const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlDeviceExt) {
audio_port_config_device_ext legacy;
- legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
- legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
- RETURN_IF_ERROR(aidl2legacy_string(aidl.address, legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+ legacy.hw_module = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_module_handle_t(aidlDeviceExt.hwModule));
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+ aidl.device, &legacy.type, legacy.address));
return legacy;
}
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
- const audio_port_config_device_ext& legacy) {
- media::AudioPortConfigDeviceExt aidl;
- aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
- aidl.address = VALUE_OR_RETURN(
- legacy2aidl_string(legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
- return aidl;
+status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+ const audio_port_config_device_ext& legacy,
+ AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+ aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl->device = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+ return OK;
}
ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
- media::AudioStreamType aidl) {
+ AudioStreamType aidl) {
switch (aidl) {
- case media::AudioStreamType::DEFAULT:
+ case AudioStreamType::INVALID:
+ break; // return error
+ case AudioStreamType::SYS_RESERVED_DEFAULT:
return AUDIO_STREAM_DEFAULT;
- case media::AudioStreamType::VOICE_CALL:
+ case AudioStreamType::VOICE_CALL:
return AUDIO_STREAM_VOICE_CALL;
- case media::AudioStreamType::SYSTEM:
+ case AudioStreamType::SYSTEM:
return AUDIO_STREAM_SYSTEM;
- case media::AudioStreamType::RING:
+ case AudioStreamType::RING:
return AUDIO_STREAM_RING;
- case media::AudioStreamType::MUSIC:
+ case AudioStreamType::MUSIC:
return AUDIO_STREAM_MUSIC;
- case media::AudioStreamType::ALARM:
+ case AudioStreamType::ALARM:
return AUDIO_STREAM_ALARM;
- case media::AudioStreamType::NOTIFICATION:
+ case AudioStreamType::NOTIFICATION:
return AUDIO_STREAM_NOTIFICATION;
- case media::AudioStreamType::BLUETOOTH_SCO:
+ case AudioStreamType::BLUETOOTH_SCO:
return AUDIO_STREAM_BLUETOOTH_SCO;
- case media::AudioStreamType::ENFORCED_AUDIBLE:
+ case AudioStreamType::ENFORCED_AUDIBLE:
return AUDIO_STREAM_ENFORCED_AUDIBLE;
- case media::AudioStreamType::DTMF:
+ case AudioStreamType::DTMF:
return AUDIO_STREAM_DTMF;
- case media::AudioStreamType::TTS:
+ case AudioStreamType::TTS:
return AUDIO_STREAM_TTS;
- case media::AudioStreamType::ACCESSIBILITY:
+ case AudioStreamType::ACCESSIBILITY:
return AUDIO_STREAM_ACCESSIBILITY;
- case media::AudioStreamType::ASSISTANT:
+ case AudioStreamType::ASSISTANT:
return AUDIO_STREAM_ASSISTANT;
- case media::AudioStreamType::REROUTING:
+ case AudioStreamType::SYS_RESERVED_REROUTING:
return AUDIO_STREAM_REROUTING;
- case media::AudioStreamType::PATCH:
+ case AudioStreamType::SYS_RESERVED_PATCH:
return AUDIO_STREAM_PATCH;
- case media::AudioStreamType::CALL_ASSISTANT:
+ case AudioStreamType::CALL_ASSISTANT:
return AUDIO_STREAM_CALL_ASSISTANT;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
+ConversionResult<AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
audio_stream_type_t legacy) {
switch (legacy) {
case AUDIO_STREAM_DEFAULT:
- return media::AudioStreamType::DEFAULT;
+ return AudioStreamType::SYS_RESERVED_DEFAULT;
case AUDIO_STREAM_VOICE_CALL:
- return media::AudioStreamType::VOICE_CALL;
+ return AudioStreamType::VOICE_CALL;
case AUDIO_STREAM_SYSTEM:
- return media::AudioStreamType::SYSTEM;
+ return AudioStreamType::SYSTEM;
case AUDIO_STREAM_RING:
- return media::AudioStreamType::RING;
+ return AudioStreamType::RING;
case AUDIO_STREAM_MUSIC:
- return media::AudioStreamType::MUSIC;
+ return AudioStreamType::MUSIC;
case AUDIO_STREAM_ALARM:
- return media::AudioStreamType::ALARM;
+ return AudioStreamType::ALARM;
case AUDIO_STREAM_NOTIFICATION:
- return media::AudioStreamType::NOTIFICATION;
+ return AudioStreamType::NOTIFICATION;
case AUDIO_STREAM_BLUETOOTH_SCO:
- return media::AudioStreamType::BLUETOOTH_SCO;
+ return AudioStreamType::BLUETOOTH_SCO;
case AUDIO_STREAM_ENFORCED_AUDIBLE:
- return media::AudioStreamType::ENFORCED_AUDIBLE;
+ return AudioStreamType::ENFORCED_AUDIBLE;
case AUDIO_STREAM_DTMF:
- return media::AudioStreamType::DTMF;
+ return AudioStreamType::DTMF;
case AUDIO_STREAM_TTS:
- return media::AudioStreamType::TTS;
+ return AudioStreamType::TTS;
case AUDIO_STREAM_ACCESSIBILITY:
- return media::AudioStreamType::ACCESSIBILITY;
+ return AudioStreamType::ACCESSIBILITY;
case AUDIO_STREAM_ASSISTANT:
- return media::AudioStreamType::ASSISTANT;
+ return AudioStreamType::ASSISTANT;
case AUDIO_STREAM_REROUTING:
- return media::AudioStreamType::REROUTING;
+ return AudioStreamType::SYS_RESERVED_REROUTING;
case AUDIO_STREAM_PATCH:
- return media::AudioStreamType::PATCH;
+ return AudioStreamType::SYS_RESERVED_PATCH;
case AUDIO_STREAM_CALL_ASSISTANT:
- return media::AudioStreamType::CALL_ASSISTANT;
+ return AudioStreamType::CALL_ASSISTANT;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
- media::AudioSourceType aidl) {
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+ AudioSource aidl) {
switch (aidl) {
- case media::AudioSourceType::INVALID:
- // This value does not have an enum
+ case AudioSource::SYS_RESERVED_INVALID:
return AUDIO_SOURCE_INVALID;
- case media::AudioSourceType::DEFAULT:
+ case AudioSource::DEFAULT:
return AUDIO_SOURCE_DEFAULT;
- case media::AudioSourceType::MIC:
+ case AudioSource::MIC:
return AUDIO_SOURCE_MIC;
- case media::AudioSourceType::VOICE_UPLINK:
+ case AudioSource::VOICE_UPLINK:
return AUDIO_SOURCE_VOICE_UPLINK;
- case media::AudioSourceType::VOICE_DOWNLINK:
+ case AudioSource::VOICE_DOWNLINK:
return AUDIO_SOURCE_VOICE_DOWNLINK;
- case media::AudioSourceType::VOICE_CALL:
+ case AudioSource::VOICE_CALL:
return AUDIO_SOURCE_VOICE_CALL;
- case media::AudioSourceType::CAMCORDER:
+ case AudioSource::CAMCORDER:
return AUDIO_SOURCE_CAMCORDER;
- case media::AudioSourceType::VOICE_RECOGNITION:
+ case AudioSource::VOICE_RECOGNITION:
return AUDIO_SOURCE_VOICE_RECOGNITION;
- case media::AudioSourceType::VOICE_COMMUNICATION:
+ case AudioSource::VOICE_COMMUNICATION:
return AUDIO_SOURCE_VOICE_COMMUNICATION;
- case media::AudioSourceType::REMOTE_SUBMIX:
+ case AudioSource::REMOTE_SUBMIX:
return AUDIO_SOURCE_REMOTE_SUBMIX;
- case media::AudioSourceType::UNPROCESSED:
+ case AudioSource::UNPROCESSED:
return AUDIO_SOURCE_UNPROCESSED;
- case media::AudioSourceType::VOICE_PERFORMANCE:
+ case AudioSource::VOICE_PERFORMANCE:
return AUDIO_SOURCE_VOICE_PERFORMANCE;
- case media::AudioSourceType::ECHO_REFERENCE:
+ case AudioSource::ECHO_REFERENCE:
return AUDIO_SOURCE_ECHO_REFERENCE;
- case media::AudioSourceType::FM_TUNER:
+ case AudioSource::FM_TUNER:
return AUDIO_SOURCE_FM_TUNER;
- case media::AudioSourceType::HOTWORD:
+ case AudioSource::HOTWORD:
return AUDIO_SOURCE_HOTWORD;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<AudioSource> legacy2aidl_audio_source_t_AudioSource(
audio_source_t legacy) {
switch (legacy) {
case AUDIO_SOURCE_INVALID:
- return media::AudioSourceType::INVALID;
+ return AudioSource::SYS_RESERVED_INVALID;
case AUDIO_SOURCE_DEFAULT:
- return media::AudioSourceType::DEFAULT;
+ return AudioSource::DEFAULT;
case AUDIO_SOURCE_MIC:
- return media::AudioSourceType::MIC;
+ return AudioSource::MIC;
case AUDIO_SOURCE_VOICE_UPLINK:
- return media::AudioSourceType::VOICE_UPLINK;
+ return AudioSource::VOICE_UPLINK;
case AUDIO_SOURCE_VOICE_DOWNLINK:
- return media::AudioSourceType::VOICE_DOWNLINK;
+ return AudioSource::VOICE_DOWNLINK;
case AUDIO_SOURCE_VOICE_CALL:
- return media::AudioSourceType::VOICE_CALL;
+ return AudioSource::VOICE_CALL;
case AUDIO_SOURCE_CAMCORDER:
- return media::AudioSourceType::CAMCORDER;
+ return AudioSource::CAMCORDER;
case AUDIO_SOURCE_VOICE_RECOGNITION:
- return media::AudioSourceType::VOICE_RECOGNITION;
+ return AudioSource::VOICE_RECOGNITION;
case AUDIO_SOURCE_VOICE_COMMUNICATION:
- return media::AudioSourceType::VOICE_COMMUNICATION;
+ return AudioSource::VOICE_COMMUNICATION;
case AUDIO_SOURCE_REMOTE_SUBMIX:
- return media::AudioSourceType::REMOTE_SUBMIX;
+ return AudioSource::REMOTE_SUBMIX;
case AUDIO_SOURCE_UNPROCESSED:
- return media::AudioSourceType::UNPROCESSED;
+ return AudioSource::UNPROCESSED;
case AUDIO_SOURCE_VOICE_PERFORMANCE:
- return media::AudioSourceType::VOICE_PERFORMANCE;
+ return AudioSource::VOICE_PERFORMANCE;
case AUDIO_SOURCE_ECHO_REFERENCE:
- return media::AudioSourceType::ECHO_REFERENCE;
+ return AudioSource::ECHO_REFERENCE;
case AUDIO_SOURCE_FM_TUNER:
- return media::AudioSourceType::FM_TUNER;
+ return AudioSource::FM_TUNER;
case AUDIO_SOURCE_HOTWORD:
- return media::AudioSourceType::HOTWORD;
+ return AudioSource::HOTWORD;
}
return unexpected(BAD_VALUE);
}
@@ -902,8 +1802,8 @@
// This type is unnamed in the original definition, thus we name it here.
using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
-ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortConfigMixExtUseCase(
- const media::AudioPortConfigMixExtUseCase& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortMixExtUseCase(
+ const AudioPortMixExtUseCase& aidl, media::AudioPortRole role) {
audio_port_config_mix_ext_usecase legacy;
switch (role) {
@@ -920,16 +1820,16 @@
case media::AudioPortRole::SINK:
// This is not a bug. A SINK role corresponds to the source field.
- legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(
+ legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(
VALUE_OR_RETURN(UNION_GET(aidl, source))));
return legacy;
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
-ConversionResult<media::AudioPortConfigMixExtUseCase> legacy2aidl_AudioPortConfigMixExtUseCase(
+ConversionResult<AudioPortMixExtUseCase> legacy2aidl_AudioPortMixExtUseCase(
const audio_port_config_mix_ext_usecase& legacy, audio_port_role_t role) {
- media::AudioPortConfigMixExtUseCase aidl;
+ AudioPortMixExtUseCase aidl;
switch (role) {
case AUDIO_PORT_ROLE_NONE:
@@ -943,52 +1843,53 @@
case AUDIO_PORT_ROLE_SINK:
// This is not a bug. A SINK role corresponds to the source field.
UNION_SET(aidl, source,
- VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source)));
+ VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source)));
return aidl;
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
- const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role) {
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+ const AudioPortMixExt& aidl, media::AudioPortRole role,
+ const media::AudioPortMixExtSys& aidlMixExt) {
audio_port_config_mix_ext legacy;
- legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+ legacy.hw_module = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_module_handle_t(aidlMixExt.hwModule));
legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
- legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigMixExtUseCase(aidl.usecase, role));
+ legacy.usecase = VALUE_OR_RETURN(aidl2legacy_AudioPortMixExtUseCase(aidl.usecase, role));
return legacy;
}
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
- const audio_port_config_mix_ext& legacy, audio_port_role_t role) {
- media::AudioPortConfigMixExt aidl;
- aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
- aidl.usecase = VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExtUseCase(legacy.usecase, role));
- return aidl;
+status_t legacy2aidl_AudioPortMixExt(
+ const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+ AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+ aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+ aidl->usecase = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_AudioPortMixExtUseCase(legacy.usecase, role));
+ return OK;
}
ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
- const media::AudioPortConfigSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl) {
audio_port_config_session_ext legacy;
- legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+ legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
return legacy;
}
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+ConversionResult<int32_t>
+legacy2aidl_audio_port_config_session_ext_int32_t(
const audio_port_config_session_ext& legacy) {
- media::AudioPortConfigSessionExt aidl;
- aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
- return aidl;
+ return legacy2aidl_audio_session_t_int32_t(legacy.session);
}
// This type is unnamed in the original definition, thus we name it here.
using audio_port_config_ext = decltype(audio_port_config::ext);
-ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortConfigExt(
- const media::AudioPortConfigExt& aidl, media::AudioPortType type,
- media::AudioPortRole role) {
+ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortExt_audio_port_config_ext(
+ const AudioPortExt& aidl, media::AudioPortType type,
+ media::AudioPortRole role, const media::AudioPortExtSys& aidlSys) {
audio_port_config_ext legacy;
switch (type) {
case media::AudioPortType::NONE:
@@ -997,16 +1898,19 @@
return legacy;
case media::AudioPortType::DEVICE:
legacy.device = VALUE_OR_RETURN(
- aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
- VALUE_OR_RETURN(UNION_GET(aidl, device))));
+ aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+ VALUE_OR_RETURN(UNION_GET(aidl, device)),
+ VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
return legacy;
case media::AudioPortType::MIX:
legacy.mix = VALUE_OR_RETURN(
- aidl2legacy_AudioPortConfigMixExt(VALUE_OR_RETURN(UNION_GET(aidl, mix)), role));
+ aidl2legacy_AudioPortMixExt(
+ VALUE_OR_RETURN(UNION_GET(aidl, mix)), role,
+ VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
return legacy;
case media::AudioPortType::SESSION:
legacy.session = VALUE_OR_RETURN(
- aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
+ aidl2legacy_int32_t_audio_port_config_session_ext(
VALUE_OR_RETURN(UNION_GET(aidl, session))));
return legacy;
@@ -1014,90 +1918,113 @@
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
-ConversionResult<media::AudioPortConfigExt> legacy2aidl_AudioPortConfigExt(
- const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role) {
- media::AudioPortConfigExt aidl;
-
+status_t legacy2aidl_AudioPortExt(
+ const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role,
+ AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
switch (type) {
case AUDIO_PORT_TYPE_NONE:
- UNION_SET(aidl, unspecified, false);
- return aidl;
- case AUDIO_PORT_TYPE_DEVICE:
- UNION_SET(aidl, device,
- VALUE_OR_RETURN(
- legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
- legacy.device)));
- return aidl;
- case AUDIO_PORT_TYPE_MIX:
- UNION_SET(aidl, mix,
- VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExt(legacy.mix, role)));
- return aidl;
+ UNION_SET(*aidl, unspecified, false);
+ UNION_SET(*aidlSys, unspecified, false);
+ return OK;
+ case AUDIO_PORT_TYPE_DEVICE: {
+ AudioPortDeviceExt device;
+ media::AudioPortDeviceExtSys deviceSys;
+ RETURN_STATUS_IF_ERROR(
+ legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+ legacy.device, &device, &deviceSys));
+ UNION_SET(*aidl, device, device);
+ UNION_SET(*aidlSys, device, deviceSys);
+ return OK;
+ }
+ case AUDIO_PORT_TYPE_MIX: {
+ AudioPortMixExt mix;
+ media::AudioPortMixExtSys mixSys;
+ RETURN_STATUS_IF_ERROR(legacy2aidl_AudioPortMixExt(legacy.mix, role, &mix, &mixSys));
+ UNION_SET(*aidl, mix, mix);
+ UNION_SET(*aidlSys, mix, mixSys);
+ return OK;
+ }
case AUDIO_PORT_TYPE_SESSION:
- UNION_SET(aidl, session,
- VALUE_OR_RETURN(
- legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
- legacy.session)));
- return aidl;
+ UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_config_session_ext_int32_t(legacy.session)));
+ UNION_SET(*aidlSys, unspecified, false);
+ return OK;
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
const media::AudioPortConfig& aidl) {
- audio_port_config legacy;
- legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
- legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
- legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
- legacy.config_mask = VALUE_OR_RETURN(aidl2legacy_int32_t_config_mask(aidl.configMask));
- if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::SAMPLE_RATE)) {
- legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.sampleRate));
+ audio_port_config legacy{};
+ legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+ legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+ legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+ const bool isInput =
+ VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+ if (aidl.hal.sampleRate.has_value()) {
+ legacy.sample_rate = VALUE_OR_RETURN(
+ convertIntegral<unsigned int>(aidl.hal.sampleRate.value().value));
+ legacy.config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
}
- if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::CHANNEL_MASK)) {
+ if (aidl.hal.channelMask.has_value()) {
legacy.channel_mask =
- VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+ VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.hal.channelMask.value(), isInput));
+ legacy.config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
}
- if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FORMAT)) {
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+ if (aidl.hal.format.has_value()) {
+ legacy.format = VALUE_OR_RETURN(
+ aidl2legacy_AudioFormatDescription_audio_format_t(aidl.hal.format.value()));
+ legacy.config_mask |= AUDIO_PORT_CONFIG_FORMAT;
}
- if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::GAIN)) {
- legacy.gain = VALUE_OR_RETURN(
- aidl2legacy_AudioGainConfig_audio_gain_config(aidl.gain, aidl.role, aidl.type));
+ if (aidl.hal.gain.has_value()) {
+ legacy.gain = VALUE_OR_RETURN(aidl2legacy_AudioGainConfig_audio_gain_config(
+ aidl.hal.gain.value(), isInput));
+ legacy.config_mask |= AUDIO_PORT_CONFIG_GAIN;
}
- if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FLAGS)) {
+ if (aidl.hal.flags.has_value()) {
legacy.flags = VALUE_OR_RETURN(
- aidl2legacy_AudioIoFlags_audio_io_flags(aidl.flags, aidl.role, aidl.type));
+ aidl2legacy_AudioIoFlags_audio_io_flags(aidl.hal.flags.value(), isInput));
+ legacy.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
}
- legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigExt(aidl.ext, aidl.type, aidl.role));
+ legacy.ext = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortExt_audio_port_config_ext(
+ aidl.hal.ext, aidl.sys.type, aidl.sys.role, aidl.sys.ext));
return legacy;
}
ConversionResult<media::AudioPortConfig> legacy2aidl_audio_port_config_AudioPortConfig(
const audio_port_config& legacy) {
media::AudioPortConfig aidl;
- aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
- aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
- aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
- aidl.configMask = VALUE_OR_RETURN(legacy2aidl_config_mask_int32_t(legacy.config_mask));
+ aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+ aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+ aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+ const bool isInput = VALUE_OR_RETURN(
+ direction(legacy.role, legacy.type)) == Direction::INPUT;
if (legacy.config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
- aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+ Int aidl_sampleRate;
+ aidl_sampleRate.value = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+ aidl.hal.sampleRate = aidl_sampleRate;
}
if (legacy.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- aidl.channelMask =
- VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+ aidl.hal.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
}
if (legacy.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ aidl.hal.format = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
}
if (legacy.config_mask & AUDIO_PORT_CONFIG_GAIN) {
- aidl.gain = VALUE_OR_RETURN(legacy2aidl_audio_gain_config_AudioGainConfig(
- legacy.gain, legacy.role, legacy.type));
+ aidl.hal.gain = VALUE_OR_RETURN(
+ legacy2aidl_audio_gain_config_AudioGainConfig(legacy.gain, isInput));
}
if (legacy.config_mask & AUDIO_PORT_CONFIG_FLAGS) {
- aidl.flags = VALUE_OR_RETURN(
- legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, legacy.role, legacy.type));
+ aidl.hal.flags = VALUE_OR_RETURN(
+ legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, isInput));
}
- aidl.ext =
- VALUE_OR_RETURN(legacy2aidl_AudioPortConfigExt(legacy.ext, legacy.type, legacy.role));
+ RETURN_IF_ERROR(legacy2aidl_AudioPortExt(legacy.ext, legacy.type, legacy.role,
+ &aidl.hal.ext, &aidl.sys.ext));
return aidl;
}
@@ -1148,33 +2075,40 @@
ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
const media::AudioIoDescriptor& aidl) {
- sp<AudioIoDescriptor> legacy(new AudioIoDescriptor());
- legacy->mIoHandle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
- legacy->mPatch = VALUE_OR_RETURN(aidl2legacy_AudioPatch_audio_patch(aidl.patch));
- legacy->mSamplingRate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
- legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
- legacy->mChannelMask =
- VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
- legacy->mFrameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
- legacy->mFrameCountHAL = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
- legacy->mLatency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
- legacy->mPortId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
- return legacy;
+ const audio_io_handle_t io_handle = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
+ const struct audio_patch patch = VALUE_OR_RETURN(
+ aidl2legacy_AudioPatch_audio_patch(aidl.patch));
+ const bool isInput = aidl.isInput;
+ const uint32_t sampling_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
+ const audio_format_t format = VALUE_OR_RETURN(
+ aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+ const audio_channel_mask_t channel_mask = VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ const size_t frame_count = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
+ const size_t frame_count_hal = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
+ const uint32_t latency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
+ const audio_port_handle_t port_id = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
+ return sp<AudioIoDescriptor>::make(io_handle, patch, isInput, sampling_rate, format,
+ channel_mask, frame_count, frame_count_hal, latency, port_id);
}
ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
const sp<AudioIoDescriptor>& legacy) {
media::AudioIoDescriptor aidl;
- aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->mIoHandle));
- aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->mPatch));
- aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mSamplingRate));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy->mFormat));
- aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy->mChannelMask));
- aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCount));
- aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCountHAL));
- aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mLatency));
- aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->mPortId));
+ aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->getIoHandle()));
+ aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->getPatch()));
+ aidl.isInput = legacy->getIsInput();
+ aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getSamplingRate()));
+ aidl.format = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(legacy->getFormat()));
+ aidl.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ legacy->getChannelMask(), legacy->getIsInput()));
+ aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCount()));
+ aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCountHAL()));
+ aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getLatency()));
+ aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->getPortId()));
return aidl;
}
@@ -1195,137 +2129,139 @@
}
ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl) {
+aidl2legacy_AudioContentType_audio_content_type_t(AudioContentType aidl) {
switch (aidl) {
- case media::AudioContentType::UNKNOWN:
+ case AudioContentType::UNKNOWN:
return AUDIO_CONTENT_TYPE_UNKNOWN;
- case media::AudioContentType::SPEECH:
+ case AudioContentType::SPEECH:
return AUDIO_CONTENT_TYPE_SPEECH;
- case media::AudioContentType::MUSIC:
+ case AudioContentType::MUSIC:
return AUDIO_CONTENT_TYPE_MUSIC;
- case media::AudioContentType::MOVIE:
+ case AudioContentType::MOVIE:
return AUDIO_CONTENT_TYPE_MOVIE;
- case media::AudioContentType::SONIFICATION:
+ case AudioContentType::SONIFICATION:
return AUDIO_CONTENT_TYPE_SONIFICATION;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioContentType>
+ConversionResult<AudioContentType>
legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy) {
switch (legacy) {
case AUDIO_CONTENT_TYPE_UNKNOWN:
- return media::AudioContentType::UNKNOWN;
+ return AudioContentType::UNKNOWN;
case AUDIO_CONTENT_TYPE_SPEECH:
- return media::AudioContentType::SPEECH;
+ return AudioContentType::SPEECH;
case AUDIO_CONTENT_TYPE_MUSIC:
- return media::AudioContentType::MUSIC;
+ return AudioContentType::MUSIC;
case AUDIO_CONTENT_TYPE_MOVIE:
- return media::AudioContentType::MOVIE;
+ return AudioContentType::MOVIE;
case AUDIO_CONTENT_TYPE_SONIFICATION:
- return media::AudioContentType::SONIFICATION;
+ return AudioContentType::SONIFICATION;
}
return unexpected(BAD_VALUE);
}
ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl) {
+aidl2legacy_AudioUsage_audio_usage_t(AudioUsage aidl) {
switch (aidl) {
- case media::AudioUsage::UNKNOWN:
+ case AudioUsage::INVALID:
+ break; // return error
+ case AudioUsage::UNKNOWN:
return AUDIO_USAGE_UNKNOWN;
- case media::AudioUsage::MEDIA:
+ case AudioUsage::MEDIA:
return AUDIO_USAGE_MEDIA;
- case media::AudioUsage::VOICE_COMMUNICATION:
+ case AudioUsage::VOICE_COMMUNICATION:
return AUDIO_USAGE_VOICE_COMMUNICATION;
- case media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
+ case AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
return AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
- case media::AudioUsage::ALARM:
+ case AudioUsage::ALARM:
return AUDIO_USAGE_ALARM;
- case media::AudioUsage::NOTIFICATION:
+ case AudioUsage::NOTIFICATION:
return AUDIO_USAGE_NOTIFICATION;
- case media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
+ case AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
return AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
- case media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST:
+ case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST:
return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST;
- case media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT:
+ case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT:
return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT;
- case media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED:
+ case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED:
return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED;
- case media::AudioUsage::NOTIFICATION_EVENT:
+ case AudioUsage::NOTIFICATION_EVENT:
return AUDIO_USAGE_NOTIFICATION_EVENT;
- case media::AudioUsage::ASSISTANCE_ACCESSIBILITY:
+ case AudioUsage::ASSISTANCE_ACCESSIBILITY:
return AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
- case media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
+ case AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
return AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
- case media::AudioUsage::ASSISTANCE_SONIFICATION:
+ case AudioUsage::ASSISTANCE_SONIFICATION:
return AUDIO_USAGE_ASSISTANCE_SONIFICATION;
- case media::AudioUsage::GAME:
+ case AudioUsage::GAME:
return AUDIO_USAGE_GAME;
- case media::AudioUsage::VIRTUAL_SOURCE:
+ case AudioUsage::VIRTUAL_SOURCE:
return AUDIO_USAGE_VIRTUAL_SOURCE;
- case media::AudioUsage::ASSISTANT:
+ case AudioUsage::ASSISTANT:
return AUDIO_USAGE_ASSISTANT;
- case media::AudioUsage::CALL_ASSISTANT:
+ case AudioUsage::CALL_ASSISTANT:
return AUDIO_USAGE_CALL_ASSISTANT;
- case media::AudioUsage::EMERGENCY:
+ case AudioUsage::EMERGENCY:
return AUDIO_USAGE_EMERGENCY;
- case media::AudioUsage::SAFETY:
+ case AudioUsage::SAFETY:
return AUDIO_USAGE_SAFETY;
- case media::AudioUsage::VEHICLE_STATUS:
+ case AudioUsage::VEHICLE_STATUS:
return AUDIO_USAGE_VEHICLE_STATUS;
- case media::AudioUsage::ANNOUNCEMENT:
+ case AudioUsage::ANNOUNCEMENT:
return AUDIO_USAGE_ANNOUNCEMENT;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioUsage>
+ConversionResult<AudioUsage>
legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy) {
switch (legacy) {
case AUDIO_USAGE_UNKNOWN:
- return media::AudioUsage::UNKNOWN;
+ return AudioUsage::UNKNOWN;
case AUDIO_USAGE_MEDIA:
- return media::AudioUsage::MEDIA;
+ return AudioUsage::MEDIA;
case AUDIO_USAGE_VOICE_COMMUNICATION:
- return media::AudioUsage::VOICE_COMMUNICATION;
+ return AudioUsage::VOICE_COMMUNICATION;
case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
- return media::AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
+ return AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
case AUDIO_USAGE_ALARM:
- return media::AudioUsage::ALARM;
+ return AudioUsage::ALARM;
case AUDIO_USAGE_NOTIFICATION:
- return media::AudioUsage::NOTIFICATION;
+ return AudioUsage::NOTIFICATION;
case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
- return media::AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
+ return AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
- return media::AudioUsage::NOTIFICATION_COMMUNICATION_REQUEST;
+ return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST;
case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
- return media::AudioUsage::NOTIFICATION_COMMUNICATION_INSTANT;
+ return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT;
case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
- return media::AudioUsage::NOTIFICATION_COMMUNICATION_DELAYED;
+ return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED;
case AUDIO_USAGE_NOTIFICATION_EVENT:
- return media::AudioUsage::NOTIFICATION_EVENT;
+ return AudioUsage::NOTIFICATION_EVENT;
case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- return media::AudioUsage::ASSISTANCE_ACCESSIBILITY;
+ return AudioUsage::ASSISTANCE_ACCESSIBILITY;
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
- return media::AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
+ return AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
- return media::AudioUsage::ASSISTANCE_SONIFICATION;
+ return AudioUsage::ASSISTANCE_SONIFICATION;
case AUDIO_USAGE_GAME:
- return media::AudioUsage::GAME;
+ return AudioUsage::GAME;
case AUDIO_USAGE_VIRTUAL_SOURCE:
- return media::AudioUsage::VIRTUAL_SOURCE;
+ return AudioUsage::VIRTUAL_SOURCE;
case AUDIO_USAGE_ASSISTANT:
- return media::AudioUsage::ASSISTANT;
+ return AudioUsage::ASSISTANT;
case AUDIO_USAGE_CALL_ASSISTANT:
- return media::AudioUsage::CALL_ASSISTANT;
+ return AudioUsage::CALL_ASSISTANT;
case AUDIO_USAGE_EMERGENCY:
- return media::AudioUsage::EMERGENCY;
+ return AudioUsage::EMERGENCY;
case AUDIO_USAGE_SAFETY:
- return media::AudioUsage::SAFETY;
+ return AudioUsage::SAFETY;
case AUDIO_USAGE_VEHICLE_STATUS:
- return media::AudioUsage::VEHICLE_STATUS;
+ return AudioUsage::VEHICLE_STATUS;
case AUDIO_USAGE_ANNOUNCEMENT:
- return media::AudioUsage::ANNOUNCEMENT;
+ return AudioUsage::ANNOUNCEMENT;
}
return unexpected(BAD_VALUE);
}
@@ -1361,6 +2297,12 @@
return AUDIO_FLAG_NO_SYSTEM_CAPTURE;
case media::AudioFlag::CAPTURE_PRIVATE:
return AUDIO_FLAG_CAPTURE_PRIVATE;
+ case media::AudioFlag::CONTENT_SPATIALIZED:
+ return AUDIO_FLAG_CONTENT_SPATIALIZED;
+ case media::AudioFlag::NEVER_SPATIALIZE:
+ return AUDIO_FLAG_NEVER_SPATIALIZE;
+ case media::AudioFlag::CALL_REDIRECTION:
+ return AUDIO_FLAG_CALL_REDIRECTION;
}
return unexpected(BAD_VALUE);
}
@@ -1398,6 +2340,12 @@
return media::AudioFlag::NO_SYSTEM_CAPTURE;
case AUDIO_FLAG_CAPTURE_PRIVATE:
return media::AudioFlag::CAPTURE_PRIVATE;
+ case AUDIO_FLAG_CONTENT_SPATIALIZED:
+ return media::AudioFlag::CONTENT_SPATIALIZED;
+ case AUDIO_FLAG_NEVER_SPATIALIZE:
+ return media::AudioFlag::NEVER_SPATIALIZE;
+ case AUDIO_FLAG_CALL_REDIRECTION:
+ return media::AudioFlag::CALL_REDIRECTION;
}
return unexpected(BAD_VALUE);
}
@@ -1423,7 +2371,7 @@
legacy.content_type = VALUE_OR_RETURN(
aidl2legacy_AudioContentType_audio_content_type_t(aidl.contentType));
legacy.usage = VALUE_OR_RETURN(aidl2legacy_AudioUsage_audio_usage_t(aidl.usage));
- legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+ legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_flags_mask_t_mask(aidl.flags));
RETURN_IF_ERROR(aidl2legacy_string(aidl.tags, legacy.tags, sizeof(legacy.tags)));
return legacy;
@@ -1435,51 +2383,51 @@
aidl.contentType = VALUE_OR_RETURN(
legacy2aidl_audio_content_type_t_AudioContentType(legacy.content_type));
aidl.usage = VALUE_OR_RETURN(legacy2aidl_audio_usage_t_AudioUsage(legacy.usage));
- aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+ aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_flags_mask_t_int32_t_mask(legacy.flags));
aidl.tags = VALUE_OR_RETURN(legacy2aidl_string(legacy.tags, sizeof(legacy.tags)));
return aidl;
}
ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl) {
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(AudioEncapsulationMode aidl) {
switch (aidl) {
- case media::AudioEncapsulationMode::NONE:
+ case AudioEncapsulationMode::INVALID:
+ break; // return error
+ case AudioEncapsulationMode::NONE:
return AUDIO_ENCAPSULATION_MODE_NONE;
- case media::AudioEncapsulationMode::ELEMENTARY_STREAM:
+ case AudioEncapsulationMode::ELEMENTARY_STREAM:
return AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM;
- case media::AudioEncapsulationMode::HANDLE:
+ case AudioEncapsulationMode::HANDLE:
return AUDIO_ENCAPSULATION_MODE_HANDLE;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioEncapsulationMode>
+ConversionResult<AudioEncapsulationMode>
legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy) {
switch (legacy) {
case AUDIO_ENCAPSULATION_MODE_NONE:
- return media::AudioEncapsulationMode::NONE;
+ return AudioEncapsulationMode::NONE;
case AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM:
- return media::AudioEncapsulationMode::ELEMENTARY_STREAM;
+ return AudioEncapsulationMode::ELEMENTARY_STREAM;
case AUDIO_ENCAPSULATION_MODE_HANDLE:
- return media::AudioEncapsulationMode::HANDLE;
+ return AudioEncapsulationMode::HANDLE;
}
return unexpected(BAD_VALUE);
}
ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl) {
- audio_offload_info_t legacy;
- legacy.version = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.version));
- legacy.size = sizeof(audio_offload_info_t);
- audio_config_base_t config = VALUE_OR_RETURN(
- aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
- legacy.sample_rate = config.sample_rate;
- legacy.channel_mask = config.channel_mask;
- legacy.format = config.format;
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const AudioOffloadInfo& aidl) {
+ audio_offload_info_t legacy = AUDIO_INFO_INITIALIZER;
+ audio_config_base_t base = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, false /*isInput*/));
+ legacy.sample_rate = base.sample_rate;
+ legacy.channel_mask = base.channel_mask;
+ legacy.format = base.format;
legacy.stream_type = VALUE_OR_RETURN(
aidl2legacy_AudioStreamType_audio_stream_type_t(aidl.streamType));
- legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRate));
+ legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRatePerSecond));
legacy.duration_us = VALUE_OR_RETURN(convertIntegral<int64_t>(aidl.durationUs));
legacy.has_video = aidl.hasVideo;
legacy.is_streaming = aidl.isStreaming;
@@ -1493,21 +2441,20 @@
return legacy;
}
-ConversionResult<media::AudioOffloadInfo>
+ConversionResult<AudioOffloadInfo>
legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy) {
- media::AudioOffloadInfo aidl;
+ AudioOffloadInfo aidl;
// Version 0.1 fields.
if (legacy.size < offsetof(audio_offload_info_t, usage) + sizeof(audio_offload_info_t::usage)) {
return unexpected(BAD_VALUE);
}
- aidl.version = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.version));
- aidl.config.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
- aidl.config.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
- aidl.config.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+ .channel_mask = legacy.channel_mask, .format = legacy.format };
+ aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(
+ base, false /*isInput*/));
aidl.streamType = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream_type));
- aidl.bitRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
+ aidl.bitRatePerSecond = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
aidl.durationUs = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.duration_us));
aidl.hasVideo = legacy.has_video;
aidl.isStreaming = legacy.is_streaming;
@@ -1531,25 +2478,25 @@
}
ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl) {
- audio_config_t legacy;
- legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
- legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+aidl2legacy_AudioConfig_audio_config_t(const AudioConfig& aidl, bool isInput) {
+ const audio_config_base_t legacyBase = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, isInput));
+ audio_config_t legacy = AUDIO_CONFIG_INITIALIZER;
+ legacy.sample_rate = legacyBase.sample_rate;
+ legacy.channel_mask = legacyBase.channel_mask;
+ legacy.format = legacyBase.format;
legacy.offload_info = VALUE_OR_RETURN(
aidl2legacy_AudioOffloadInfo_audio_offload_info_t(aidl.offloadInfo));
legacy.frame_count = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.frameCount));
return legacy;
}
-ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy) {
- media::AudioConfig aidl;
- aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
- aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ConversionResult<AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput) {
+ const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+ .channel_mask = legacy.channel_mask, .format = legacy.format };
+ AudioConfig aidl;
+ aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(base, isInput));
aidl.offloadInfo = VALUE_OR_RETURN(
legacy2aidl_audio_offload_info_t_AudioOffloadInfo(legacy.offload_info));
aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.frame_count));
@@ -1557,22 +2504,22 @@
}
ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl) {
+aidl2legacy_AudioConfigBase_audio_config_base_t(const AudioConfigBase& aidl, bool isInput) {
audio_config_base_t legacy;
legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
return legacy;
}
-ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy) {
- media::AudioConfigBase aidl;
+ConversionResult<AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput) {
+ AudioConfigBase aidl;
aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
return aidl;
}
@@ -1631,7 +2578,7 @@
}
ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl) {
+aidl2legacy_AudioUuid_audio_uuid_t(const AudioUuid& aidl) {
audio_uuid_t legacy;
legacy.timeLow = VALUE_OR_RETURN(convertReinterpret<uint32_t>(aidl.timeLow));
legacy.timeMid = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeMid));
@@ -1644,9 +2591,9 @@
return legacy;
}
-ConversionResult<media::AudioUuid>
+ConversionResult<AudioUuid>
legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy) {
- media::AudioUuid aidl;
+ AudioUuid aidl;
aidl.timeLow = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.timeLow));
aidl.timeMid = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeMid));
aidl.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeHiAndVersion));
@@ -1687,28 +2634,28 @@
ConversionResult<audio_encapsulation_metadata_type_t>
aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
- media::AudioEncapsulationMetadataType aidl) {
+ AudioEncapsulationMetadataType aidl) {
switch (aidl) {
- case media::AudioEncapsulationMetadataType::NONE:
+ case AudioEncapsulationMetadataType::NONE:
return AUDIO_ENCAPSULATION_METADATA_TYPE_NONE;
- case media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
+ case AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
return AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER;
- case media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
+ case AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
return AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioEncapsulationMetadataType>
+ConversionResult<AudioEncapsulationMetadataType>
legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
audio_encapsulation_metadata_type_t legacy) {
switch (legacy) {
case AUDIO_ENCAPSULATION_METADATA_TYPE_NONE:
- return media::AudioEncapsulationMetadataType::NONE;
+ return AudioEncapsulationMetadataType::NONE;
case AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER:
- return media::AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
+ return AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
case AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR:
- return media::AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
+ return AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
}
return unexpected(BAD_VALUE);
}
@@ -1718,9 +2665,9 @@
return convertBitmask<uint32_t,
int32_t,
audio_encapsulation_mode_t,
- media::AudioEncapsulationMode>(
+ AudioEncapsulationMode>(
aidl, aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t,
- indexToEnum_index<media::AudioEncapsulationMode>,
+ indexToEnum_index<AudioEncapsulationMode>,
enumToMask_index<uint32_t, audio_encapsulation_mode_t>);
}
@@ -1728,11 +2675,11 @@
legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy) {
return convertBitmask<int32_t,
uint32_t,
- media::AudioEncapsulationMode,
+ AudioEncapsulationMode,
audio_encapsulation_mode_t>(
legacy, legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode,
indexToEnum_index<audio_encapsulation_mode_t>,
- enumToMask_index<int32_t, media::AudioEncapsulationMode>);
+ enumToMask_index<int32_t, AudioEncapsulationMode>);
}
ConversionResult<uint32_t>
@@ -1740,9 +2687,9 @@
return convertBitmask<uint32_t,
int32_t,
audio_encapsulation_metadata_type_t,
- media::AudioEncapsulationMetadataType>(
+ AudioEncapsulationMetadataType>(
aidl, aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t,
- indexToEnum_index<media::AudioEncapsulationMetadataType>,
+ indexToEnum_index<AudioEncapsulationMetadataType>,
enumToMask_index<uint32_t, audio_encapsulation_metadata_type_t>);
}
@@ -1750,104 +2697,79 @@
legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy) {
return convertBitmask<int32_t,
uint32_t,
- media::AudioEncapsulationMetadataType,
+ AudioEncapsulationMetadataType,
audio_encapsulation_metadata_type_t>(
legacy, legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType,
indexToEnum_index<audio_encapsulation_metadata_type_t>,
- enumToMask_index<int32_t, media::AudioEncapsulationMetadataType>);
-}
-
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
- media::AudioMixLatencyClass aidl) {
- switch (aidl) {
- case media::AudioMixLatencyClass::LOW:
- return AUDIO_LATENCY_LOW;
- case media::AudioMixLatencyClass::NORMAL:
- return AUDIO_LATENCY_NORMAL;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
- audio_mix_latency_class_t legacy) {
- switch (legacy) {
- case AUDIO_LATENCY_LOW:
- return media::AudioMixLatencyClass::LOW;
- case AUDIO_LATENCY_NORMAL:
- return media::AudioMixLatencyClass::NORMAL;
- }
- return unexpected(BAD_VALUE);
+ enumToMask_index<int32_t, AudioEncapsulationMetadataType>);
}
ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl) {
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+ const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlSys) {
audio_port_device_ext legacy;
- legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
- legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
- RETURN_IF_ERROR(
- aidl2legacy_string(aidl.device.address, legacy.address, sizeof(legacy.address)));
+ legacy.hw_module = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+ aidl.device, &legacy.type, legacy.address));
legacy.encapsulation_modes = VALUE_OR_RETURN(
- aidl2legacy_AudioEncapsulationMode_mask(aidl.encapsulationModes));
+ aidl2legacy_AudioEncapsulationMode_mask(aidlSys.encapsulationModes));
legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
- aidl2legacy_AudioEncapsulationMetadataType_mask(aidl.encapsulationMetadataTypes));
+ aidl2legacy_AudioEncapsulationMetadataType_mask(
+ aidlSys.encapsulationMetadataTypes));
return legacy;
}
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy) {
- media::AudioPortDeviceExt aidl;
- aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
- aidl.device.address = VALUE_OR_RETURN(
- legacy2aidl_string(legacy.address, sizeof(legacy.address)));
- aidl.encapsulationModes = VALUE_OR_RETURN(
+status_t legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+ const audio_port_device_ext& legacy,
+ AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+ aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl->device = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+ aidlDeviceExt->encapsulationModes = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
- aidl.encapsulationMetadataTypes = VALUE_OR_RETURN(
+ aidlDeviceExt->encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioEncapsulationMetadataType_mask(legacy.encapsulation_metadata_types));
- return aidl;
+ return OK;
}
ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl) {
- audio_port_mix_ext legacy;
- legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+ const AudioPortMixExt& aidl, const media::AudioPortMixExtSys& aidlSys) {
+ audio_port_mix_ext legacy{};
+ legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
- legacy.latency_class = VALUE_OR_RETURN(
- aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(aidl.latencyClass));
return legacy;
}
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy) {
- media::AudioPortMixExt aidl;
- aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
- aidl.latencyClass = VALUE_OR_RETURN(
- legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(legacy.latency_class));
- return aidl;
+status_t
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy,
+ AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+ aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+ return OK;
}
ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl) {
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl) {
audio_port_session_ext legacy;
- legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+ legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
return legacy;
}
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy) {
- media::AudioPortSessionExt aidl;
- aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
- return aidl;
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy) {
+ return legacy2aidl_audio_session_t_int32_t(legacy.session);
}
// This type is unnamed in the original definition, thus we name it here.
using audio_port_v7_ext = decltype(audio_port_v7::ext);
-ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt(
- const media::AudioPortExt& aidl, media::AudioPortType type) {
+ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt_audio_port_v7_ext(
+ const AudioPortExt& aidl, media::AudioPortType type,
+ const media::AudioPortExtSys& aidlSys) {
audio_port_v7_ext legacy;
switch (type) {
case media::AudioPortType::NONE:
@@ -1857,66 +2779,83 @@
case media::AudioPortType::DEVICE:
legacy.device = VALUE_OR_RETURN(
aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
- VALUE_OR_RETURN(UNION_GET(aidl, device))));
+ VALUE_OR_RETURN(UNION_GET(aidl, device)),
+ VALUE_OR_RETURN(UNION_GET(aidlSys, device))));
return legacy;
case media::AudioPortType::MIX:
legacy.mix = VALUE_OR_RETURN(
aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
- VALUE_OR_RETURN(UNION_GET(aidl, mix))));
+ VALUE_OR_RETURN(UNION_GET(aidl, mix)),
+ VALUE_OR_RETURN(UNION_GET(aidlSys, mix))));
return legacy;
case media::AudioPortType::SESSION:
- legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortSessionExt_audio_port_session_ext(
- VALUE_OR_RETURN(UNION_GET(aidl, session))));
+ legacy.session = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_port_session_ext(
+ VALUE_OR_RETURN(UNION_GET(aidl, session))));
return legacy;
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
-ConversionResult<media::AudioPortExt> legacy2aidl_AudioPortExt(
- const audio_port_v7_ext& legacy, audio_port_type_t type) {
- media::AudioPortExt aidl;
+status_t legacy2aidl_AudioPortExt(
+ const audio_port_v7_ext& legacy, audio_port_type_t type,
+ AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
switch (type) {
case AUDIO_PORT_TYPE_NONE:
- UNION_SET(aidl, unspecified, false);
- return aidl;
- case AUDIO_PORT_TYPE_DEVICE:
- UNION_SET(aidl, device,
- VALUE_OR_RETURN(
- legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device)));
- return aidl;
- case AUDIO_PORT_TYPE_MIX:
- UNION_SET(aidl, mix,
- VALUE_OR_RETURN(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix)));
- return aidl;
+ UNION_SET(*aidl, unspecified, false);
+ UNION_SET(*aidlSys, unspecified, false);
+ return OK;
+ case AUDIO_PORT_TYPE_DEVICE: {
+ AudioPortDeviceExt device;
+ media::AudioPortDeviceExtSys deviceSys;
+ RETURN_STATUS_IF_ERROR(
+ legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+ legacy.device, &device, &deviceSys));
+ UNION_SET(*aidl, device, device);
+ UNION_SET(*aidlSys, device, deviceSys);
+ return OK;
+ }
+ case AUDIO_PORT_TYPE_MIX: {
+ AudioPortMixExt mix;
+ media::AudioPortMixExtSys mixSys;
+ RETURN_STATUS_IF_ERROR(
+ legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+ legacy.mix, &mix, &mixSys));
+ UNION_SET(*aidl, mix, mix);
+ UNION_SET(*aidlSys, mix, mixSys);
+ return OK;
+ }
case AUDIO_PORT_TYPE_SESSION:
- UNION_SET(aidl, session,
- VALUE_OR_RETURN(legacy2aidl_audio_port_session_ext_AudioPortSessionExt(
- legacy.session)));
- return aidl;
+ UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_session_ext_int32_t(legacy.session)));
+ UNION_SET(*aidlSys, unspecified, false);
+ return OK;
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl) {
+aidl2legacy_AudioProfile_audio_profile(const AudioProfile& aidl, bool isInput) {
audio_profile legacy;
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
- if (aidl.samplingRates.size() > std::size(legacy.sample_rates)) {
+ if (aidl.sampleRates.size() > std::size(legacy.sample_rates)) {
return unexpected(BAD_VALUE);
}
RETURN_IF_ERROR(
- convertRange(aidl.samplingRates.begin(), aidl.samplingRates.end(), legacy.sample_rates,
+ convertRange(aidl.sampleRates.begin(), aidl.sampleRates.end(), legacy.sample_rates,
convertIntegral<int32_t, unsigned int>));
- legacy.num_sample_rates = aidl.samplingRates.size();
+ legacy.num_sample_rates = aidl.sampleRates.size();
if (aidl.channelMasks.size() > std::size(legacy.channel_masks)) {
return unexpected(BAD_VALUE);
}
RETURN_IF_ERROR(
convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
- aidl2legacy_int32_t_audio_channel_mask_t));
+ [isInput](const AudioChannelLayout& l) {
+ return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+ }));
legacy.num_channel_masks = aidl.channelMasks.size();
legacy.encapsulation_type = VALUE_OR_RETURN(
@@ -1924,17 +2863,17 @@
return legacy;
}
-ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy) {
- media::AudioProfile aidl;
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ConversionResult<AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput) {
+ AudioProfile aidl;
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
return unexpected(BAD_VALUE);
}
RETURN_IF_ERROR(
convertRange(legacy.sample_rates, legacy.sample_rates + legacy.num_sample_rates,
- std::back_inserter(aidl.samplingRates),
+ std::back_inserter(aidl.sampleRates),
convertIntegral<unsigned int, int32_t>));
if (legacy.num_channel_masks > std::size(legacy.channel_masks)) {
@@ -1943,7 +2882,9 @@
RETURN_IF_ERROR(
convertRange(legacy.channel_masks, legacy.channel_masks + legacy.num_channel_masks,
std::back_inserter(aidl.channelMasks),
- legacy2aidl_audio_channel_mask_t_int32_t));
+ [isInput](audio_channel_mask_t m) {
+ return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+ }));
aidl.encapsulationType = VALUE_OR_RETURN(
legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
@@ -1952,11 +2893,11 @@
}
ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl) {
+aidl2legacy_AudioGain_audio_gain(const AudioGain& aidl, bool isInput) {
audio_gain legacy;
legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
- legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+ legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.channelMask, isInput));
legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
@@ -1966,12 +2907,12 @@
return legacy;
}
-ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy) {
- media::AudioGain aidl;
+ConversionResult<AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput) {
+ AudioGain aidl;
aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
aidl.maxValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_value));
aidl.defaultValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.default_value));
@@ -1984,63 +2925,76 @@
ConversionResult<audio_port_v7>
aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl) {
audio_port_v7 legacy;
- legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
- legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
- legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
- RETURN_IF_ERROR(aidl2legacy_string(aidl.name, legacy.name, sizeof(legacy.name)));
+ legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+ legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
+ legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
+ RETURN_IF_ERROR(aidl2legacy_string(aidl.hal.name, legacy.name, sizeof(legacy.name)));
- if (aidl.profiles.size() > std::size(legacy.audio_profiles)) {
+ if (aidl.hal.profiles.size() > std::size(legacy.audio_profiles)) {
return unexpected(BAD_VALUE);
}
- RETURN_IF_ERROR(convertRange(aidl.profiles.begin(), aidl.profiles.end(), legacy.audio_profiles,
- aidl2legacy_AudioProfile_audio_profile));
- legacy.num_audio_profiles = aidl.profiles.size();
+ const bool isInput =
+ VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
+ RETURN_IF_ERROR(convertRange(
+ aidl.hal.profiles.begin(), aidl.hal.profiles.end(), legacy.audio_profiles,
+ [isInput](const AudioProfile& p) {
+ return aidl2legacy_AudioProfile_audio_profile(p, isInput);
+ }));
+ legacy.num_audio_profiles = aidl.hal.profiles.size();
- if (aidl.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
+ if (aidl.hal.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
return unexpected(BAD_VALUE);
}
RETURN_IF_ERROR(
- convertRange(aidl.extraAudioDescriptors.begin(), aidl.extraAudioDescriptors.end(),
- legacy.extra_audio_descriptors,
- aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
- legacy.num_extra_audio_descriptors = aidl.extraAudioDescriptors.size();
+ convertRange(
+ aidl.hal.extraAudioDescriptors.begin(), aidl.hal.extraAudioDescriptors.end(),
+ legacy.extra_audio_descriptors,
+ aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
+ legacy.num_extra_audio_descriptors = aidl.hal.extraAudioDescriptors.size();
- if (aidl.gains.size() > std::size(legacy.gains)) {
+ if (aidl.hal.gains.size() > std::size(legacy.gains)) {
return unexpected(BAD_VALUE);
}
- RETURN_IF_ERROR(convertRange(aidl.gains.begin(), aidl.gains.end(), legacy.gains,
- aidl2legacy_AudioGain_audio_gain));
- legacy.num_gains = aidl.gains.size();
+ RETURN_IF_ERROR(convertRange(aidl.hal.gains.begin(), aidl.hal.gains.end(), legacy.gains,
+ [isInput](const AudioGain& g) {
+ return aidl2legacy_AudioGain_audio_gain(g, isInput);
+ }));
+ legacy.num_gains = aidl.hal.gains.size();
legacy.active_config = VALUE_OR_RETURN(
- aidl2legacy_AudioPortConfig_audio_port_config(aidl.activeConfig));
- legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortExt(aidl.ext, aidl.type));
+ aidl2legacy_AudioPortConfig_audio_port_config(aidl.sys.activeConfig));
+ legacy.ext = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortExt_audio_port_v7_ext(aidl.hal.ext, aidl.sys.type, aidl.sys.ext));
return legacy;
}
ConversionResult<media::AudioPort>
legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy) {
media::AudioPort aidl;
- aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
- aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
- aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
- aidl.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+ aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+ aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+ aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+ aidl.hal.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
return unexpected(BAD_VALUE);
}
+ const bool isInput = VALUE_OR_RETURN(direction(legacy.role, legacy.type)) == Direction::INPUT;
RETURN_IF_ERROR(
convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
- std::back_inserter(aidl.profiles),
- legacy2aidl_audio_profile_AudioProfile));
+ std::back_inserter(aidl.hal.profiles),
+ [isInput](const audio_profile& p) {
+ return legacy2aidl_audio_profile_AudioProfile(p, isInput);
+ }));
if (legacy.num_extra_audio_descriptors > std::size(legacy.extra_audio_descriptors)) {
return unexpected(BAD_VALUE);
}
+ aidl.sys.profiles.resize(legacy.num_audio_profiles);
RETURN_IF_ERROR(
convertRange(legacy.extra_audio_descriptors,
legacy.extra_audio_descriptors + legacy.num_extra_audio_descriptors,
- std::back_inserter(aidl.extraAudioDescriptors),
+ std::back_inserter(aidl.hal.extraAudioDescriptors),
legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor));
if (legacy.num_gains > std::size(legacy.gains)) {
@@ -2048,53 +3002,66 @@
}
RETURN_IF_ERROR(
convertRange(legacy.gains, legacy.gains + legacy.num_gains,
- std::back_inserter(aidl.gains),
- legacy2aidl_audio_gain_AudioGain));
+ std::back_inserter(aidl.hal.gains),
+ [isInput](const audio_gain& g) {
+ return legacy2aidl_audio_gain_AudioGain(g, isInput);
+ }));
+ aidl.sys.gains.resize(legacy.num_gains);
- aidl.activeConfig = VALUE_OR_RETURN(
+ aidl.sys.activeConfig = VALUE_OR_RETURN(
legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
- aidl.ext = VALUE_OR_RETURN(legacy2aidl_AudioPortExt(legacy.ext, legacy.type));
+ aidl.sys.activeConfig.hal.portId = aidl.hal.id;
+ RETURN_IF_ERROR(
+ legacy2aidl_AudioPortExt(legacy.ext, legacy.type, &aidl.hal.ext, &aidl.sys.ext));
return aidl;
}
ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl) {
+aidl2legacy_AudioMode_audio_mode_t(AudioMode aidl) {
switch (aidl) {
- case media::AudioMode::INVALID:
+ case AudioMode::SYS_RESERVED_INVALID:
return AUDIO_MODE_INVALID;
- case media::AudioMode::CURRENT:
+ case AudioMode::SYS_RESERVED_CURRENT:
return AUDIO_MODE_CURRENT;
- case media::AudioMode::NORMAL:
+ case AudioMode::NORMAL:
return AUDIO_MODE_NORMAL;
- case media::AudioMode::RINGTONE:
+ case AudioMode::RINGTONE:
return AUDIO_MODE_RINGTONE;
- case media::AudioMode::IN_CALL:
+ case AudioMode::IN_CALL:
return AUDIO_MODE_IN_CALL;
- case media::AudioMode::IN_COMMUNICATION:
+ case AudioMode::IN_COMMUNICATION:
return AUDIO_MODE_IN_COMMUNICATION;
- case media::AudioMode::CALL_SCREEN:
+ case AudioMode::CALL_SCREEN:
return AUDIO_MODE_CALL_SCREEN;
+ case AudioMode::SYS_RESERVED_CALL_REDIRECT:
+ return AUDIO_MODE_CALL_REDIRECT;
+ case AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT:
+ return AUDIO_MODE_COMMUNICATION_REDIRECT;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioMode>
+ConversionResult<AudioMode>
legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
switch (legacy) {
case AUDIO_MODE_INVALID:
- return media::AudioMode::INVALID;
+ return AudioMode::SYS_RESERVED_INVALID;
case AUDIO_MODE_CURRENT:
- return media::AudioMode::CURRENT;
+ return AudioMode::SYS_RESERVED_CURRENT;
case AUDIO_MODE_NORMAL:
- return media::AudioMode::NORMAL;
+ return AudioMode::NORMAL;
case AUDIO_MODE_RINGTONE:
- return media::AudioMode::RINGTONE;
+ return AudioMode::RINGTONE;
case AUDIO_MODE_IN_CALL:
- return media::AudioMode::IN_CALL;
+ return AudioMode::IN_CALL;
case AUDIO_MODE_IN_COMMUNICATION:
- return media::AudioMode::IN_COMMUNICATION;
+ return AudioMode::IN_COMMUNICATION;
case AUDIO_MODE_CALL_SCREEN:
- return media::AudioMode::CALL_SCREEN;
+ return AudioMode::CALL_SCREEN;
+ case AUDIO_MODE_CALL_REDIRECT:
+ return AudioMode::SYS_RESERVED_CALL_REDIRECT;
+ case AUDIO_MODE_COMMUNICATION_REDIRECT:
+ return AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT;
case AUDIO_MODE_CNT:
break;
}
@@ -2244,30 +3211,30 @@
}
ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl) {
+aidl2legacy_AudioStandard_audio_standard_t(AudioStandard aidl) {
switch (aidl) {
- case media::AudioStandard::NONE:
+ case AudioStandard::NONE:
return AUDIO_STANDARD_NONE;
- case media::AudioStandard::EDID:
+ case AudioStandard::EDID:
return AUDIO_STANDARD_EDID;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioStandard>
+ConversionResult<AudioStandard>
legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy) {
switch (legacy) {
case AUDIO_STANDARD_NONE:
- return media::AudioStandard::NONE;
+ return AudioStandard::NONE;
case AUDIO_STANDARD_EDID:
- return media::AudioStandard::EDID;
+ return AudioStandard::EDID;
}
return unexpected(BAD_VALUE);
}
ConversionResult<audio_extra_audio_descriptor>
aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
- const media::ExtraAudioDescriptor& aidl) {
+ const ExtraAudioDescriptor& aidl) {
audio_extra_audio_descriptor legacy;
legacy.standard = VALUE_OR_RETURN(aidl2legacy_AudioStandard_audio_standard_t(aidl.standard));
if (aidl.audioDescriptor.size() > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
@@ -2282,10 +3249,10 @@
return legacy;
}
-ConversionResult<media::ExtraAudioDescriptor>
+ConversionResult<ExtraAudioDescriptor>
legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
const audio_extra_audio_descriptor& legacy) {
- media::ExtraAudioDescriptor aidl;
+ ExtraAudioDescriptor aidl;
aidl.standard = VALUE_OR_RETURN(legacy2aidl_audio_standard_t_AudioStandard(legacy.standard));
if (legacy.descriptor_length > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
return unexpected(BAD_VALUE);
@@ -2301,24 +3268,24 @@
ConversionResult<audio_encapsulation_type_t>
aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
- const media::AudioEncapsulationType& aidl) {
+ const AudioEncapsulationType& aidl) {
switch (aidl) {
- case media::AudioEncapsulationType::NONE:
+ case AudioEncapsulationType::NONE:
return AUDIO_ENCAPSULATION_TYPE_NONE;
- case media::AudioEncapsulationType::IEC61937:
+ case AudioEncapsulationType::IEC61937:
return AUDIO_ENCAPSULATION_TYPE_IEC61937;
}
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioEncapsulationType>
+ConversionResult<AudioEncapsulationType>
legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
const audio_encapsulation_type_t & legacy) {
switch (legacy) {
case AUDIO_ENCAPSULATION_TYPE_NONE:
- return media::AudioEncapsulationType::NONE;
+ return AudioEncapsulationType::NONE;
case AUDIO_ENCAPSULATION_TYPE_IEC61937:
- return media::AudioEncapsulationType::IEC61937;
+ return AudioEncapsulationType::IEC61937;
}
return unexpected(BAD_VALUE);
}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 9c307ff..7e180a2 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -25,11 +25,13 @@
static_libs: [
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"av-types-aidl-cpp",
],
export_static_lib_headers: [
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"av-types-aidl-cpp",
],
target: {
@@ -49,6 +51,7 @@
"PolicyAidlConversion.cpp"
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -69,6 +72,7 @@
include_dirs: ["system/media/audio_utils/include"],
export_include_dirs: ["include"],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -109,9 +113,11 @@
"TrackPlayerBase.cpp",
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"audiopolicy-types-aidl-cpp",
"av-types-aidl-cpp",
"capture_state_listener-aidl-cpp",
@@ -131,14 +137,16 @@
"libprocessgroup",
"libshmemcompat",
"libutils",
- "libvibrator",
"framework-permission-aidl-cpp",
+ "packagemanager_aidl-cpp",
],
export_shared_lib_headers: [
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"framework-permission-aidl-cpp",
"libbinder",
+ "libmediametrics",
],
include_dirs: [
@@ -224,16 +232,19 @@
"libaudioclient_aidl_conversion_util",
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libbase",
"libbinder",
"liblog",
"libshmemcompat",
+ "libstagefright_foundation",
"libutils",
"shared-file-region-aidl-cpp",
"framework-permission-aidl-cpp",
],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libbase",
"shared-file-region-aidl-cpp",
@@ -303,57 +314,31 @@
srcs: [
"aidl/android/media/AudioAttributesInternal.aidl",
"aidl/android/media/AudioClient.aidl",
- "aidl/android/media/AudioConfig.aidl",
- "aidl/android/media/AudioConfigBase.aidl",
- "aidl/android/media/AudioContentType.aidl",
- "aidl/android/media/AudioDevice.aidl",
"aidl/android/media/AudioDualMonoMode.aidl",
- "aidl/android/media/AudioEncapsulationMode.aidl",
- "aidl/android/media/AudioEncapsulationMetadataType.aidl",
- "aidl/android/media/AudioEncapsulationType.aidl",
"aidl/android/media/AudioFlag.aidl",
- "aidl/android/media/AudioGain.aidl",
- "aidl/android/media/AudioGainConfig.aidl",
- "aidl/android/media/AudioGainMode.aidl",
- "aidl/android/media/AudioInputFlags.aidl",
+ "aidl/android/media/AudioGainSys.aidl",
"aidl/android/media/AudioIoConfigEvent.aidl",
"aidl/android/media/AudioIoDescriptor.aidl",
- "aidl/android/media/AudioIoFlags.aidl",
- "aidl/android/media/AudioMixLatencyClass.aidl",
- "aidl/android/media/AudioMode.aidl",
- "aidl/android/media/AudioOffloadInfo.aidl",
- "aidl/android/media/AudioOutputFlags.aidl",
"aidl/android/media/AudioPatch.aidl",
"aidl/android/media/AudioPlaybackRate.aidl",
"aidl/android/media/AudioPort.aidl",
+ "aidl/android/media/AudioPortSys.aidl",
"aidl/android/media/AudioPortConfig.aidl",
- "aidl/android/media/AudioPortConfigType.aidl",
- "aidl/android/media/AudioPortConfigDeviceExt.aidl",
- "aidl/android/media/AudioPortConfigExt.aidl",
- "aidl/android/media/AudioPortConfigMixExt.aidl",
- "aidl/android/media/AudioPortConfigMixExtUseCase.aidl",
- "aidl/android/media/AudioPortConfigSessionExt.aidl",
- "aidl/android/media/AudioPortDeviceExt.aidl",
- "aidl/android/media/AudioPortExt.aidl",
- "aidl/android/media/AudioPortMixExt.aidl",
+ "aidl/android/media/AudioPortConfigSys.aidl",
+ "aidl/android/media/AudioPortDeviceExtSys.aidl",
+ "aidl/android/media/AudioPortExtSys.aidl",
+ "aidl/android/media/AudioPortMixExtSys.aidl",
"aidl/android/media/AudioPortRole.aidl",
- "aidl/android/media/AudioPortSessionExt.aidl",
"aidl/android/media/AudioPortType.aidl",
- "aidl/android/media/AudioProfile.aidl",
- "aidl/android/media/AudioSourceType.aidl",
- "aidl/android/media/AudioStandard.aidl",
- "aidl/android/media/AudioStreamType.aidl",
+ "aidl/android/media/AudioProfileSys.aidl",
"aidl/android/media/AudioTimestampInternal.aidl",
"aidl/android/media/AudioUniqueIdUse.aidl",
- "aidl/android/media/AudioUsage.aidl",
- "aidl/android/media/AudioUuid.aidl",
"aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
- "aidl/android/media/ExtraAudioDescriptor.aidl",
"aidl/android/media/TrackSecondaryOutputInfo.aidl",
],
imports: [
- "audio_common-aidl",
+ "android.media.audio.common.types",
"framework-permission-aidl",
],
backend: {
@@ -364,6 +349,9 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
},
}
aidl_interface {
@@ -389,9 +377,12 @@
"aidl/android/media/AudioVolumeGroup.aidl",
"aidl/android/media/DeviceRole.aidl",
"aidl/android/media/SoundTriggerSession.aidl",
+ "aidl/android/media/SpatializationLevel.aidl",
+ "aidl/android/media/SpatializationMode.aidl",
+ "aidl/android/media/SpatializerHeadTrackingMode.aidl",
],
imports: [
- "audio_common-aidl",
+ "android.media.audio.common.types",
"audioclient-types-aidl",
],
backend: {
@@ -402,6 +393,9 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
},
}
@@ -431,7 +425,7 @@
"aidl/android/media/IAudioTrackCallback.aidl",
],
imports: [
- "audio_common-aidl",
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"av-types-aidl",
"effect-aidl",
@@ -447,6 +441,9 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
},
}
@@ -459,17 +456,18 @@
srcs: [
"aidl/android/media/GetInputForAttrResponse.aidl",
"aidl/android/media/GetOutputForAttrResponse.aidl",
- "aidl/android/media/Int.aidl",
+ "aidl/android/media/GetSpatializerResponse.aidl",
"aidl/android/media/RecordClientInfo.aidl",
"aidl/android/media/IAudioPolicyService.aidl",
"aidl/android/media/IAudioPolicyServiceClient.aidl",
],
imports: [
- "audio_common-aidl",
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"audiopolicy-types-aidl",
"capture_state_listener-aidl",
"framework-permission-aidl",
+ "spatializer-aidl",
],
double_loadable: true,
@@ -481,5 +479,38 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
+ },
+}
+
+aidl_interface {
+ name: "spatializer-aidl",
+ unstable: true,
+ local_include_dir: "aidl",
+ host_supported: true,
+ vendor_available: true,
+ srcs: [
+ "aidl/android/media/INativeSpatializerCallback.aidl",
+ "aidl/android/media/ISpatializer.aidl",
+ "aidl/android/media/ISpatializerHeadTrackingCallback.aidl",
+ ],
+ imports: [
+ "audiopolicy-types-aidl",
+ ],
+
+ double_loadable: true,
+ backend: {
+ cpp: {
+ min_sdk_version: "29",
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.media",
+ ],
+ },
+ java: {
+ sdk_version: "module_current",
+ },
},
}
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
index 83bf5a7..260c06c 100644
--- a/media/libaudioclient/AudioAttributes.cpp
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -24,9 +24,6 @@
#include <media/AudioAttributes.h>
#include <media/PolicyAidlConversion.h>
-#define RETURN_STATUS_IF_ERROR(x) \
- { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
namespace android {
status_t AudioAttributes::readFromParcel(const Parcel* parcel) {
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 6ad5483..62f863d 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -32,16 +32,12 @@
#include <private/media/AudioEffectShared.h>
#include <utils/Log.h>
-#define RETURN_STATUS_IF_ERROR(x) \
- { \
- auto _tmp = (x); \
- if (_tmp != OK) return _tmp; \
- }
-
namespace android {
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
using media::IAudioPolicyService;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioUuid;
namespace {
@@ -70,7 +66,8 @@
audio_session_t sessionId,
audio_io_handle_t io,
const AudioDeviceTypeAddr& device,
- bool probe)
+ bool probe,
+ bool notifyFramesProcessed)
{
sp<media::IEffect> iEffect;
sp<IMemory> cblk;
@@ -124,6 +121,7 @@
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(device));
request.attributionSource = mClientAttributionSource;
request.probe = probe;
+ request.notifyFramesProcessed = notifyFramesProcessed;
media::CreateEffectResponse response;
@@ -194,7 +192,8 @@
audio_session_t sessionId,
audio_io_handle_t io,
const AudioDeviceTypeAddr& device,
- bool probe)
+ bool probe,
+ bool notifyFramesProcessed)
{
effect_uuid_t type;
effect_uuid_t *pType = nullptr;
@@ -211,7 +210,8 @@
pUuid = &uuid;
}
- return set(pType, pUuid, priority, cbf, user, sessionId, io, device, probe);
+ return set(pType, pUuid, priority, cbf, user, sessionId, io,
+ device, probe, notifyFramesProcessed);
}
@@ -522,6 +522,13 @@
}
}
+void AudioEffect::framesProcessed(int32_t frames)
+{
+ if (mCbf != NULL) {
+ mCbf(EVENT_FRAMES_PROCESSED, mUserData, &frames);
+ }
+}
+
// -------------------------------------------------------------------------
status_t AudioEffect::queryNumberEffects(uint32_t *numEffects)
@@ -560,7 +567,7 @@
int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_session_t_int32_t(audioSession));
- media::Int countAidl;
+ media::audio::common::Int countAidl;
countAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*count));
std::vector<media::EffectDescriptor> retAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -608,12 +615,12 @@
uuid = *EFFECT_UUID_NULL;
}
- media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
- media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+ AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+ AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_String16_string(opPackageName));
- media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(source));
+ AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(source));
int32_t retAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->addSourceDefaultEffect(typeAidl, opPackageNameAidl, uuidAidl, priority, sourceAidl,
@@ -651,11 +658,11 @@
uuid = *EFFECT_UUID_NULL;
}
- media::AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
- media::AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
+ AudioUuid typeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(type));
+ AudioUuid uuidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(uuid));
std::string opPackageNameAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_String16_string(opPackageName));
- media::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
+ media::audio::common::AudioUsage usageAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_usage_t_AudioUsage(usage));
int32_t retAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index f98027a..ecd423a 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -21,9 +21,6 @@
#include <media/AudioAttributes.h>
#include <media/PolicyAidlConversion.h>
-#define RETURN_STATUS_IF_ERROR(x) \
- { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
namespace android {
status_t AudioProductStrategy::readFromParcel(const Parcel* parcel) {
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a1d3bdb..5924d55 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -22,6 +22,7 @@
#include <android-base/macros.h>
#include <sys/resource.h>
+#include <audio_utils/format.h>
#include <audiomanager/AudioManager.h>
#include <audiomanager/IAudioManager.h>
#include <binder/Binder.h>
@@ -142,7 +143,7 @@
audio_channel_mask_t channelMask,
const AttributionSourceState& client,
size_t frameCount,
- callback_t cbf,
+ legacy_callback_t callback,
void* user,
uint32_t notificationFrames,
audio_session_t sessionId,
@@ -162,7 +163,39 @@
{
uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
- (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+ (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback, user,
+ notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
+ uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
+ microphoneFieldDimension);
+}
+
+AudioRecord::AudioRecord(
+ audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const AttributionSourceState& client,
+ size_t frameCount,
+ const wp<IAudioRecordCallback>& callback,
+ uint32_t notificationFrames,
+ audio_session_t sessionId,
+ transfer_type transferType,
+ audio_input_flags_t flags,
+ const audio_attributes_t* pAttributes,
+ audio_port_handle_t selectedDeviceId,
+ audio_microphone_direction_t selectedMicDirection,
+ float microphoneFieldDimension)
+ : mActive(false),
+ mStatus(NO_INIT),
+ mClientAttributionSource(client),
+ mSessionId(AUDIO_SESSION_ALLOCATE),
+ mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+ mPreviousSchedulingGroup(SP_DEFAULT),
+ mProxy(nullptr)
+{
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
+ (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
microphoneFieldDimension);
@@ -205,8 +238,8 @@
// Otherwise the callback thread will never exit.
stop();
if (mAudioRecordThread != 0) {
- mProxy->interrupt();
mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
+ mProxy->interrupt();
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
@@ -217,14 +250,44 @@
AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
}
}
+namespace {
+class LegacyCallbackWrapper : public AudioRecord::IAudioRecordCallback {
+ const AudioRecord::legacy_callback_t mCallback;
+ void* const mData;
+
+ public:
+ LegacyCallbackWrapper(AudioRecord::legacy_callback_t callback, void* user)
+ : mCallback(callback), mData(user) {}
+
+ size_t onMoreData(const AudioRecord::Buffer& buffer) override {
+ AudioRecord::Buffer copy = buffer;
+ mCallback(AudioRecord::EVENT_MORE_DATA, mData, ©);
+ return copy.size;
+ }
+
+ void onOverrun() override { mCallback(AudioRecord::EVENT_OVERRUN, mData, nullptr); }
+
+ void onMarker(uint32_t markerPosition) override {
+ mCallback(AudioRecord::EVENT_MARKER, mData, &markerPosition);
+ }
+
+ void onNewPos(uint32_t newPos) override {
+ mCallback(AudioRecord::EVENT_NEW_POS, mData, &newPos);
+ }
+
+ void onNewIAudioRecord() override {
+ mCallback(AudioRecord::EVENT_NEW_IAUDIORECORD, mData, nullptr);
+ }
+};
+} // namespace
+
status_t AudioRecord::set(
audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- callback_t cbf,
- void* user,
+ const wp<IAudioRecordCallback>& callback,
uint32_t notificationFrames,
bool threadCanCallJava,
audio_session_t sessionId,
@@ -240,7 +303,7 @@
{
status_t status = NO_ERROR;
uint32_t channelCount;
-
+ const sp<IAudioRecordCallback> callbackHandle = callback.promote();
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
@@ -273,15 +336,15 @@
switch (transferType) {
case TRANSFER_DEFAULT:
- if (cbf == NULL || threadCanCallJava) {
+ if (callbackHandle == nullptr || threadCanCallJava) {
transferType = TRANSFER_SYNC;
} else {
transferType = TRANSFER_CALLBACK;
}
break;
case TRANSFER_CALLBACK:
- if (cbf == NULL) {
- ALOGE("%s(): Transfer type TRANSFER_CALLBACK but cbf == NULL", __func__);
+ if (callbackHandle == nullptr) {
+ ALOGE("%s(): Transfer type TRANSFER_CALLBACK but callback == nullptr", __func__);
status = BAD_VALUE;
goto exit;
}
@@ -303,7 +366,7 @@
goto exit;
}
- if (pAttributes == NULL) {
+ if (pAttributes == nullptr) {
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
mAttributes.source = inputSource;
if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION
@@ -359,9 +422,9 @@
ALOGV("%s(): mSessionId %d", __func__, mSessionId);
mOrigFlags = mFlags = flags;
- mCbf = cbf;
+ mCallback = callbackHandle;
- if (cbf != NULL) {
+ if (mCallback != nullptr) {
mAudioRecordThread = new AudioRecordThread(*this);
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
// thread begins in paused state, and will not reference us until start()
@@ -384,7 +447,6 @@
goto exit;
}
- mUserData = user;
// TODO: add audio hardware input latency here
mLatency = (1000LL * mFrameCount) / mSampleRate;
mMarkerPosition = 0;
@@ -406,6 +468,37 @@
return status;
}
+status_t AudioRecord::set(
+ audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ legacy_callback_t callback,
+ void* user,
+ uint32_t notificationFrames,
+ bool threadCanCallJava,
+ audio_session_t sessionId,
+ transfer_type transferType,
+ audio_input_flags_t flags,
+ uid_t uid,
+ pid_t pid,
+ const audio_attributes_t* pAttributes,
+ audio_port_handle_t selectedDeviceId,
+ audio_microphone_direction_t selectedMicDirection,
+ float microphoneFieldDimension,
+ int32_t maxSharedAudioHistoryMs)
+{
+ if (callback != nullptr) {
+ mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+ } else if (user) {
+ LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+ }
+ return set(inputSource, sampleRate, format, channelMask, frameCount, mLegacyCallbackWrapper,
+ notificationFrames, threadCanCallJava, sessionId, transferType, flags, uid, pid,
+ pAttributes, selectedDeviceId, selectedMicDirection, microphoneFieldDimension,
+ maxSharedAudioHistoryMs);
+}
// -------------------------------------------------------------------------
status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
@@ -535,12 +628,12 @@
status_t AudioRecord::setMarkerPosition(uint32_t marker)
{
+ AutoMutex lock(mLock);
// The only purpose of setting marker position is to get a callback
- if (mCbf == NULL) {
+ if (mCallback.promote() == nullptr) {
return INVALID_OPERATION;
}
- AutoMutex lock(mLock);
mMarkerPosition = marker;
mMarkerReached = false;
@@ -565,12 +658,12 @@
status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
{
+ AutoMutex lock(mLock);
// The only purpose of setting position update period is to get a callback
- if (mCbf == NULL) {
+ if (mCallback.promote() == nullptr) {
return INVALID_OPERATION;
}
- AutoMutex lock(mLock);
mNewPosition = mProxy->getPosition() + updatePeriod;
mUpdatePeriod = updatePeriod;
@@ -668,6 +761,8 @@
// ---- Explicit Routing ---------------------------------------------------
status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
AutoMutex lock(mLock);
+ ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+ __func__, mPortId, deviceId, mSelectedDeviceId);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
if (mStatus == NO_ERROR) {
@@ -754,7 +849,7 @@
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
IAudioFlinger::CreateRecordInput input;
IAudioFlinger::CreateRecordOutput output;
- audio_session_t originalSessionId;
+ [[maybe_unused]] audio_session_t originalSessionId;
void *iMemPointer;
audio_track_cblk_t* cblk;
status_t status;
@@ -856,6 +951,10 @@
mRoutedDeviceId = output.selectedDeviceId;
mSessionId = output.sessionId;
mSampleRate = output.sampleRate;
+ mServerConfig = output.serverConfig;
+ mServerFrameSize = audio_bytes_per_frame(
+ audio_channel_count_from_in_mask(mServerConfig.channel_mask), mServerConfig.format);
+ mServerSampleSize = audio_bytes_per_sample(mServerConfig.format);
if (output.cblk == 0) {
ALOGE("%s(%d): Could not get control block", __func__, mPortId);
@@ -919,6 +1018,10 @@
mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
}
mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
+ if (mServerConfig.format != mFormat && mCallback.promote() != nullptr) {
+ mFormatConversionBufRaw = std::make_unique<uint8_t[]>(mNotificationFramesAct * mFrameSize);
+ mFormatConversionBuffer.raw = mFormatConversionBufRaw.get();
+ }
//mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
if (mDeviceCallback != 0) {
@@ -945,7 +1048,7 @@
}
// update proxy
- mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
+ mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mServerFrameSize);
mProxy->setEpoch(epoch);
mProxy->setMinimum(mNotificationFramesAct);
@@ -1075,7 +1178,7 @@
} while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
audioBuffer->frameCount = buffer.mFrameCount;
- audioBuffer->size = buffer.mFrameCount * mFrameSize;
+ audioBuffer->size = buffer.mFrameCount * mServerFrameSize;
audioBuffer->raw = buffer.mRaw;
audioBuffer->sequence = oldSequence;
if (nonContig != NULL) {
@@ -1088,7 +1191,7 @@
{
// FIXME add error checking on mode, by adding an internal version
- size_t stepCount = audioBuffer->size / mFrameSize;
+ size_t stepCount = audioBuffer->frameCount;
if (stepCount == 0) {
return;
}
@@ -1150,8 +1253,9 @@
return ssize_t(err);
}
- size_t bytesRead = audioBuffer.size;
- memcpy(buffer, audioBuffer.i8, bytesRead);
+ size_t bytesRead = audioBuffer.frameCount * mFrameSize;
+ memcpy_by_audio_format(buffer, mFormat, audioBuffer.raw, mServerConfig.format,
+ audioBuffer.size / mServerSampleSize);
buffer = ((char *) buffer) + bytesRead;
userSize -= bytesRead;
read += bytesRead;
@@ -1170,6 +1274,11 @@
nsecs_t AudioRecord::processAudioBuffer()
{
mLock.lock();
+ const sp<IAudioRecordCallback> callback = mCallback.promote();
+ if (!callback) {
+ mCallback = nullptr;
+ return NS_NEVER;
+ }
if (mAwaitBoost) {
mAwaitBoost = false;
mLock.unlock();
@@ -1245,26 +1354,26 @@
uint32_t sequence = mSequence;
// These fields don't need to be cached, because they are assigned only by set():
- // mTransfer, mCbf, mUserData, mSampleRate, mFrameSize
+ // mTransfer, mCallback, mUserData, mSampleRate, mFrameSize
mLock.unlock();
// perform callbacks while unlocked
if (newOverrun) {
- mCbf(EVENT_OVERRUN, mUserData, NULL);
+ callback->onOverrun();
+
}
if (markerReached) {
- mCbf(EVENT_MARKER, mUserData, &markerPosition);
+ callback->onMarker(markerPosition.value());
}
while (newPosCount > 0) {
- size_t temp = newPosition.value(); // FIXME size_t != uint32_t
- mCbf(EVENT_NEW_POS, mUserData, &temp);
+ callback->onNewPos(newPosition.value());
newPosition += updatePeriod;
newPosCount--;
}
if (mObservedSequence != sequence) {
mObservedSequence = sequence;
- mCbf(EVENT_NEW_IAUDIORECORD, mUserData, NULL);
+ callback->onNewIAudioRecord();
}
// if inactive, then don't run me again until re-started
@@ -1348,9 +1457,19 @@
}
}
- size_t reqSize = audioBuffer.size;
- mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
- size_t readSize = audioBuffer.size;
+ Buffer* buffer = &audioBuffer;
+ if (mServerConfig.format != mFormat) {
+ buffer = &mFormatConversionBuffer;
+ buffer->frameCount = audioBuffer.frameCount;
+ buffer->size = buffer->frameCount * mFrameSize;
+ buffer->sequence = audioBuffer.sequence;
+ memcpy_by_audio_format(buffer->raw, mFormat, audioBuffer.raw,
+ mServerConfig.format, audioBuffer.size / mServerSampleSize);
+ }
+
+ const size_t reqSize = buffer->size;
+ const size_t readSize = callback->onMoreData(*buffer);
+ buffer->size = readSize;
// Validate on returned size
if (ssize_t(readSize) < 0 || readSize > reqSize) {
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index f1eeaa3..07ef246 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -40,19 +40,25 @@
if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
std::move(_tmp.value()); })
-#define RETURN_STATUS_IF_ERROR(x) \
- { \
- auto _tmp = (x); \
- if (_tmp != OK) return _tmp; \
- }
-
// ----------------------------------------------------------------------------
namespace android {
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
+using content::AttributionSourceState;
using media::IAudioPolicyService;
-using android::content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::Int;
// client singleton for AudioFlinger binder interface
Mutex AudioSystem::gLock;
@@ -336,7 +342,7 @@
if (desc == 0) {
*samplingRate = af->sampleRate(ioHandle);
} else {
- *samplingRate = desc->mSamplingRate;
+ *samplingRate = desc->getSamplingRate();
}
if (*samplingRate == 0) {
ALOGE("AudioSystem::getSamplingRate failed for ioHandle %d", ioHandle);
@@ -371,7 +377,7 @@
if (desc == 0) {
*frameCount = af->frameCount(ioHandle);
} else {
- *frameCount = desc->mFrameCount;
+ *frameCount = desc->getFrameCount();
}
if (*frameCount == 0) {
ALOGE("AudioSystem::getFrameCount failed for ioHandle %d", ioHandle);
@@ -406,7 +412,7 @@
if (outputDesc == 0) {
*latency = af->latency(output);
} else {
- *latency = outputDesc->mLatency;
+ *latency = outputDesc->getLatency();
}
ALOGV("getLatency() output %d, latency %d", output, *latency);
@@ -480,6 +486,12 @@
return af->systemReady();
}
+status_t AudioSystem::audioPolicyReady() {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return NO_INIT;
+ return af->audioPolicyReady();
+}
+
status_t AudioSystem::getFrameCountHAL(audio_io_handle_t ioHandle,
size_t* frameCount) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -488,7 +500,7 @@
if (desc == 0) {
*frameCount = af->frameCountHAL(ioHandle);
} else {
- *frameCount = desc->mFrameCountHAL;
+ *frameCount = desc->getFrameCountHAL();
}
if (*frameCount == 0) {
ALOGE("AudioSystem::getFrameCountHAL failed for ioHandle %d", ioHandle);
@@ -529,15 +541,15 @@
Status AudioSystem::AudioFlingerClient::ioConfigChanged(
media::AudioIoConfigEvent _event,
const media::AudioIoDescriptor& _ioDesc) {
- audio_io_config_event event = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioIoConfigEvent_audio_io_config_event(_event));
+ audio_io_config_event_t event = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(_event));
sp<AudioIoDescriptor> ioDesc(
VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(_ioDesc)));
ALOGV("ioConfigChanged() event %d", event);
- if (ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return Status::ok();
+ if (ioDesc->getIoHandle() == AUDIO_IO_HANDLE_NONE) return Status::ok();
audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
std::vector<sp<AudioDeviceCallback>> callbacksToCall;
@@ -550,93 +562,88 @@
case AUDIO_OUTPUT_REGISTERED:
case AUDIO_INPUT_OPENED:
case AUDIO_INPUT_REGISTERED: {
- sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+ sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
if (oldDesc == 0) {
- mIoDescriptors.add(ioDesc->mIoHandle, ioDesc);
+ mIoDescriptors.add(ioDesc->getIoHandle(), ioDesc);
} else {
deviceId = oldDesc->getDeviceId();
- mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+ mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
}
if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
deviceId = ioDesc->getDeviceId();
if (event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED) {
- auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+ auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
if (it != mAudioDeviceCallbacks.end()) {
callbacks = it->second;
}
}
}
- ALOGV("ioConfigChanged() new %s %s %d samplingRate %u, format %#x channel mask %#x "
- "frameCount %zu deviceId %d",
+ ALOGV("ioConfigChanged() new %s %s %s",
event == AUDIO_OUTPUT_OPENED || event == AUDIO_OUTPUT_REGISTERED ?
"output" : "input",
event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED ?
"opened" : "registered",
- ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
- ioDesc->mChannelMask,
- ioDesc->mFrameCount, ioDesc->getDeviceId());
+ ioDesc->toDebugString().c_str());
}
break;
case AUDIO_OUTPUT_CLOSED:
case AUDIO_INPUT_CLOSED: {
- if (getIoDescriptor_l(ioDesc->mIoHandle) == 0) {
+ if (getIoDescriptor_l(ioDesc->getIoHandle()) == 0) {
ALOGW("ioConfigChanged() closing unknown %s %d",
- event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+ event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
break;
}
ALOGV("ioConfigChanged() %s %d closed",
- event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+ event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
- mIoDescriptors.removeItem(ioDesc->mIoHandle);
- mAudioDeviceCallbacks.erase(ioDesc->mIoHandle);
+ mIoDescriptors.removeItem(ioDesc->getIoHandle());
+ mAudioDeviceCallbacks.erase(ioDesc->getIoHandle());
}
break;
case AUDIO_OUTPUT_CONFIG_CHANGED:
case AUDIO_INPUT_CONFIG_CHANGED: {
- sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+ sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
if (oldDesc == 0) {
ALOGW("ioConfigChanged() modifying unknown %s! %d",
event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
- ioDesc->mIoHandle);
+ ioDesc->getIoHandle());
break;
}
deviceId = oldDesc->getDeviceId();
- mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+ mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
if (deviceId != ioDesc->getDeviceId()) {
deviceId = ioDesc->getDeviceId();
- auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+ auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
if (it != mAudioDeviceCallbacks.end()) {
callbacks = it->second;
}
}
- ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
- "channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
+ ALOGV("ioConfigChanged() new config for %s %s",
event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
- ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
- ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->mFrameCountHAL,
- ioDesc->getDeviceId());
+ ioDesc->toDebugString().c_str());
}
break;
case AUDIO_CLIENT_STARTED: {
- sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+ sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
if (oldDesc == 0) {
- ALOGW("ioConfigChanged() start client on unknown io! %d", ioDesc->mIoHandle);
+ ALOGW("ioConfigChanged() start client on unknown io! %d",
+ ioDesc->getIoHandle());
break;
}
ALOGV("ioConfigChanged() AUDIO_CLIENT_STARTED io %d port %d num callbacks %zu",
- ioDesc->mIoHandle, ioDesc->mPortId, mAudioDeviceCallbacks.size());
- oldDesc->mPatch = ioDesc->mPatch;
- auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+ ioDesc->getIoHandle(), ioDesc->getPortId(), mAudioDeviceCallbacks.size());
+ oldDesc->setPatch(ioDesc->getPatch());
+ auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
if (it != mAudioDeviceCallbacks.end()) {
auto cbks = it->second;
- auto it2 = cbks.find(ioDesc->mPortId);
+ auto it2 = cbks.find(ioDesc->getPortId());
if (it2 != cbks.end()) {
- callbacks.emplace(ioDesc->mPortId, it2->second);
+ callbacks.emplace(ioDesc->getPortId(), it2->second);
deviceId = oldDesc->getDeviceId();
}
}
@@ -655,8 +662,8 @@
// Callbacks must be called without mLock held. May lead to dead lock if calling for
// example getRoutedDevice that updates the device and tries to acquire mLock.
for (auto cb : callbacksToCall) {
- // If callbacksToCall is not empty, it implies ioDesc->mIoHandle and deviceId are valid
- cb->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
+ // If callbacksToCall is not empty, it implies ioDesc->getIoHandle() and deviceId are valid
+ cb->onAudioDeviceUpdate(ioDesc->getIoHandle(), deviceId);
}
return Status::ok();
@@ -845,9 +852,8 @@
name = device_name;
}
- media::AudioDevice deviceAidl;
- deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
- deviceAidl.address = address;
+ AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_device_AudioDevice(device, address));
return statusTFromBinderStatus(
aps->setDeviceConnectionState(
@@ -855,7 +861,8 @@
VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(state)),
name,
- VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+ VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
}
audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -864,9 +871,8 @@
if (aps == 0) return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
auto result = [&]() -> ConversionResult<audio_policy_dev_state_t> {
- media::AudioDevice deviceAidl;
- deviceAidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
- deviceAidl.address = device_address;
+ AudioDevice deviceAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_device_AudioDevice(device, device_address));
media::AudioPolicyDeviceState result;
RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -894,13 +900,12 @@
name = device_name;
}
- media::AudioDevice deviceAidl;
- deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
- deviceAidl.address = address;
+ AudioDevice deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_device_AudioDevice(device, address));
return statusTFromBinderStatus(
aps->handleDeviceConfigChange(deviceAidl, name, VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+ legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
}
status_t AudioSystem::setPhoneState(audio_mode_t state, uid_t uid) {
@@ -949,7 +954,7 @@
if (aps == 0) return AUDIO_IO_HANDLE_NONE;
auto result = [&]() -> ConversionResult<audio_io_handle_t> {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+ AudioStreamType streamAidl = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t outputAidl;
RETURN_IF_ERROR(
@@ -997,8 +1002,8 @@
media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
- media::AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_t_AudioConfig(*config));
+ AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
int32_t flagsAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
@@ -1091,8 +1096,8 @@
int32_t inputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
int32_t riidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_unique_id_t_int32_t(riid));
int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
- media::AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_base_t_AudioConfigBase(*config));
+ AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(*config, true /*isInput*/));
int32_t flagsAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));
@@ -1148,7 +1153,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t indexMinAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMin));
int32_t indexMaxAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(indexMax));
@@ -1162,10 +1167,11 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
return statusTFromBinderStatus(
aps->setStreamVolumeIndex(streamAidl, deviceAidl, indexAidl));
}
@@ -1176,9 +1182,10 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
int32_t indexAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getStreamVolumeIndex(streamAidl, deviceAidl, &indexAidl)));
@@ -1197,7 +1204,8 @@
media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
return statusTFromBinderStatus(
aps->setVolumeIndexForAttributes(attrAidl, deviceAidl, indexAidl));
}
@@ -1210,7 +1218,8 @@
media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
int32_t indexAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getVolumeIndexForAttributes(attrAidl, deviceAidl, &indexAidl)));
@@ -1249,7 +1258,7 @@
if (aps == 0) return PRODUCT_STRATEGY_NONE;
auto result = [&]() -> ConversionResult<product_strategy_t> {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+ AudioStreamType streamAidl = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t resultAidl;
RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -1259,19 +1268,20 @@
return result.value_or(PRODUCT_STRATEGY_NONE);
}
-audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return AUDIO_DEVICE_NONE;
+ if (aps == 0) return DeviceTypeSet{};
- auto result = [&]() -> ConversionResult<audio_devices_t> {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+ auto result = [&]() -> ConversionResult<DeviceTypeSet> {
+ AudioStreamType streamAidl = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
- int32_t resultAidl;
+ std::vector<AudioDeviceDescription> resultAidl;
RETURN_IF_ERROR(statusTFromBinderStatus(
aps->getDevicesForStream(streamAidl, &resultAidl)));
- return aidl2legacy_int32_t_audio_devices_t(resultAidl);
+ return convertContainer<DeviceTypeSet>(resultAidl,
+ aidl2legacy_AudioDeviceDescription_audio_devices_t);
}();
- return result.value_or(AUDIO_DEVICE_NONE);
+ return result.value_or(DeviceTypeSet{});
}
status_t AudioSystem::getDevicesForAttributes(const AudioAttributes& aa,
@@ -1284,7 +1294,7 @@
media::AudioAttributesEx aaAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
- std::vector<media::AudioDevice> retAidl;
+ std::vector<AudioDevice> retAidl;
RETURN_STATUS_IF_ERROR(
statusTFromBinderStatus(aps->getDevicesForAttributes(aaAidl, &retAidl)));
*devices = VALUE_OR_RETURN_STATUS(
@@ -1362,7 +1372,7 @@
if (aps == 0) return PERMISSION_DENIED;
if (state == NULL) return BAD_VALUE;
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1376,7 +1386,7 @@
if (aps == 0) return PERMISSION_DENIED;
if (state == NULL) return BAD_VALUE;
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t inPastMsAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(inPastMs));
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1389,8 +1399,8 @@
if (aps == 0) return PERMISSION_DENIED;
if (state == NULL) return BAD_VALUE;
- media::AudioSourceType streamAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(stream));
+ AudioSource streamAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(stream));
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->isSourceActive(streamAidl, state)));
return OK;
@@ -1434,9 +1444,9 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == nullptr) return PERMISSION_DENIED;
- std::vector<media::AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<media::AudioUsage>>(systemUsages,
- legacy2aidl_audio_usage_t_AudioUsage));
+ std::vector<AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioUsage>>(systemUsages,
+ legacy2aidl_audio_usage_t_AudioUsage));
return statusTFromBinderStatus(aps->setSupportedSystemUsages(systemUsagesAidl));
}
@@ -1456,7 +1466,7 @@
if (aps == 0) return AUDIO_OFFLOAD_NOT_SUPPORTED;
auto result = [&]() -> ConversionResult<audio_offload_mode_t> {
- media::AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
+ AudioOffloadInfo infoAidl = VALUE_OR_RETURN(
legacy2aidl_audio_offload_info_t_AudioOffloadInfo(info));
media::AudioOffloadMode retAidl;
RETURN_IF_ERROR(
@@ -1484,7 +1494,7 @@
legacy2aidl_audio_port_role_t_AudioPortRole(role));
media::AudioPortType typeAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_type_t_AudioPortType(type));
- media::Int numPortsAidl;
+ Int numPortsAidl;
numPortsAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_ports));
std::vector<media::AudioPort> portsAidl;
int32_t generationAidl;
@@ -1551,7 +1561,7 @@
if (aps == 0) return PERMISSION_DENIED;
- media::Int numPatchesAidl;
+ Int numPatchesAidl;
numPatchesAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
std::vector<media::AudioPatch> patchesAidl;
int32_t generationAidl;
@@ -1690,7 +1700,8 @@
statusTFromBinderStatus(aps->acquireSoundTriggerSession(&retAidl)));
*session = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_session_t(retAidl.session));
*ioHandle = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(retAidl.ioHandle));
- *device = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_devices_t(retAidl.device));
+ *device = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(retAidl.device));
return OK;
}
@@ -1707,7 +1718,7 @@
if (aps == 0) return AUDIO_MODE_INVALID;
auto result = [&]() -> ConversionResult<audio_mode_t> {
- media::AudioMode retAidl;
+ media::audio::common::AudioMode retAidl;
RETURN_IF_ERROR(statusTFromBinderStatus(aps->getPhoneState(&retAidl)));
return aidl2legacy_AudioMode_audio_mode_t(retAidl);
}();
@@ -1732,8 +1743,8 @@
if (aps == 0) return PERMISSION_DENIED;
int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
- std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
+ std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioDevice>>(devices,
legacy2aidl_AudioDeviceTypeAddress));
return statusTFromBinderStatus(aps->setUidDeviceAffinities(uidAidl, devicesAidl));
}
@@ -1752,9 +1763,9 @@
if (aps == 0) return PERMISSION_DENIED;
int32_t userIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(userId));
- std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
return statusTFromBinderStatus(
aps->setUserIdDeviceAffinities(userIdAidl, devicesAidl));
}
@@ -1827,10 +1838,11 @@
if (aps == 0) return NAN;
auto result = [&]() -> ConversionResult<float> {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN(
+ AudioStreamType streamAidl = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t indexAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(index));
- int32_t deviceAidl = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
+ AudioDeviceDescription deviceAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
float retAidl;
RETURN_IF_ERROR(statusTFromBinderStatus(
aps->getStreamVolumeDB(streamAidl, indexAidl, deviceAidl, &retAidl)));
@@ -1862,10 +1874,10 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::Int numSurroundFormatsAidl;
+ Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+ std::vector<AudioFormatDescription> surroundFormatsAidl;
std::vector<bool> surroundFormatsEnabledAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl,
@@ -1875,7 +1887,7 @@
convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
RETURN_STATUS_IF_ERROR(
convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
- aidl2legacy_AudioFormat_audio_format_t));
+ aidl2legacy_AudioFormatDescription_audio_format_t));
std::copy(surroundFormatsEnabledAidl.begin(), surroundFormatsEnabledAidl.end(),
surroundFormatsEnabled);
return OK;
@@ -1889,10 +1901,10 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::Int numSurroundFormatsAidl;
+ Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+ std::vector<AudioFormatDescription> surroundFormatsAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getReportedSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl)));
@@ -1900,7 +1912,7 @@
convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
RETURN_STATUS_IF_ERROR(
convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
- aidl2legacy_AudioFormat_audio_format_t));
+ aidl2legacy_AudioFormatDescription_audio_format_t));
return OK;
}
@@ -1908,8 +1920,8 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::audio::common::AudioFormat audioFormatAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_format_t_AudioFormat(audioFormat));
+ AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_format_t_AudioFormatDescription(audioFormat));
return statusTFromBinderStatus(
aps->setSurroundFormatEnabled(audioFormatAidl, enabled));
}
@@ -1960,8 +1972,8 @@
return result.value_or(false);
}
-status_t AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<audio_format_t>* formats) {
+status_t AudioSystem::getHwOffloadFormatsSupportedForBluetoothMedia(
+ audio_devices_t device, std::vector<audio_format_t>* formats) {
if (formats == nullptr) {
return BAD_VALUE;
}
@@ -1970,12 +1982,15 @@
& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- std::vector<media::audio::common::AudioFormat> formatsAidl;
+ std::vector<AudioFormatDescription> formatsAidl;
+ AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
- aps->getHwOffloadEncodingFormatsSupportedForA2DP(&formatsAidl)));
+ aps->getHwOffloadFormatsSupportedForBluetoothMedia(deviceAidl, &formatsAidl)));
*formats = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<audio_format_t>>(formatsAidl,
- aidl2legacy_AudioFormat_audio_format_t));
+ convertContainer<std::vector<audio_format_t>>(
+ formatsAidl,
+ aidl2legacy_AudioFormatDescription_audio_format_t));
return OK;
}
@@ -2114,9 +2129,9 @@
int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
- std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
return statusTFromBinderStatus(
aps->setDevicesRoleForStrategy(strategyAidl, roleAidl, devicesAidl));
}
@@ -2142,7 +2157,7 @@
}
int32_t strategyAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_product_strategy_t_int32_t(strategy));
media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
- std::vector<media::AudioDevice> devicesAidl;
+ std::vector<AudioDevice> devicesAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getDevicesForRoleAndStrategy(strategyAidl, roleAidl, &devicesAidl)));
devices = VALUE_OR_RETURN_STATUS(
@@ -2159,12 +2174,12 @@
return PERMISSION_DENIED;
}
- media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+ AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(audioSource));
media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
- std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
return statusTFromBinderStatus(
aps->setDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
}
@@ -2176,12 +2191,12 @@
if (aps == 0) {
return PERMISSION_DENIED;
}
- media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+ AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(audioSource));
media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
- std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
return statusTFromBinderStatus(
aps->addDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
}
@@ -2192,12 +2207,12 @@
if (aps == 0) {
return PERMISSION_DENIED;
}
- media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+ AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(audioSource));
media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
- std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
return statusTFromBinderStatus(
aps->removeDevicesRoleForCapturePreset(audioSourceAidl, roleAidl, devicesAidl));
}
@@ -2208,8 +2223,8 @@
if (aps == 0) {
return PERMISSION_DENIED;
}
- media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+ AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(audioSource));
media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
return statusTFromBinderStatus(
aps->clearDevicesRoleForCapturePreset(audioSourceAidl, roleAidl));
@@ -2222,10 +2237,10 @@
if (aps == 0) {
return PERMISSION_DENIED;
}
- media::AudioSourceType audioSourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(audioSource));
+ AudioSource audioSourceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(audioSource));
media::DeviceRole roleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_device_role_t_DeviceRole(role));
- std::vector<media::AudioDevice> devicesAidl;
+ std::vector<AudioDevice> devicesAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getDevicesForRoleAndCapturePreset(audioSourceAidl, roleAidl, &devicesAidl)));
devices = VALUE_OR_RETURN_STATUS(
@@ -2234,6 +2249,47 @@
return OK;
}
+status_t AudioSystem::getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+ sp<media::ISpatializer>* spatializer) {
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (spatializer == nullptr) {
+ return BAD_VALUE;
+ }
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ media::GetSpatializerResponse response;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ aps->getSpatializer(callback, &response)));
+
+ *spatializer = response.spatializer;
+ return OK;
+}
+
+status_t AudioSystem::canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices,
+ bool *canBeSpatialized) {
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ audio_attributes_t attributes = attr != nullptr ? *attr : AUDIO_ATTRIBUTES_INITIALIZER;
+ audio_config_t configuration = config != nullptr ? *config : AUDIO_CONFIG_INITIALIZER;
+
+ std::optional<media::AudioAttributesInternal> attrAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
+ std::optional<AudioConfig> configAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(configuration, false /*isInput*/));
+ std::vector<AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ aps->canBeSpatialized(attrAidl, configAidl, devicesAidl, canBeSpatialized)));
+ return OK;
+}
+
+
class CaptureStateListenerImpl : public media::BnCaptureStateListener,
public IBinder::DeathRecipient {
public:
@@ -2298,6 +2354,31 @@
return af->setVibratorInfos(vibratorInfos);
}
+status_t AudioSystem::getMmapPolicyInfo(
+ AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioSystem::getAAudioMixerBurstCount() {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->getAAudioMixerBurstCount();
+}
+
+int32_t AudioSystem::getAAudioHardwareBurstMinUsec() {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->getAAudioHardwareBurstMinUsec();
+}
+
// ---------------------------------------------------------------------------
int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
@@ -2409,12 +2490,12 @@
Status AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
int32_t event,
const media::RecordClientInfo& clientInfo,
- const media::AudioConfigBase& clientConfig,
+ const AudioConfigBase& clientConfig,
const std::vector<media::EffectDescriptor>& clientEffects,
- const media::AudioConfigBase& deviceConfig,
+ const AudioConfigBase& deviceConfig,
const std::vector<media::EffectDescriptor>& effects,
int32_t patchHandle,
- media::AudioSourceType source) {
+ AudioSource source) {
record_config_callback cb = NULL;
{
Mutex::Autolock _l(AudioSystem::gLock);
@@ -2426,13 +2507,13 @@
record_client_info_t clientInfoLegacy = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_RecordClientInfo_record_client_info_t(clientInfo));
audio_config_base_t clientConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig, true /*isInput*/));
std::vector<effect_descriptor_t> clientEffectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<std::vector<effect_descriptor_t>>(
clientEffects,
aidl2legacy_EffectDescriptor_effect_descriptor_t));
audio_config_base_t deviceConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig, true /*isInput*/));
std::vector<effect_descriptor_t> effectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<std::vector<effect_descriptor_t>>(
effects,
@@ -2440,7 +2521,7 @@
audio_patch_handle_t patchHandleLegacy = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_patch_handle_t(patchHandle));
audio_source_t sourceLegacy = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(source));
+ aidl2legacy_AudioSource_audio_source_t(source));
cb(eventLegacy, &clientInfoLegacy, &clientConfigLegacy, clientEffectsLegacy,
&deviceConfigLegacy, effectsLegacy, patchHandleLegacy, sourceLegacy);
}
@@ -2484,7 +2565,7 @@
legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
legacy.uid = VALUE_OR_RETURN(aidl2legacy_int32_t_uid_t(aidl.uid));
legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
- legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(aidl.source));
+ legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
legacy.port_id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
legacy.silenced = aidl.silenced;
return legacy;
@@ -2496,7 +2577,7 @@
aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(legacy.riid));
aidl.uid = VALUE_OR_RETURN(legacy2aidl_uid_t_int32_t(legacy.uid));
aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
- aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source));
+ aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source));
aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.port_id));
aidl.silenced = legacy.silenced;
return aidl;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 5f802de..407b294 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -21,9 +21,11 @@
#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>
+#include <thread>
#include <android/media/IAudioPolicyService.h>
#include <android-base/macros.h>
+#include <android-base/stringprintf.h>
#include <audio_utils/clock.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
@@ -43,6 +45,7 @@
static const int kMaxLoopCountNotifications = 32;
using ::android::aidl_utils::statusTFromBinderStatus;
+using ::android::base::StringPrintf;
namespace android {
// ---------------------------------------------------------------------------
@@ -170,8 +173,8 @@
if (aps == 0) return false;
auto result = [&]() -> ConversionResult<bool> {
- media::AudioConfigBase configAidl = VALUE_OR_RETURN(
- legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+ media::audio::common::AudioConfigBase configAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(config, false /*isInput*/));
media::AudioAttributesInternal attributesAidl = VALUE_OR_RETURN(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
bool retAidl;
@@ -253,8 +256,7 @@
audio_channel_mask_t channelMask,
size_t frameCount,
audio_output_flags_t flags,
- callback_t cbf,
- void* user,
+ const wp<IAudioTrackCallback> & callback,
int32_t notificationFrames,
audio_session_t sessionId,
transfer_type transferType,
@@ -274,7 +276,85 @@
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
(void)set(streamType, sampleRate, format, channelMask,
- frameCount, flags, cbf, user, notificationFrames,
+ frameCount, flags, callback, notificationFrames,
+ 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
+ attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
+namespace {
+ class LegacyCallbackWrapper : public AudioTrack::IAudioTrackCallback {
+ const AudioTrack::legacy_callback_t mCallback;
+ void * const mData;
+ public:
+ LegacyCallbackWrapper(AudioTrack::legacy_callback_t callback, void* user)
+ : mCallback(callback), mData(user) {}
+ size_t onMoreData(const AudioTrack::Buffer & buffer) override {
+ AudioTrack::Buffer copy = buffer;
+ mCallback(AudioTrack::EVENT_MORE_DATA, mData, static_cast<void*>(©));
+ return copy.size;
+ }
+ void onUnderrun() override {
+ mCallback(AudioTrack::EVENT_UNDERRUN, mData, nullptr);
+ }
+ void onLoopEnd(int32_t loopsRemaining) override {
+ mCallback(AudioTrack::EVENT_LOOP_END, mData, &loopsRemaining);
+ }
+ void onMarker(uint32_t markerPosition) override {
+ mCallback(AudioTrack::EVENT_MARKER, mData, &markerPosition);
+ }
+ void onNewPos(uint32_t newPos) override {
+ mCallback(AudioTrack::EVENT_NEW_POS, mData, &newPos);
+ }
+ void onBufferEnd() override {
+ mCallback(AudioTrack::EVENT_BUFFER_END, mData, nullptr);
+ }
+ void onNewIAudioTrack() override {
+ mCallback(AudioTrack::EVENT_NEW_IAUDIOTRACK, mData, nullptr);
+ }
+ void onStreamEnd() override {
+ mCallback(AudioTrack::EVENT_STREAM_END, mData, nullptr);
+ }
+ size_t onCanWriteMoreData(const AudioTrack::Buffer & buffer) override {
+ AudioTrack::Buffer copy = buffer;
+ mCallback(AudioTrack::EVENT_CAN_WRITE_MORE_DATA, mData, static_cast<void*>(©));
+ return copy.size;
+ }
+ };
+}
+
+AudioTrack::AudioTrack(
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ audio_output_flags_t flags,
+ legacy_callback_t callback,
+ void* user,
+ int32_t notificationFrames,
+ audio_session_t sessionId,
+ transfer_type transferType,
+ const audio_offload_info_t *offloadInfo,
+ const AttributionSourceState& attributionSource,
+ const audio_attributes_t* pAttributes,
+ bool doNotReconnect,
+ float maxRequiredSpeed,
+ audio_port_handle_t selectedDeviceId)
+ : mStatus(NO_INIT),
+ mState(STATE_STOPPED),
+ mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+ mPreviousSchedulingGroup(SP_DEFAULT),
+ mPausedPosition(0),
+ mAudioTrackCallback(new AudioTrackCallback())
+{
+ mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ if (callback != nullptr) {
+ mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+ } else if (user) {
+ LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+ }
+ (void)set(streamType, sampleRate, format, channelMask,
+ frameCount, flags, mLegacyCallbackWrapper, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}
@@ -286,8 +366,7 @@
audio_channel_mask_t channelMask,
const sp<IMemory>& sharedBuffer,
audio_output_flags_t flags,
- callback_t cbf,
- void* user,
+ const wp<IAudioTrackCallback>& callback,
int32_t notificationFrames,
audio_session_t sessionId,
transfer_type transferType,
@@ -307,11 +386,49 @@
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
(void)set(streamType, sampleRate, format, channelMask,
- 0 /*frameCount*/, flags, cbf, user, notificationFrames,
+ 0 /*frameCount*/, flags, callback, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed);
}
+AudioTrack::AudioTrack(
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const sp<IMemory>& sharedBuffer,
+ audio_output_flags_t flags,
+ legacy_callback_t callback,
+ void* user,
+ int32_t notificationFrames,
+ audio_session_t sessionId,
+ transfer_type transferType,
+ const audio_offload_info_t *offloadInfo,
+ const AttributionSourceState& attributionSource,
+ const audio_attributes_t* pAttributes,
+ bool doNotReconnect,
+ float maxRequiredSpeed)
+ : mStatus(NO_INIT),
+ mState(STATE_STOPPED),
+ mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+ mPreviousSchedulingGroup(SP_DEFAULT),
+ mPausedPosition(0),
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mAudioTrackCallback(new AudioTrackCallback())
+{
+ mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ if (callback) {
+ mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+ } else if (user) {
+ LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+ }
+
+ (void)set(streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
+ mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+ false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, attributionSource,
+ pAttributes, doNotReconnect, maxRequiredSpeed);
+}
+
AudioTrack::~AudioTrack()
{
// pull together the numbers, before we clean up our structures
@@ -352,8 +469,8 @@
// Otherwise the callback thread will never exit.
stop();
if (mAudioTrackThread != 0) { // not thread safe
- mProxy->interrupt();
mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
+ mProxy->interrupt();
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
@@ -373,8 +490,38 @@
audio_channel_mask_t channelMask,
size_t frameCount,
audio_output_flags_t flags,
- callback_t cbf,
- void* user,
+ legacy_callback_t callback,
+ void * user,
+ int32_t notificationFrames,
+ const sp<IMemory>& sharedBuffer,
+ bool threadCanCallJava,
+ audio_session_t sessionId,
+ transfer_type transferType,
+ const audio_offload_info_t *offloadInfo,
+ const AttributionSourceState& attributionSource,
+ const audio_attributes_t* pAttributes,
+ bool doNotReconnect,
+ float maxRequiredSpeed,
+ audio_port_handle_t selectedDeviceId)
+{
+ if (callback) {
+ mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+ } else if (user) {
+ LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+ }
+ return set(streamType, sampleRate,format, channelMask, frameCount, flags,
+ mLegacyCallbackWrapper, notificationFrames, sharedBuffer, threadCanCallJava,
+ sessionId, transferType, offloadInfo, attributionSource, pAttributes,
+ doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+status_t AudioTrack::set(
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ audio_output_flags_t flags,
+ const wp<IAudioTrackCallback>& callback,
int32_t notificationFrames,
const sp<IMemory>& sharedBuffer,
bool threadCanCallJava,
@@ -393,7 +540,8 @@
pid_t myPid;
uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
-
+ sp<IAudioTrackCallback> _callback = callback.promote();
+ std::string errorMessage;
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
@@ -409,7 +557,7 @@
case TRANSFER_DEFAULT:
if (sharedBuffer != 0) {
transferType = TRANSFER_SHARED;
- } else if (cbf == NULL || threadCanCallJava) {
+ } else if (_callback == nullptr|| threadCanCallJava) {
transferType = TRANSFER_SYNC;
} else {
transferType = TRANSFER_CALLBACK;
@@ -417,33 +565,35 @@
break;
case TRANSFER_CALLBACK:
case TRANSFER_SYNC_NOTIF_CALLBACK:
- if (cbf == NULL || sharedBuffer != 0) {
- ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
+ if (_callback == nullptr || sharedBuffer != 0) {
+ errorMessage = StringPrintf(
+ "%s: Transfer type %s but callback == nullptr || sharedBuffer != 0",
convertTransferToText(transferType), __func__);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
break;
case TRANSFER_OBTAIN:
case TRANSFER_SYNC:
if (sharedBuffer != 0) {
- ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
+ errorMessage = StringPrintf(
+ "%s: Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
break;
case TRANSFER_SHARED:
if (sharedBuffer == 0) {
- ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
+ errorMessage = StringPrintf(
+ "%s: Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
break;
default:
- ALOGE("%s(): Invalid transfer type %d",
- __func__, transferType);
+ errorMessage = StringPrintf("%s: Invalid transfer type %d", __func__, transferType);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
mSharedBuffer = sharedBuffer;
mTransfer = transferType;
@@ -457,9 +607,9 @@
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
- ALOGE("%s(): Track already in use", __func__);
+ errorMessage = StringPrintf("%s: Track already in use", __func__);
status = INVALID_OPERATION;
- goto exit;
+ goto error;
}
// handle default values first.
@@ -468,9 +618,9 @@
}
if (pAttributes == NULL) {
if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
- ALOGE("%s(): Invalid stream type %d", __func__, streamType);
+ errorMessage = StringPrintf("%s: Invalid stream type %d", __func__, streamType);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
mOriginalStreamType = streamType;
@@ -494,16 +644,16 @@
// validate parameters
if (!audio_is_valid_format(format)) {
- ALOGE("%s(): Invalid format %#x", __func__, format);
+ errorMessage = StringPrintf("%s: Invalid format %#x", __func__, format);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
mFormat = format;
if (!audio_is_output_channel(channelMask)) {
- ALOGE("%s(): Invalid channel mask %#x", __func__, channelMask);
+ errorMessage = StringPrintf("%s: Invalid channel mask %#x", __func__, channelMask);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
mChannelMask = channelMask;
channelCount = audio_channel_count_from_out_mask(channelMask);
@@ -542,8 +692,10 @@
// sampling rate must be specified for direct outputs
if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+ errorMessage = StringPrintf(
+ "%s: sample rate must be specified for direct outputs", __func__);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
mSampleRate = sampleRate;
mOriginalSampleRate = sampleRate;
@@ -573,16 +725,17 @@
mNotificationsPerBufferReq = 0;
} else {
if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
- ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
+ errorMessage = StringPrintf(
+ "%s: notificationFrames=%d not permitted for non-fast track",
__func__, notificationFrames);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
if (frameCount > 0) {
ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
__func__, notificationFrames, frameCount);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
mNotificationFramesReq = 0;
const uint32_t minNotificationsPerBuffer = 1;
@@ -608,10 +761,10 @@
}
mAuxEffectId = 0;
mOrigFlags = mFlags = flags;
- mCbf = cbf;
+ mCallback = callback;
- if (cbf != NULL) {
- mAudioTrackThread = new AudioTrackThread(*this);
+ if (_callback != nullptr) {
+ mAudioTrackThread = sp<AudioTrackThread>::make(*this);
mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
// thread begins in paused state, and will not reference us until start()
}
@@ -627,10 +780,10 @@
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
+ // We do not goto error to prevent double-logging errors.
goto exit;
}
- mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
mLoopEnd = 0;
@@ -661,6 +814,12 @@
mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
mVolumeHandler = new media::VolumeHandler();
+error:
+ if (status != NO_ERROR) {
+ ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+ reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
+ }
+ // fall through
exit:
mStatus = status;
return status;
@@ -674,7 +833,7 @@
uint32_t channelMask,
size_t frameCount,
audio_output_flags_t flags,
- callback_t cbf,
+ legacy_callback_t callback,
void* user,
int32_t notificationFrames,
const sp<IMemory>& sharedBuffer,
@@ -693,11 +852,15 @@
attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
attributionSource.token = sp<BBinder>::make();
- return set(streamType, sampleRate, format,
- static_cast<audio_channel_mask_t>(channelMask),
- frameCount, flags, cbf, user, notificationFrames, sharedBuffer,
- threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
- pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+ if (callback) {
+ mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+ } else if (user) {
+ LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+ }
+ return set(streamType, sampleRate, format, static_cast<audio_channel_mask_t>(channelMask),
+ frameCount, flags, mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
+ threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
+ pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}
// -------------------------------------------------------------------------
@@ -947,6 +1110,44 @@
mAudioTrack->flush();
}
+bool AudioTrack::pauseAndWait(const std::chrono::milliseconds& timeout)
+{
+ using namespace std::chrono_literals;
+
+ pause();
+
+ AutoMutex lock(mLock);
+ // offload and direct tracks do not wait because pause volume ramp is handled by hardware.
+ if (isOffloadedOrDirect_l()) return true;
+
+ // Wait for the track state to be anything besides pausing.
+ // This ensures that the volume has ramped down.
+ constexpr auto SLEEP_INTERVAL_MS = 10ms;
+ auto begin = std::chrono::steady_clock::now();
+ while (true) {
+ // wait for state to change
+ const int state = mProxy->getState();
+
+ mLock.unlock(); // only local variables accessed until lock.
+ auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::steady_clock::now() - begin);
+ if (state != CBLK_STATE_PAUSING) {
+ ALOGV("%s: success state:%d after %lld ms", __func__, state, elapsed.count());
+ return true;
+ }
+ std::chrono::milliseconds remaining = timeout - elapsed;
+ if (remaining.count() <= 0) {
+ ALOGW("%s: timeout expired state:%d still pausing:%d after %lld ms",
+ __func__, state, CBLK_STATE_PAUSING, elapsed.count());
+ return false;
+ }
+ // It is conceivable that the track is restored while sleeping;
+ // as this logic is advisory, we allow that.
+ std::this_thread::sleep_for(std::min(remaining, SLEEP_INTERVAL_MS));
+ mLock.lock();
+ }
+}
+
void AudioTrack::pause()
{
const int64_t beginNs = systemTime();
@@ -1275,10 +1476,6 @@
if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
return NO_INIT;
}
- // Reject if timed track or compressed audio.
- if (!audio_is_linear_pcm(mFormat)) {
- return INVALID_OPERATION;
- }
ssize_t originalBufferSize = mProxy->getBufferSizeInFrames();
ssize_t finalBufferSize = mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
@@ -1372,7 +1569,7 @@
status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
// The only purpose of setting marker position is to get a callback
- if (mCbf == NULL || isOffloadedOrDirect()) {
+ if (!mCallback.promote() || isOffloadedOrDirect()) {
return INVALID_OPERATION;
}
@@ -1405,7 +1602,7 @@
status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
// The only purpose of setting position update period is to get a callback
- if (mCbf == NULL || isOffloadedOrDirect()) {
+ if (!mCallback.promote() || isOffloadedOrDirect()) {
return INVALID_OPERATION;
}
@@ -1555,6 +1752,8 @@
status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
AutoMutex lock(mLock);
+ ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+ __func__, mPortId, deviceId, mSelectedDeviceId);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
if (mStatus == NO_ERROR) {
@@ -1648,12 +1847,13 @@
{
status_t status;
bool callbackAdded = false;
+ std::string errorMessage;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
- ALOGE("%s(%d): Could not get audioflinger",
+ errorMessage = StringPrintf("%s(%d): Could not get audioflinger",
__func__, mPortId);
- status = NO_INIT;
+ status = DEAD_OBJECT;
goto exit;
}
@@ -1730,10 +1930,11 @@
}
if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
- ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
+ errorMessage = StringPrintf(
+ "%s(%d): AudioFlinger could not create track, status: %d output %d",
__func__, mPortId, status, output.outputId);
if (status == NO_ERROR) {
- status = NO_INIT;
+ status = INVALID_OPERATION; // device not ready
}
goto exit;
}
@@ -1764,8 +1965,8 @@
output.audioTrack->getCblk(&sfr);
sp<IMemory> iMem = VALUE_OR_FATAL(aidl2legacy_NullableSharedFileRegion_IMemory(sfr));
if (iMem == 0) {
- ALOGE("%s(%d): Could not get control block", __func__, mPortId);
- status = NO_INIT;
+ errorMessage = StringPrintf("%s(%d): Could not get control block", __func__, mPortId);
+ status = FAILED_TRANSACTION;
goto exit;
}
// TODO: Using unsecurePointer() has some associated security pitfalls
@@ -1774,8 +1975,9 @@
// issue (e.g. by copying).
void *iMemPointer = iMem->unsecurePointer();
if (iMemPointer == NULL) {
- ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
- status = NO_INIT;
+ errorMessage = StringPrintf(
+ "%s(%d): Could not get control block pointer", __func__, mPortId);
+ status = FAILED_TRANSACTION;
goto exit;
}
// invariant that mAudioTrack != 0 is true only after set() returns successfully
@@ -1799,7 +2001,7 @@
mAwaitBoost = true;
}
} else {
- ALOGD("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
+ ALOGV("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
__func__, mPortId, mReqFrameCount, mFrameCount);
}
}
@@ -1833,8 +2035,10 @@
// issue (e.g. by copying).
buffers = mSharedBuffer->unsecurePointer();
if (buffers == NULL) {
- ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
- status = NO_INIT;
+ errorMessage = StringPrintf(
+ "%s(%d): Could not get buffer pointer", __func__, mPortId);
+ ALOGE("%s", errorMessage.c_str());
+ status = FAILED_TRANSACTION;
goto exit;
}
}
@@ -1932,17 +2136,44 @@
}
exit:
- if (status != NO_ERROR && callbackAdded) {
- // note: mOutput is always valid is callbackAdded is true
- AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+ if (status != NO_ERROR) {
+ if (callbackAdded) {
+ // note: mOutput is always valid is callbackAdded is true
+ AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+ }
+ ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+ reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
}
-
mStatus = status;
// sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
return status;
}
+void AudioTrack::reportError(status_t status, const char *event, const char *message) const
+{
+ if (status == NO_ERROR) return;
+ // We report error on the native side because some callers do not come
+ // from Java.
+ mediametrics::LogItem(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + "error")
+ .set(AMEDIAMETRICS_PROP_EVENT, event)
+ .set(AMEDIAMETRICS_PROP_ERROR, mediametrics::statusToErrorString(status))
+ .set(AMEDIAMETRICS_PROP_ERRORMESSAGE, message)
+ .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
+ .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
+ .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
+ .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
+ .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
+ .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
+ .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
+ .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mReqFrameCount) // requested frame count
+ // the following are NOT immutable
+ .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
+ .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
+ .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
+ .record();
+}
+
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
{
if (audioBuffer == NULL) {
@@ -2172,10 +2403,14 @@
{
// Currently the AudioTrack thread is not created if there are no callbacks.
// Would it ever make sense to run the thread, even without callbacks?
- // If so, then replace this by checks at each use for mCbf != NULL.
+ // If so, then replace this by checks at each use for mCallback != NULL.
LOG_ALWAYS_FATAL_IF(mCblk == NULL);
-
mLock.lock();
+ sp<IAudioTrackCallback> callback = mCallback.promote();
+ if (!callback) {
+ mCallback = nullptr;
+ return NS_NEVER;
+ }
if (mAwaitBoost) {
mAwaitBoost = false;
mLock.unlock();
@@ -2273,7 +2508,7 @@
sp<AudioTrackClientProxy> proxy = mProxy;
// Determine the number of new loop callback(s) that will be needed, while locked.
- int loopCountNotifications = 0;
+ uint32_t loopCountNotifications = 0;
uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
if (mLoopCount > 0) {
@@ -2295,7 +2530,7 @@
}
// These fields don't need to be cached, because they are assigned only by set():
- // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
+ // mTransfer, mCallback, mUserData, mFormat, mFrameSize, mFlags
// mFlags is also assigned by createTrack_l(), but not the bit we care about.
mLock.unlock();
@@ -2320,7 +2555,7 @@
if (status != DEAD_OBJECT) {
// for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
// instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
- mCbf(EVENT_STREAM_END, mUserData, NULL);
+ callback->onStreamEnd();
}
{
AutoMutex lock(mLock);
@@ -2343,28 +2578,27 @@
// perform callbacks while unlocked
if (newUnderrun) {
- mCbf(EVENT_UNDERRUN, mUserData, NULL);
+ callback->onUnderrun();
}
while (loopCountNotifications > 0) {
- mCbf(EVENT_LOOP_END, mUserData, NULL);
--loopCountNotifications;
+ callback->onLoopEnd(mLoopCount > 0 ? loopCountNotifications + mLoopCountNotified : -1);
}
if (flags & CBLK_BUFFER_END) {
- mCbf(EVENT_BUFFER_END, mUserData, NULL);
+ callback->onBufferEnd();
}
if (markerReached) {
- mCbf(EVENT_MARKER, mUserData, &markerPosition);
+ callback->onMarker(markerPosition.value());
}
while (newPosCount > 0) {
- size_t temp = newPosition.value(); // FIXME size_t != uint32_t
- mCbf(EVENT_NEW_POS, mUserData, &temp);
+ callback->onNewPos(newPosition.value());
newPosition += updatePeriod;
newPosCount--;
}
if (mObservedSequence != sequence) {
mObservedSequence = sequence;
- mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+ callback->onNewIAudioTrack();
// for offloaded tracks, just wait for the upper layers to recreate the track
if (isOffloadedOrDirect()) {
return NS_INACTIVE;
@@ -2502,10 +2736,9 @@
// written in the next write() call, since it's not passed through the callback
audioBuffer.size += nonContig;
}
- mCbf(mTransfer == TRANSFER_CALLBACK ? EVENT_MORE_DATA : EVENT_CAN_WRITE_MORE_DATA,
- mUserData, &audioBuffer);
- size_t writtenSize = audioBuffer.size;
-
+ const size_t writtenSize = (mTransfer == TRANSFER_CALLBACK)
+ ? callback->onMoreData(audioBuffer)
+ : callback->onCanWriteMoreData(audioBuffer);
// Validate on returned size
if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
@@ -2565,6 +2798,9 @@
return ns;
}
+ // releaseBuffer reads from audioBuffer.size
+ audioBuffer.size = writtenSize;
+
size_t releasedFrames = writtenSize / mFrameSize;
audioBuffer.frameCount = releasedFrames;
mRemainingFrames -= releasedFrames;
diff --git a/media/libaudioclient/AudioVolumeGroup.cpp b/media/libaudioclient/AudioVolumeGroup.cpp
index 361f7b8..ab95246 100644
--- a/media/libaudioclient/AudioVolumeGroup.cpp
+++ b/media/libaudioclient/AudioVolumeGroup.cpp
@@ -26,11 +26,10 @@
#include <media/AudioAttributes.h>
#include <media/PolicyAidlConversion.h>
-#define RETURN_STATUS_IF_ERROR(x) \
- { auto _tmp = (x); if (_tmp != OK) return _tmp; }
-
namespace android {
+using media::audio::common::AudioStreamType;
+
status_t AudioVolumeGroup::readFromParcel(const Parcel *parcel)
{
media::AudioVolumeGroup aidl;
@@ -55,7 +54,7 @@
legacy.getAudioAttributes(),
legacy2aidl_audio_attributes_t_AudioAttributesInternal));
aidl.streams = VALUE_OR_RETURN(
- convertContainer<std::vector<media::AudioStreamType>>(legacy.getStreamTypes(),
+ convertContainer<std::vector<AudioStreamType>>(legacy.getStreamTypes(),
legacy2aidl_audio_stream_type_t_AudioStreamType));
return aidl;
}
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index cae81f0..88e7396 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "IAudioFlinger"
//#define LOG_NDEBUG 0
+
#include <utils/Log.h>
#include <stdint.h>
@@ -30,6 +31,13 @@
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUuid;
#define MAX_ITEMS_PER_LIST 1024
@@ -40,12 +48,6 @@
std::move(_tmp.value()); \
})
-#define RETURN_STATUS_IF_ERROR(x) \
- { \
- auto _tmp = (x); \
- if (_tmp != OK) return _tmp; \
- }
-
#define RETURN_BINDER_IF_ERROR(x) \
{ \
auto _tmp = (x); \
@@ -55,7 +57,9 @@
ConversionResult<media::CreateTrackRequest> IAudioFlinger::CreateTrackInput::toAidl() const {
media::CreateTrackRequest aidl;
aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
- aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(config));
+    // Do not be misled by 'Input'--this is an input to 'createTrack', which creates output tracks.
+ aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+ config, false /*isInput*/));
aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
aidl.sharedBuffer = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(sharedBuffer));
aidl.notificationsPerBuffer = VALUE_OR_RETURN(convertIntegral<int32_t>(notificationsPerBuffer));
@@ -74,7 +78,9 @@
IAudioFlinger::CreateTrackInput::fromAidl(const media::CreateTrackRequest& aidl) {
IAudioFlinger::CreateTrackInput legacy;
legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
- legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.config));
+    // Do not be misled by 'Input'--this is an input to 'createTrack', which creates output tracks.
+ legacy.config = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfig_audio_config_t(aidl.config, false /*isInput*/));
legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
legacy.sharedBuffer = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.sharedBuffer));
legacy.notificationsPerBuffer = VALUE_OR_RETURN(
@@ -139,7 +145,8 @@
IAudioFlinger::CreateRecordInput::toAidl() const {
media::CreateRecordRequest aidl;
aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
- aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+ aidl.config = VALUE_OR_RETURN(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(config, true /*isInput*/));
aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(riid));
aidl.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -159,7 +166,8 @@
IAudioFlinger::CreateRecordInput legacy;
legacy.attr = VALUE_OR_RETURN(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
- legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
+ legacy.config = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config, true /*isInput*/));
legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
legacy.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -189,6 +197,8 @@
aidl.buffers = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(buffers));
aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(portId));
aidl.audioRecord = audioRecord;
+ aidl.serverConfig = VALUE_OR_RETURN(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(serverConfig, true /*isInput*/));
return aidl;
}
@@ -209,6 +219,8 @@
legacy.buffers = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.buffers));
legacy.portId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
legacy.audioRecord = aidl.audioRecord;
+ legacy.serverConfig = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.serverConfig, true /*isInput*/));
return legacy;
}
@@ -242,9 +254,9 @@
audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
auto result = [&]() -> ConversionResult<audio_format_t> {
int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
- media::audio::common::AudioFormat aidlRet;
+ AudioFormatDescription aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
- return aidl2legacy_AudioFormat_audio_format_t(aidlRet);
+ return aidl2legacy_AudioFormatDescription_audio_format_t(aidlRet);
}();
return result.value_or(AUDIO_FORMAT_INVALID);
}
@@ -309,14 +321,14 @@
status_t AudioFlingerClientAdapter::setStreamVolume(audio_stream_type_t stream, float value,
audio_io_handle_t output) {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
return statusTFromBinderStatus(mDelegate->setStreamVolume(streamAidl, value, outputAidl));
}
status_t AudioFlingerClientAdapter::setStreamMute(audio_stream_type_t stream, bool muted) {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
return statusTFromBinderStatus(mDelegate->setStreamMute(streamAidl, muted));
}
@@ -324,7 +336,7 @@
float AudioFlingerClientAdapter::streamVolume(audio_stream_type_t stream,
audio_io_handle_t output) const {
auto result = [&]() -> ConversionResult<float> {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
float aidlRet;
@@ -338,7 +350,7 @@
bool AudioFlingerClientAdapter::streamMute(audio_stream_type_t stream) const {
auto result = [&]() -> ConversionResult<bool> {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
bool aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -350,7 +362,7 @@
}
status_t AudioFlingerClientAdapter::setMode(audio_mode_t mode) {
- media::AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
+ AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
return statusTFromBinderStatus(mDelegate->setMode(modeAidl));
}
@@ -410,10 +422,10 @@
audio_channel_mask_t channelMask) const {
auto result = [&]() -> ConversionResult<size_t> {
int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
- media::audio::common::AudioFormat formatAidl = VALUE_OR_RETURN(
- legacy2aidl_audio_format_t_AudioFormat(format));
- int32_t channelMaskAidl = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(channelMask));
+ AudioFormatDescription formatAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(format));
+ AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(channelMask, true /*isInput*/));
int64_t aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(
mDelegate->getInputBufferSize(sampleRateAidl, formatAidl, channelMaskAidl,
@@ -469,7 +481,7 @@
}
status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
- media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
return statusTFromBinderStatus(mDelegate->invalidateStream(streamAidl));
}
@@ -568,9 +580,9 @@
const effect_uuid_t* pTypeUUID,
uint32_t preferredTypeFlag,
effect_descriptor_t* pDescriptor) const {
- media::AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
+ AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_uuid_t_AudioUuid(*pEffectUUID));
- media::AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
+ AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_uuid_t_AudioUuid(*pTypeUUID));
int32_t preferredTypeFlagAidl = VALUE_OR_RETURN_STATUS(
convertReinterpret<int32_t>(preferredTypeFlag));
@@ -715,6 +727,10 @@
return statusTFromBinderStatus(mDelegate->systemReady());
}
+status_t AudioFlingerClientAdapter::audioPolicyReady() {
+ return statusTFromBinderStatus(mDelegate->audioPolicyReady());
+}
+
size_t AudioFlingerClientAdapter::frameCountHAL(audio_io_handle_t ioHandle) const {
auto result = [&]() -> ConversionResult<size_t> {
int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
@@ -761,6 +777,32 @@
return statusTFromBinderStatus(mDelegate->updateSecondaryOutputs(trackSecondaryOutputInfos));
}
+status_t AudioFlingerClientAdapter::getMmapPolicyInfos(
+ AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+ return statusTFromBinderStatus(mDelegate->getMmapPolicyInfos(policyType, policyInfos));
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioMixerBurstCount() {
+ auto result = [&]() -> ConversionResult<int32_t> {
+ int32_t aidlRet;
+ RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->getAAudioMixerBurstCount(&aidlRet)));
+ return convertIntegral<int32_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+int32_t AudioFlingerClientAdapter::getAAudioHardwareBurstMinUsec() {
+ auto result = [&]() -> ConversionResult<int32_t> {
+ int32_t aidlRet;
+ RETURN_IF_ERROR(statusTFromBinderStatus(
+ mDelegate->getAAudioHardwareBurstMinUsec(&aidlRet)));
+ return convertIntegral<int32_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
// AudioFlingerServerAdapter
@@ -806,11 +848,11 @@
}
Status AudioFlingerServerAdapter::format(int32_t output,
- media::audio::common::AudioFormat* _aidl_return) {
+ AudioFormatDescription* _aidl_return) {
audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(output));
*_aidl_return = VALUE_OR_RETURN_BINDER(
- legacy2aidl_audio_format_t_AudioFormat(mDelegate->format(outputLegacy)));
+ legacy2aidl_audio_format_t_AudioFormatDescription(mDelegate->format(outputLegacy)));
return Status::ok();
}
@@ -856,7 +898,7 @@
return Status::fromStatusT(mDelegate->getMasterBalance(_aidl_return));
}
-Status AudioFlingerServerAdapter::setStreamVolume(media::AudioStreamType stream, float value,
+Status AudioFlingerServerAdapter::setStreamVolume(AudioStreamType stream, float value,
int32_t output) {
audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -865,13 +907,13 @@
return Status::fromStatusT(mDelegate->setStreamVolume(streamLegacy, value, outputLegacy));
}
-Status AudioFlingerServerAdapter::setStreamMute(media::AudioStreamType stream, bool muted) {
+Status AudioFlingerServerAdapter::setStreamMute(AudioStreamType stream, bool muted) {
audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
return Status::fromStatusT(mDelegate->setStreamMute(streamLegacy, muted));
}
-Status AudioFlingerServerAdapter::streamVolume(media::AudioStreamType stream, int32_t output,
+Status AudioFlingerServerAdapter::streamVolume(AudioStreamType stream, int32_t output,
float* _aidl_return) {
audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
@@ -881,14 +923,14 @@
return Status::ok();
}
-Status AudioFlingerServerAdapter::streamMute(media::AudioStreamType stream, bool* _aidl_return) {
+Status AudioFlingerServerAdapter::streamMute(AudioStreamType stream, bool* _aidl_return) {
audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
*_aidl_return = mDelegate->streamMute(streamLegacy);
return Status::ok();
}
-Status AudioFlingerServerAdapter::setMode(media::AudioMode mode) {
+Status AudioFlingerServerAdapter::setMode(AudioMode mode) {
audio_mode_t modeLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioMode_audio_mode_t(mode));
return Status::fromStatusT(mDelegate->setMode(modeLegacy));
}
@@ -934,13 +976,14 @@
}
Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
- media::audio::common::AudioFormat format,
- int32_t channelMask, int64_t* _aidl_return) {
+ const AudioFormatDescription& format,
+ const AudioChannelLayout& channelMask,
+ int64_t* _aidl_return) {
uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
- aidl2legacy_AudioFormat_audio_format_t(format));
+ aidl2legacy_AudioFormatDescription_audio_format_t(format));
audio_channel_mask_t channelMaskLegacy = VALUE_OR_RETURN_BINDER(
- aidl2legacy_int32_t_audio_channel_mask_t(channelMask));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(channelMask, true /*isInput*/));
size_t size = mDelegate->getInputBufferSize(sampleRateLegacy, formatLegacy, channelMaskLegacy);
*_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(size));
return Status::ok();
@@ -991,7 +1034,7 @@
return Status::fromStatusT(mDelegate->closeInput(inputLegacy));
}
-Status AudioFlingerServerAdapter::invalidateStream(media::AudioStreamType stream) {
+Status AudioFlingerServerAdapter::invalidateStream(AudioStreamType stream) {
audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
return Status::fromStatusT(mDelegate->invalidateStream(streamLegacy));
@@ -1066,8 +1109,8 @@
return Status::ok();
}
-Status AudioFlingerServerAdapter::getEffectDescriptor(const media::AudioUuid& effectUUID,
- const media::AudioUuid& typeUUID,
+Status AudioFlingerServerAdapter::getEffectDescriptor(const AudioUuid& effectUUID,
+ const AudioUuid& typeUUID,
int32_t preferredTypeFlag,
media::EffectDescriptor* _aidl_return) {
effect_uuid_t effectUuidLegacy = VALUE_OR_RETURN_BINDER(
@@ -1189,6 +1232,11 @@
return Status::fromStatusT(mDelegate->systemReady());
}
+Status AudioFlingerServerAdapter::audioPolicyReady() {
+ mDelegate->audioPolicyReady();
+ return Status::ok();
+}
+
Status AudioFlingerServerAdapter::frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) {
audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
@@ -1227,4 +1275,21 @@
return Status::fromStatusT(mDelegate->updateSecondaryOutputs(trackSecondaryOutputs));
}
+Status AudioFlingerServerAdapter::getMmapPolicyInfos(
+ AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *_aidl_return) {
+ return Status::fromStatusT(mDelegate->getMmapPolicyInfos(policyType, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::getAAudioMixerBurstCount(int32_t* _aidl_return) {
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ convertIntegral<int32_t>(mDelegate->getAAudioMixerBurstCount()));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) {
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ convertIntegral<int32_t>(mDelegate->getAAudioHardwareBurstMinUsec()));
+ return Status::ok();
+}
+
} // namespace android
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 25fdb49..fd94568 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -25,6 +25,7 @@
namespace android {
using base::unexpected;
+using media::audio::common::AudioDeviceAddress;
ConversionResult<volume_group_t>
aidl2legacy_int32_t_volume_group_t(int32_t aidl) {
@@ -152,7 +153,7 @@
case media::AudioMixMatchCriterionValue::source:
legacy.mSource = VALUE_OR_RETURN(
- aidl2legacy_AudioSourceType_audio_source_t(UNION_GET(aidl, source).value()));
+ aidl2legacy_AudioSource_audio_source_t(UNION_GET(aidl, source).value()));
*rule |= RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET;
return legacy;
@@ -184,7 +185,7 @@
case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
UNION_SET(aidl, source,
- VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.mSource)));
+ VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.mSource)));
break;
case RULE_MATCH_UID:
@@ -232,11 +233,14 @@
std::back_inserter(legacy.mCriteria),
aidl2legacy_AudioMixMatchCriterion));
legacy.mMixType = VALUE_OR_RETURN(aidl2legacy_AudioMixType_uint32_t(aidl.mixType));
- legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.format));
+ // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+ // an output mask is expected here.
+ legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(
+ aidl.format, false /*isInput*/));
legacy.mRouteFlags = VALUE_OR_RETURN(
aidl2legacy_AudioMixRouteFlag_uint32_t_mask(aidl.routeFlags));
- legacy.mDeviceType = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
- legacy.mDeviceAddress = VALUE_OR_RETURN(aidl2legacy_string_view_String8(aidl.device.address));
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+ aidl.device, &legacy.mDeviceType, &legacy.mDeviceAddress));
legacy.mCbFlags = VALUE_OR_RETURN(aidl2legacy_AudioMixCallbackFlag_uint32_t_mask(aidl.cbFlags));
legacy.mAllowPrivilegedMediaPlaybackCapture = aidl.allowPrivilegedMediaPlaybackCapture;
legacy.mVoiceCommunicationCaptureAllowed = aidl.voiceCommunicationCaptureAllowed;
@@ -251,11 +255,15 @@
legacy.mCriteria,
legacy2aidl_AudioMixMatchCriterion));
aidl.mixType = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixType(legacy.mMixType));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(legacy.mFormat));
+ // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+ // an output mask is expected here.
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+ legacy.mFormat, false /*isInput*/));
aidl.routeFlags = VALUE_OR_RETURN(
legacy2aidl_uint32_t_AudioMixRouteFlag_mask(legacy.mRouteFlags));
- aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mDeviceType));
- aidl.device.address = VALUE_OR_RETURN(legacy2aidl_String8_string(legacy.mDeviceAddress));
+ aidl.device = VALUE_OR_RETURN(
+ legacy2aidl_audio_device_AudioDevice(
+ legacy.mDeviceType, legacy.mDeviceAddress));
aidl.cbFlags = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixCallbackFlag_mask(legacy.mCbFlags));
aidl.allowPrivilegedMediaPlaybackCapture = legacy.mAllowPrivilegedMediaPlaybackCapture;
aidl.voiceCommunicationCaptureAllowed = legacy.mVoiceCommunicationCaptureAllowed;
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
new file mode 100644
index 0000000..d8c18c0
--- /dev/null
+++ b/media/libaudioclient/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "audio_aidl_conversion_tests"
+ }
+ ]
+}
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index e5e8496..cd3eacb 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -977,7 +977,7 @@
// Method: ToneGenerator::ToneGenerator()
//
// Description: Constructor. Initializes the tone sequencer, intantiates required sine wave
-// generators, instantiates output audio track.
+// generators, does not initialize output audio track.
//
// Input:
// streamType: Type of stream used for tone playback
@@ -1041,6 +1041,23 @@
mRegion = CEPT;
}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Method: ToneGenerator::onFirstRef()
+//
+// Description: Called upon first RefBase reference. Initializes audio track
+// with weak pointer to self as the registered callback.
+// Input:
+// none
+//
+// Output:
+// none
+//
+////////////////////////////////////////////////////////////////////////////////
+
+void ToneGenerator::onFirstRef() {
if (initAudioTrack()) {
ALOGV("ToneGenerator INIT OK, time: %d", (unsigned int)(systemTime()/1000000));
} else {
@@ -1048,9 +1065,6 @@
}
}
-
-
-
////////////////////////////////////////////////////////////////////////////////
//
// Method: ToneGenerator::~ToneGenerator()
@@ -1282,8 +1296,7 @@
AUDIO_CHANNEL_OUT_MONO,
frameCount,
AUDIO_OUTPUT_FLAG_FAST,
- audioCallback,
- this, // user
+ wp<AudioTrack::IAudioTrackCallback>::fromExisting(this),
0, // notificationFrames
0, // sharedBuffer
mThreadCanCallJava,
@@ -1308,50 +1321,47 @@
////////////////////////////////////////////////////////////////////////////////
//
-// Method: ToneGenerator::audioCallback()
+// Method: ToneGenerator::onMoreData()
//
// Description: AudioTrack callback implementation. Generates a block of
// PCM samples
// and manages tone generator sequencer: tones pulses, tone duration...
//
// Input:
-// user reference (pointer to our ToneGenerator)
-// info audio buffer descriptor
+// buffer A buffer object containing a pointer which we will fill with
+// buffer.size bytes.
//
// Output:
-// returned value: always true.
+// The number of bytes we successfully wrote.
//
////////////////////////////////////////////////////////////////////////////////
-void ToneGenerator::audioCallback(int event, void* user, void *info) {
+size_t ToneGenerator::onMoreData(const AudioTrack::Buffer& buffer) {
- if (event != AudioTrack::EVENT_MORE_DATA) return;
-
- AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
- ToneGenerator *lpToneGen = static_cast<ToneGenerator *>(user);
- int16_t *lpOut = buffer->i16;
- unsigned int lNumSmp = buffer->size/sizeof(int16_t);
- const ToneDescriptor *lpToneDesc = lpToneGen->mpToneDesc;
-
- if (buffer->size == 0) return;
-
+ int16_t *lpOut = buffer.i16;
+ uint32_t lNumSmp = (buffer.size / sizeof(int16_t) < UINT32_MAX) ?
+ buffer.size / sizeof(int16_t) : UINT32_MAX;
+ if (buffer.size == 0) return 0;
+ // We will write to the entire buffer unless we are stopped, then we return
+ // 0 at loop end
+ size_t bytesWritten = lNumSmp * sizeof(int16_t);
// Clear output buffer: WaveGenerator accumulates into lpOut buffer
- memset(lpOut, 0, buffer->size);
+ memset(lpOut, 0, buffer.size);
while (lNumSmp) {
- unsigned int lReqSmp = lNumSmp < lpToneGen->mProcessSize*2 ? lNumSmp : lpToneGen->mProcessSize;
+ unsigned int lReqSmp = lNumSmp < mProcessSize*2 ? lNumSmp : mProcessSize;
unsigned int lGenSmp;
unsigned int lWaveCmd = WaveGenerator::WAVEGEN_CONT;
bool lSignal = false;
- lpToneGen->mLock.lock();
+ mLock.lock();
// Update pcm frame count and end time (current time at the end of this process)
- lpToneGen->mTotalSmp += lReqSmp;
+ mTotalSmp += lReqSmp;
// Update tone gen state machine and select wave gen command
- switch (lpToneGen->mState) {
+ switch (mState) {
case TONE_PLAYING:
lWaveCmd = WaveGenerator::WAVEGEN_CONT;
break;
@@ -1365,7 +1375,7 @@
ALOGV("Stop/restart Cbk");
lWaveCmd = WaveGenerator::WAVEGEN_STOP;
- lpToneGen->mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
+ mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
break;
case TONE_STOPPED:
ALOGV("Stopped Cbk");
@@ -1376,20 +1386,20 @@
}
// Exit if tone sequence is over
- if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0 ||
- lpToneGen->mTotalSmp > lpToneGen->mMaxSmp) {
- if (lpToneGen->mState == TONE_PLAYING) {
- lpToneGen->mState = TONE_STOPPING;
+ if (mpToneDesc->segments[mCurSegment].duration == 0 ||
+ mTotalSmp > mMaxSmp) {
+ if (mState == TONE_PLAYING) {
+ mState = TONE_STOPPING;
}
- if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) {
+ if (mpToneDesc->segments[mCurSegment].duration == 0) {
goto audioCallback_EndLoop;
}
// fade out before stopping if maximum duration reached
lWaveCmd = WaveGenerator::WAVEGEN_STOP;
- lpToneGen->mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
+ mNextSegSmp = TONEGEN_INF; // forced to skip state machine management below
}
- if (lpToneGen->mTotalSmp > lpToneGen->mNextSegSmp) {
+ if (mTotalSmp > mNextSegSmp) {
// Time to go to next sequence segment
ALOGV("End Segment, time: %d", (unsigned int)(systemTime()/1000000));
@@ -1397,61 +1407,61 @@
lGenSmp = lReqSmp;
// If segment, ON -> OFF transition : ramp volume down
- if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
+ if (mpToneDesc->segments[mCurSegment].waveFreq[0] != 0) {
lWaveCmd = WaveGenerator::WAVEGEN_STOP;
unsigned int lFreqIdx = 0;
- uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+ uint16_t lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[lFreqIdx];
while (lFrequency != 0) {
- WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
+ WaveGenerator *lpWaveGen = mWaveGens.valueFor(lFrequency);
lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd);
- lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx];
+ lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[++lFreqIdx];
}
ALOGV("ON->OFF, lGenSmp: %d, lReqSmp: %d", lGenSmp, lReqSmp);
}
// check if we need to loop and loop for the reqd times
- if (lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
- if (lpToneGen->mLoopCounter < lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
+ if (mpToneDesc->segments[mCurSegment].loopCnt) {
+ if (mLoopCounter < mpToneDesc->segments[mCurSegment].loopCnt) {
ALOGV ("in if loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
- lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
- lpToneGen->mLoopCounter,
- lpToneGen->mCurSegment);
- lpToneGen->mCurSegment = lpToneDesc->segments[lpToneGen->mCurSegment].loopIndx;
- ++lpToneGen->mLoopCounter;
+ mpToneDesc->segments[mCurSegment].loopCnt,
+ mLoopCounter,
+ mCurSegment);
+ mCurSegment = mpToneDesc->segments[mCurSegment].loopIndx;
+ ++mLoopCounter;
} else {
// completed loop. go to next segment
- lpToneGen->mLoopCounter = 0;
- lpToneGen->mCurSegment++;
+ mLoopCounter = 0;
+ mCurSegment++;
ALOGV ("in else loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
- lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
- lpToneGen->mLoopCounter,
- lpToneGen->mCurSegment);
+ mpToneDesc->segments[mCurSegment].loopCnt,
+ mLoopCounter,
+ mCurSegment);
}
} else {
- lpToneGen->mCurSegment++;
+ mCurSegment++;
ALOGV ("Goto next seg loopCnt(%d) loopctr(%d), CurSeg(%d)",
- lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
- lpToneGen->mLoopCounter,
- lpToneGen->mCurSegment);
+ mpToneDesc->segments[mCurSegment].loopCnt,
+ mLoopCounter,
+ mCurSegment);
}
// Handle loop if last segment reached
- if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) {
- ALOGV("Last Seg: %d", lpToneGen->mCurSegment);
+ if (mpToneDesc->segments[mCurSegment].duration == 0) {
+ ALOGV("Last Seg: %d", mCurSegment);
// Pre increment loop count and restart if total count not reached. Stop sequence otherwise
- if (++lpToneGen->mCurCount <= lpToneDesc->repeatCnt) {
- ALOGV("Repeating Count: %d", lpToneGen->mCurCount);
+ if (++mCurCount <= mpToneDesc->repeatCnt) {
+ ALOGV("Repeating Count: %d", mCurCount);
- lpToneGen->mCurSegment = lpToneDesc->repeatSegment;
- if (lpToneDesc->segments[lpToneDesc->repeatSegment].waveFreq[0] != 0) {
+ mCurSegment = mpToneDesc->repeatSegment;
+ if (mpToneDesc->segments[mpToneDesc->repeatSegment].waveFreq[0] != 0) {
lWaveCmd = WaveGenerator::WAVEGEN_START;
}
- ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
- ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+ ALOGV("New segment %d, Next Time: %lld", mCurSegment,
+ ((long long)(mNextSegSmp)*1000)/mSamplingRate);
} else {
@@ -1459,10 +1469,10 @@
ALOGV("End repeat, time: %d", (unsigned int)(systemTime()/1000000));
}
} else {
- ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
- ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+ ALOGV("New segment %d, Next Time: %lld", mCurSegment,
+ ((long long)(mNextSegSmp)*1000)/mSamplingRate);
- if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
+ if (mpToneDesc->segments[mCurSegment].waveFreq[0] != 0) {
// If next segment is not silent, OFF -> ON transition : reset wave generator
lWaveCmd = WaveGenerator::WAVEGEN_START;
@@ -1472,13 +1482,13 @@
}
}
- // Update next segment transition position. No harm to do it also for last segment as lpToneGen->mNextSegSmp won't be used any more
- lpToneGen->mNextSegSmp
- += (lpToneDesc->segments[lpToneGen->mCurSegment].duration * lpToneGen->mSamplingRate) / 1000;
+ // Update next segment transition position. No harm to do it also for last segment as
+ // mNextSegSmp won't be used any more
+ mNextSegSmp += (mpToneDesc->segments[mCurSegment].duration * mSamplingRate) / 1000;
} else {
// Inside a segment keep tone ON or OFF
- if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] == 0) {
+ if (mpToneDesc->segments[mCurSegment].waveFreq[0] == 0) {
lGenSmp = 0; // If odd segment, tone is currently OFF
} else {
lGenSmp = lReqSmp; // If event segment, tone is currently ON
@@ -1488,12 +1498,12 @@
if (lGenSmp) {
// If samples must be generated, call all active wave generators and acumulate waves in lpOut
unsigned int lFreqIdx = 0;
- uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+ uint16_t lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[lFreqIdx];
while (lFrequency != 0) {
- WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
+ WaveGenerator *lpWaveGen = mWaveGens.valueFor(lFrequency);
lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd);
- lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx];
+ lFrequency = mpToneDesc->segments[mCurSegment].waveFreq[++lFreqIdx];
}
}
@@ -1501,21 +1511,19 @@
lpOut += lReqSmp;
audioCallback_EndLoop:
-
- switch (lpToneGen->mState) {
+ switch (mState) {
case TONE_RESTARTING:
ALOGV("Cbk restarting track");
- if (lpToneGen->prepareWave()) {
- lpToneGen->mState = TONE_STARTING;
- if (clock_gettime(CLOCK_MONOTONIC, &lpToneGen->mStartTime) != 0) {
- lpToneGen->mStartTime.tv_sec = 0;
+ if (prepareWave()) {
+ mState = TONE_STARTING;
+ if (clock_gettime(CLOCK_MONOTONIC, &mStartTime) != 0) {
+ mStartTime.tv_sec = 0;
}
- // must reload lpToneDesc as prepareWave() may change mpToneDesc
- lpToneDesc = lpToneGen->mpToneDesc;
+ // must reload mpToneDesc as prepareWave() may change mpToneDesc
} else {
ALOGW("Cbk restarting prepareWave() failed");
- lpToneGen->mState = TONE_IDLE;
- lpToneGen->mpAudioTrack->stop();
+ mState = TONE_IDLE;
+ mpAudioTrack->stop();
// Force loop exit
lNumSmp = 0;
}
@@ -1523,22 +1531,22 @@
break;
case TONE_STOPPING:
ALOGV("Cbk Stopping");
- lpToneGen->mState = TONE_STOPPED;
+ mState = TONE_STOPPED;
// Force loop exit
lNumSmp = 0;
break;
case TONE_STOPPED:
- lpToneGen->mState = TONE_INIT;
+ mState = TONE_INIT;
ALOGV("Cbk Stopped track");
- lpToneGen->mpAudioTrack->stop();
+ mpAudioTrack->stop();
// Force loop exit
lNumSmp = 0;
- buffer->size = 0;
+ bytesWritten = 0;
lSignal = true;
break;
case TONE_STARTING:
ALOGV("Cbk starting track");
- lpToneGen->mState = TONE_PLAYING;
+ mState = TONE_PLAYING;
lSignal = true;
break;
case TONE_PLAYING:
@@ -1546,14 +1554,15 @@
default:
// Force loop exit
lNumSmp = 0;
- buffer->size = 0;
+ bytesWritten = 0;
break;
}
if (lSignal)
- lpToneGen->mWaitCbkCond.broadcast();
- lpToneGen->mLock.unlock();
+ mWaitCbkCond.broadcast();
+ mLock.unlock();
}
+ return bytesWritten;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
index 04a02c7..335866f 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesEx.aidl
@@ -17,7 +17,7 @@
package android.media;
import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
/**
* This is the equivalent of the android::AudioAttributes C++ type.
diff --git a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
index 699df0a..2e74206 100644
--- a/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioAttributesInternal.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioContentType;
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioContentType;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
/**
* The "Internal" suffix of this type name is to disambiguate it from the
@@ -28,7 +28,7 @@
parcelable AudioAttributesInternal {
AudioContentType contentType;
AudioUsage usage;
- AudioSourceType source;
+ AudioSource source;
// Bitmask, indexed by AudioFlag.
int flags;
@utf8InCpp String tags; /* UTF8 */
diff --git a/media/libaudioclient/aidl/android/media/AudioConfig.aidl b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
deleted file mode 100644
index 8dc97d3..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfig.aidl
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioOffloadInfo;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioConfig {
- int sampleRate;
- /**
- * Interpreted as audio_channel_mask_t.
- * TODO(ytai): Create a designated type.
- */
- int channelMask;
- AudioFormat format;
- AudioOffloadInfo offloadInfo;
- long frameCount;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
deleted file mode 100644
index 8353c0d..0000000
--- a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioConfigBase {
- int sampleRate;
- /** Interpreted as audio_channel_mask_t. */
- int channelMask;
- AudioFormat format;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioContentType.aidl b/media/libaudioclient/aidl/android/media/AudioContentType.aidl
deleted file mode 100644
index f734fba..0000000
--- a/media/libaudioclient/aidl/android/media/AudioContentType.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-@Backing(type="int")
-enum AudioContentType {
- UNKNOWN = 0,
- SPEECH = 1,
- MUSIC = 2,
- MOVIE = 3,
- SONIFICATION = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioDevice.aidl b/media/libaudioclient/aidl/android/media/AudioDevice.aidl
deleted file mode 100644
index b200697..0000000
--- a/media/libaudioclient/aidl/android/media/AudioDevice.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioDevice {
- /** Interpreted as audio_devices_t. */
- int type;
- @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl b/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
deleted file mode 100644
index b03adfe..0000000
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMetadataType.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationMetadataType {
- NONE = 0,
- FRAMEWORK_TUNER = 1,
- DVB_AD_DESCRIPTOR = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl b/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
deleted file mode 100644
index 9e04e82..0000000
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationMode.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioEncapsulationMode {
- NONE = 0,
- ELEMENTARY_STREAM = 1,
- HANDLE = 2,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 58b493b..acf4e6d 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -34,4 +34,7 @@
MUTE_HAPTIC = 11,
NO_SYSTEM_CAPTURE = 12,
CAPTURE_PRIVATE = 13,
+ CONTENT_SPATIALIZED = 14,
+ NEVER_SPATIALIZE = 15,
+ CALL_REDIRECTION = 16,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioGain.aidl b/media/libaudioclient/aidl/android/media/AudioGain.aidl
deleted file mode 100644
index 048b295..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGain.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioGain {
- int index;
- boolean useInChannelMask;
- boolean useForVolume;
- /** Bitmask, indexed by AudioGainMode. */
- int mode;
- /** Interpreted as audio_channel_mask_t. */
- int channelMask;
- int minValue;
- int maxValue;
- int defaultValue;
- int stepValue;
- int minRampMs;
- int maxRampMs;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
deleted file mode 100644
index b93c2dc..0000000
--- a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioGainConfig {
- /** Index of the corresponding audio_gain in the audio_port gains[] table. */
- int index;
-
- /** Mode requested for this command. Bitfield indexed by AudioGainMode. */
- int mode;
-
- /**
- * Channels which gain value follows. N/A in joint mode.
- * Interpreted as audio_channel_mask_t.
- */
- int channelMask;
-
- /**
- * Gain values in millibels.
- * For each channel ordered from LSb to MSb in channel mask. The number of values is 1 in joint
- * mode, otherwise equals the number of bits implied by channelMask.
- */
- int[] values;
-
- /** Ramp duration in ms. */
- int rampDurationMs;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
similarity index 81%
rename from media/libaudioclient/aidl/android/media/AudioGainMode.aidl
rename to media/libaudioclient/aidl/android/media/AudioGainSys.aidl
index e1b9f0b..426f4ed 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGainSys.aidl
@@ -13,14 +13,15 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package android.media;
/**
+ * Provides additional runtime information for AudioGain, used by the framework.
+ *
* {@hide}
*/
-@Backing(type="int")
-enum AudioGainMode {
- JOINT = 0,
- CHANNELS = 1,
- RAMP = 2,
+parcelable AudioGainSys {
+ int index;
+ boolean isInput;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
deleted file mode 100644
index bfc0eb0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioInputFlags {
- FAST = 0,
- HW_HOTWORD = 1,
- RAW = 2,
- SYNC = 3,
- MMAP_NOIRQ = 4,
- VOIP_TX = 5,
- HW_AV_SYNC = 6,
- DIRECT = 7,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index 876ef9b..b01f902 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -17,7 +17,8 @@
package android.media;
import android.media.AudioPatch;
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
@@ -26,10 +27,10 @@
/** Interpreted as audio_io_handle_t. */
int ioHandle;
AudioPatch patch;
+ boolean isInput;
int samplingRate;
- AudioFormat format;
- /** Interpreted as audio_channel_mask_t. */
- int channelMask;
+ AudioFormatDescription format;
+ AudioChannelLayout channelMask;
long frameCount;
long frameCountHAL;
/** Only valid for output. */
diff --git a/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl b/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl
deleted file mode 100644
index f9b25bf..0000000
--- a/media/libaudioclient/aidl/android/media/AudioIoFlags.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-union AudioIoFlags {
- /** Bitmask indexed by AudioInputFlags. */
- int input;
- /** Bitmask indexed by AudioOutputFlags. */
- int output;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioMix.aidl b/media/libaudioclient/aidl/android/media/AudioMix.aidl
index 7473372..88b0450 100644
--- a/media/libaudioclient/aidl/android/media/AudioMix.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMix.aidl
@@ -16,12 +16,12 @@
package android.media;
-import android.media.AudioConfig;
-import android.media.AudioDevice;
import android.media.AudioMixCallbackFlag;
import android.media.AudioMixMatchCriterion;
import android.media.AudioMixRouteFlag;
import android.media.AudioMixType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl b/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
deleted file mode 100644
index d70b364..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMixLatencyClass {
- LOW = 0,
- NORMAL = 1,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
index e26a9e1..921a93a 100644
--- a/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMixMatchCriterionValue.aidl
@@ -16,15 +16,15 @@
package android.media;
-import android.media.AudioSourceType;
-import android.media.AudioUsage;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioUsage;
/**
* {@hide}
*/
union AudioMixMatchCriterionValue {
AudioUsage usage = AudioUsage.UNKNOWN;
- AudioSourceType source;
+ AudioSource source;
/** Interpreted as uid_t. */
int uid;
int userId;
diff --git a/media/libaudioclient/aidl/android/media/AudioMode.aidl b/media/libaudioclient/aidl/android/media/AudioMode.aidl
deleted file mode 100644
index 7067dd3..0000000
--- a/media/libaudioclient/aidl/android/media/AudioMode.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioMode {
- INVALID = -2,
- CURRENT = -1,
- NORMAL = 0,
- RINGTONE = 1,
- IN_CALL = 2,
- IN_COMMUNICATION = 3,
- CALL_SCREEN = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl b/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
deleted file mode 100644
index c86b3f0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioConfigBase;
-import android.media.AudioEncapsulationMode;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioOffloadInfo {
- /** Version of the info structure. Interpreted as a uint16_t version constant. */
- int version;
- /** Audio configuration. */
- AudioConfigBase config;
- /** Stream type. */
- AudioStreamType streamType;
- /** Bit rate in bits per second. */
- int bitRate;
- /** Duration in microseconds, -1 if unknown. */
- long durationUs;
- /** true if stream is tied to a video stream. */
- boolean hasVideo;
- /** true if streaming, false if local playback. */
- boolean isStreaming;
- int bitWidth;
- /** Offload fragment size. */
- int offloadBufferSize;
- AudioUsage usage;
- AudioEncapsulationMode encapsulationMode;
- /** Content id from tuner HAL (0 if none). */
- int contentId;
- /** Sync id from tuner HAL (0 if none). */
- int syncId;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
deleted file mode 100644
index cebd8f0..0000000
--- a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioOutputFlags {
- DIRECT = 0,
- PRIMARY = 1,
- FAST = 2,
- DEEP_BUFFER = 3,
- COMPRESS_OFFLOAD = 4,
- NON_BLOCKING = 5,
- HW_AV_SYNC = 6,
- TTS = 7,
- RAW = 8,
- SYNC = 9,
- IEC958_NONAUDIO = 10,
- DIRECT_PCM = 11,
- MMAP_NOIRQ = 12,
- VOIP_RX = 13,
- INCALL_MUSIC = 14,
- GAPLESS_OFFLOAD = 15,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPort.aidl b/media/libaudioclient/aidl/android/media/AudioPort.aidl
index bf0e5b7..ff177c0 100644
--- a/media/libaudioclient/aidl/android/media/AudioPort.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPort.aidl
@@ -16,35 +16,13 @@
package android.media;
-import android.media.AudioGain;
-import android.media.AudioPortConfig;
-import android.media.AudioPortExt;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.AudioProfile;
-import android.media.ExtraAudioDescriptor;
+import android.media.AudioPortSys;
+import android.media.audio.common.AudioPort;
/**
* {@hide}
*/
parcelable AudioPort {
- /** Port unique ID. Interpreted as audio_port_handle_t. */
- int id;
- /** Sink or source. */
- AudioPortRole role;
- /** Device, mix ... */
- AudioPortType type;
- @utf8InCpp String name;
- /** AudioProfiles supported by this port (format, Rates, Channels). */
- AudioProfile[] profiles;
- /**
- * ExtraAudioDescriptors supported by this port. The format is not unrecognized to the
- * platform. The audio capability is described by a hardware descriptor.
- */
- ExtraAudioDescriptor[] extraAudioDescriptors;
- /** Gain controllers. */
- AudioGain[] gains;
- /** Current audio port configuration. */
- AudioPortConfig activeConfig;
- AudioPortExt ext;
+ AudioPort hal;
+ AudioPortSys sys;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
index 2dd30a4..3a4ca31 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
@@ -16,44 +16,13 @@
package android.media;
-import android.media.AudioGainConfig;
-import android.media.AudioIoFlags;
-import android.media.AudioPortConfigExt;
-import android.media.AudioPortConfigType;
-import android.media.AudioPortRole;
-import android.media.AudioPortType;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioPortConfigSys;
+import android.media.audio.common.AudioPortConfig;
/**
* {@hide}
*/
parcelable AudioPortConfig {
- /**
- * Port unique ID.
- * Interpreted as audio_port_handle_t.
- */
- int id;
- /** Sink or source. */
- AudioPortRole role;
- /** Device, mix ... */
- AudioPortType type;
- /** Bitmask, indexed by AudioPortConfigType. */
- int configMask;
- /** Sampling rate in Hz. */
- int sampleRate;
- /**
- * Channel mask, if applicable.
- * Interpreted as audio_channel_mask_t.
- * TODO: bitmask?
- */
- int channelMask;
- /**
- * Format, if applicable.
- */
- AudioFormat format;
- /** Gain to apply, if applicable. */
- AudioGainConfig gain;
- /** Framework only: HW_AV_SYNC, DIRECT, ... */
- AudioIoFlags flags;
- AudioPortConfigExt ext;
+ AudioPortConfig hal;
+ AudioPortConfigSys sys;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
deleted file mode 100644
index a99aa9b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigDeviceExt {
- /**
- * Module the device is attached to.
- * Interpreted as audio_module_handle_t.
- */
- int hwModule;
- /**
- * Device type (e.g AUDIO_DEVICE_OUT_SPEAKER).
- * Interpreted as audio_devices_t.
- * TODO: Convert to a standalone AIDL representation.
- */
- int type;
- /** Device address. "" if N/A. */
- @utf8InCpp String address;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
deleted file mode 100644
index 5d635b6..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigDeviceExt;
-import android.media.AudioPortConfigMixExt;
-import android.media.AudioPortConfigSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortConfigExt {
- /**
- * This represents an empty union. Value is ignored.
- * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
- * established.
- */
- boolean unspecified;
- /** Device specific info. */
- AudioPortConfigDeviceExt device;
- /** Mix specific info. */
- AudioPortConfigMixExt mix;
- /** Session specific info. */
- AudioPortConfigSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
deleted file mode 100644
index d3226f2..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExt.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortConfigMixExtUseCase;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigMixExt {
- /**
- * Module the stream is attached to.
- * Interpreted as audio_module_handle_t.
- */
- int hwModule;
- /**
- * I/O handle of the input/output stream.
- * Interpreted as audio_io_handle_t.
- */
- int handle;
- AudioPortConfigMixExtUseCase usecase;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
deleted file mode 100644
index c61f044..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-
-/**
- * {@hide}
- */
-union AudioPortConfigMixExtUseCase {
- /**
- * This to be set if the containing config has the AudioPortRole::NONE role.
- * This represents an empty value (value is ignored).
- * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
- * established.
- */
- boolean unspecified;
- /** This to be set if the containing config has the AudioPortRole::SOURCE role. */
- AudioStreamType stream;
- /** This to be set if the containing config has the AudioPortRole::SINK role. */
- AudioSourceType source;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
deleted file mode 100644
index a2cbf62..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigSessionExt.aidl
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortConfigSessionExt {
- int session;
-}
diff --git a/media/libaudioclient/aidl/android/media/Int.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
similarity index 71%
rename from media/libaudioclient/aidl/android/media/Int.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
index 24f4d62..8692848 100644
--- a/media/libaudioclient/aidl/android/media/Int.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigSys.aidl
@@ -16,12 +16,17 @@
package android.media;
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+
/**
- * This is a simple wrapper around an 'int', putting it in a parcelable, so it can be used as an
- * inout parameter, be made @nullable, etc.
- *
* {@hide}
*/
-parcelable Int {
- int value;
+parcelable AudioPortConfigSys {
+ /** Sink or source. */
+ AudioPortRole role;
+ /** Device, mix ... */
+ AudioPortType type;
+ AudioPortExtSys ext;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
deleted file mode 100644
index 6e22b8d..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigType.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioPortConfigType {
- SAMPLE_RATE = 0,
- CHANNEL_MASK = 1,
- FORMAT = 2,
- GAIN = 3,
- FLAGS = 4,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
similarity index 85%
rename from media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
index b758f23..0f5a9b6 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,15 +16,12 @@
package android.media;
-import android.media.AudioDevice;
-
/**
* {@hide}
*/
-parcelable AudioPortDeviceExt {
+parcelable AudioPortDeviceExtSys {
/** Module the device is attached to. Interpreted as audio_module_handle_t. */
int hwModule;
- AudioDevice device;
/** Bitmask, indexed by AudioEncapsulationMode. */
int encapsulationModes;
/** Bitmask, indexed by AudioEncapsulationMetadataType. */
diff --git a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
deleted file mode 100644
index 453784b..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioPortDeviceExt;
-import android.media.AudioPortMixExt;
-import android.media.AudioPortSessionExt;
-
-/**
- * {@hide}
- */
-union AudioPortExt {
- /**
- * This represents an empty union. Value is ignored.
- * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
- * established.
- */
- boolean unspecified;
- /** Device specific info. */
- AudioPortDeviceExt device;
- /** Mix specific info. */
- AudioPortMixExt mix;
- /** Session specific info. */
- AudioPortSessionExt session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
similarity index 62%
copy from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
copy to media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
index b08a604..2cdf4f6 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
@@ -16,14 +16,19 @@
package android.media;
+import android.media.AudioPortDeviceExtSys;
+import android.media.AudioPortMixExtSys;
+
/**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
* {@hide}
*/
-@Backing(type="int")
-enum AudioEncapsulationType {
- NONE = 0,
- IEC61937 = 1,
-}
\ No newline at end of file
+union AudioPortExtSys {
+ /**
+ * This represents an empty union. Value is ignored.
+ */
+ boolean unspecified;
+ /** System-only parameters when the port is an audio device. */
+ AudioPortDeviceExtSys device;
+ /** System-only parameters when the port is an audio mix. */
+ AudioPortMixExtSys mix;
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
deleted file mode 100644
index 62cdb8e..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioMixLatencyClass;
-
-/**
- * {@hide}
- */
-parcelable AudioPortMixExt {
- /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
- int hwModule;
- /** I/O handle of the input/output stream. Interpreted as audio_io_handle_t. */
- int handle;
- /** Latency class */
- AudioMixLatencyClass latencyClass;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStandard.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
similarity index 81%
rename from media/libaudioclient/aidl/android/media/AudioStandard.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
index e131d0d..5999885 100644
--- a/media/libaudioclient/aidl/android/media/AudioStandard.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortMixExtSys.aidl
@@ -13,15 +13,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package android.media;
/**
- * The audio standard that describe audio playback/capture capabilites.
- *
* {@hide}
*/
-@Backing(type="int")
-enum AudioStandard {
- NONE = 0,
- EDID = 1,
+parcelable AudioPortMixExtSys {
+ /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
+ int hwModule;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
deleted file mode 100644
index dbca168..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioPortSessionExt {
- /** Audio session. Interpreted as audio_session_t. */
- int session;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
new file mode 100644
index 0000000..f3b5c19
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioGainSys;
+import android.media.AudioPortConfig;
+import android.media.AudioPortExtSys;
+import android.media.AudioPortRole;
+import android.media.AudioPortType;
+import android.media.AudioProfileSys;
+
+/**
+ * {@hide}
+ */
+parcelable AudioPortSys {
+ /** Sink or source. */
+ AudioPortRole role;
+ /** Device, mix ... */
+ AudioPortType type;
+ /** System-only parameters for each AudioProfile from 'port.profiles'. */
+ AudioProfileSys[] profiles;
+ /** System-only parameters for each AudioGain from 'port.gains'. */
+ AudioGainSys[] gains;
+ /** Current audio port configuration. */
+ AudioPortConfig activeConfig;
+ /** System-only extra parameters for 'port.ext'. */
+ AudioPortExtSys ext;
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioProfile.aidl b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
deleted file mode 100644
index afb288f..0000000
--- a/media/libaudioclient/aidl/android/media/AudioProfile.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioEncapsulationType;
-import android.media.audio.common.AudioFormat;
-
-/**
- * {@hide}
- */
-parcelable AudioProfile {
- @utf8InCpp String name;
- /** The format for an audio profile should only be set when initialized. */
- AudioFormat format;
- /** Interpreted as audio_channel_mask_t. */
- int[] channelMasks;
- int[] samplingRates;
- boolean isDynamicFormat;
- boolean isDynamicChannels;
- boolean isDynamicRate;
- AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
similarity index 68%
copy from media/libaudioclient/aidl/android/media/AudioGainMode.aidl
copy to media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
index e1b9f0b..329c9d5 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainMode.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioProfileSys.aidl
@@ -13,14 +13,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package android.media;
/**
+ * Provides indication whether the parameters of the AudioProfiles in the
+ * AudioPort are dynamic. Each instance of AudioProfileSys corresponds
+ * to an instance of AudioProfile.
+ *
* {@hide}
*/
-@Backing(type="int")
-enum AudioGainMode {
- JOINT = 0,
- CHANNELS = 1,
- RAMP = 2,
+parcelable AudioProfileSys {
+ boolean isDynamicFormat;
+ boolean isDynamicChannels;
+ boolean isDynamicRate;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl b/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
deleted file mode 100644
index 8673b92..0000000
--- a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioSourceType {
- INVALID = -1,
- DEFAULT = 0,
- MIC = 1,
- VOICE_UPLINK = 2,
- VOICE_DOWNLINK = 3,
- VOICE_CALL = 4,
- CAMCORDER = 5,
- VOICE_RECOGNITION = 6,
- VOICE_COMMUNICATION = 7,
- REMOTE_SUBMIX = 8,
- UNPROCESSED = 9,
- VOICE_PERFORMANCE = 10,
- ECHO_REFERENCE = 1997,
- FM_TUNER = 1998,
- /**
- * A low-priority, preemptible audio source for for background software
- * hotword detection. Same tuning as VOICE_RECOGNITION.
- * Used only internally by the framework.
- */
- HOTWORD = 1999,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl b/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
deleted file mode 100644
index d777882..0000000
--- a/media/libaudioclient/aidl/android/media/AudioStreamType.aidl
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioStreamType {
- DEFAULT = -1,
- VOICE_CALL = 0,
- SYSTEM = 1,
- RING = 2,
- MUSIC = 3,
- ALARM = 4,
- NOTIFICATION = 5,
- BLUETOOTH_SCO = 6,
- ENFORCED_AUDIBLE = 7,
- DTMF = 8,
- TTS = 9,
- ACCESSIBILITY = 10,
- ASSISTANT = 11,
- /** For dynamic policy output mixes. Only used by the audio policy */
- REROUTING = 12,
- /** For audio flinger tracks volume. Only used by the audioflinger */
- PATCH = 13,
- /** stream for corresponding to AUDIO_USAGE_CALL_ASSISTANT */
- CALL_ASSISTANT = 14,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUsage.aidl b/media/libaudioclient/aidl/android/media/AudioUsage.aidl
deleted file mode 100644
index 66c5c30..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUsage.aidl
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-@Backing(type="int")
-enum AudioUsage {
- UNKNOWN = 0,
- MEDIA = 1,
- VOICE_COMMUNICATION = 2,
- VOICE_COMMUNICATION_SIGNALLING = 3,
- ALARM = 4,
- NOTIFICATION = 5,
- NOTIFICATION_TELEPHONY_RINGTONE = 6,
- NOTIFICATION_COMMUNICATION_REQUEST = 7,
- NOTIFICATION_COMMUNICATION_INSTANT = 8,
- NOTIFICATION_COMMUNICATION_DELAYED = 9,
- NOTIFICATION_EVENT = 10,
- ASSISTANCE_ACCESSIBILITY = 11,
- ASSISTANCE_NAVIGATION_GUIDANCE = 12,
- ASSISTANCE_SONIFICATION = 13,
- GAME = 14,
- VIRTUAL_SOURCE = 15,
- ASSISTANT = 16,
- CALL_ASSISTANT = 17,
- EMERGENCY = 1000,
- SAFETY = 1001,
- VEHICLE_STATUS = 1002,
- ANNOUNCEMENT = 1003,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioUuid.aidl b/media/libaudioclient/aidl/android/media/AudioUuid.aidl
deleted file mode 100644
index bba9039..0000000
--- a/media/libaudioclient/aidl/android/media/AudioUuid.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-/**
- * {@hide}
- */
-parcelable AudioUuid {
- int timeLow;
- int timeMid;
- int timeHiAndVersion;
- int clockSeq;
- byte[] node; // Length = 6
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
index f88fc3c..8538d8a 100644
--- a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
@@ -24,4 +24,5 @@
int id;
float resonantFrequency;
float qFactor;
+ float maxAmplitude;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
index 3a29a08..b95a1d3 100644
--- a/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVolumeGroup.aidl
@@ -17,7 +17,7 @@
package android.media;
import android.media.AudioAttributesInternal;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
index 2d274f4..bcca04a 100644
--- a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
@@ -16,10 +16,10 @@
package android.media;
-import android.media.AudioDevice;
+import android.content.AttributionSourceState;
import android.media.EffectDescriptor;
import android.media.IEffectClient;
-import android.content.AttributionSourceState;
+import android.media.audio.common.AudioDevice;
/**
* Input arguments of the createEffect() method.
@@ -37,4 +37,6 @@
AudioDevice device;
AttributionSourceState attributionSource;
boolean probe;
+ /** true if a callback must be sent each time audio frames are processed */
+ boolean notifyFramesProcessed;
}
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
index 7e3c240..b938a3e 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
@@ -18,7 +18,7 @@
import android.media.AudioAttributesInternal;
import android.media.AudioClient;
-import android.media.AudioConfigBase;
+import android.media.audio.common.AudioConfigBase;
/**
* CreateRecordRequest contains all input arguments sent by AudioRecord to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
index d78b3fc..7d159d0 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordResponse.aidl
@@ -18,6 +18,7 @@
import android.media.IAudioRecord;
import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfigBase;
/**
* CreateRecordResponse contains all output arguments returned by AudioFlinger to AudioRecord
@@ -43,4 +44,5 @@
int portId;
/** The newly created record. */
@nullable IAudioRecord audioRecord;
+ AudioConfigBase serverConfig;
}
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
index 014b3ca..212221e 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackRequest.aidl
@@ -18,9 +18,9 @@
import android.media.AudioAttributesInternal;
import android.media.AudioClient;
-import android.media.AudioConfig;
import android.media.IAudioTrackCallback;
import android.media.SharedFileRegion;
+import android.media.audio.common.AudioConfig;
/**
* CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
index 40473fa..da6f454 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
import android.media.IAudioTrack;
/**
diff --git a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
index 35a3d74..e5b5158 100644
--- a/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/EffectDescriptor.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioUuid;
+import android.media.audio.common.AudioUuid;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl b/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
deleted file mode 100644
index ec5b67a..0000000
--- a/media/libaudioclient/aidl/android/media/ExtraAudioDescriptor.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioEncapsulationType;
-import android.media.AudioStandard;
-
-/**
- * The audio descriptor that descibes playback/capture capabilities according to
- * a particular standard.
- *
- * {@hide}
- */
-parcelable ExtraAudioDescriptor {
- AudioStandard standard;
- byte[] audioDescriptor;
- AudioEncapsulationType encapsulationType;
-}
diff --git a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
index 164fb9d..963877a 100644
--- a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioStreamType;
+import android.media.audio.common.AudioStreamType;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl b/media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
similarity index 72%
rename from media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
rename to media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
index b08a604..25115ac 100644
--- a/media/libaudioclient/aidl/android/media/AudioEncapsulationType.aidl
+++ b/media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
@@ -16,14 +16,13 @@
package android.media;
+import android.media.ISpatializer;
+
/**
- * Audio encapsulation type is used to describe if the audio data should be sent with a particular
- * encapsulation type or not.
- *
+ * Used as a return value for IAudioPolicyService.getSpatializer() method
* {@hide}
*/
-@Backing(type="int")
-enum AudioEncapsulationType {
- NONE = 0,
- IEC61937 = 1,
-}
\ No newline at end of file
+ parcelable GetSpatializerResponse {
+ /* The ISpatializer interface if successful, null if not */
+ @nullable ISpatializer spatializer;
+}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index d2cae6d..c55c66e 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,13 +16,10 @@
package android.media;
-import android.media.AudioMode;
import android.media.AudioPatch;
import android.media.AudioPort;
import android.media.AudioPortConfig;
-import android.media.AudioStreamType;
import android.media.AudioUniqueIdUse;
-import android.media.AudioUuid;
import android.media.AudioVibratorInfo;
import android.media.CreateEffectRequest;
import android.media.CreateEffectResponse;
@@ -41,7 +38,13 @@
import android.media.MicrophoneInfoData;
import android.media.RenderPosition;
import android.media.TrackSecondaryOutputInfo;
-import android.media.audio.common.AudioFormat;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMMapPolicyInfo;
+import android.media.audio.common.AudioMMapPolicyType;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUuid;
/**
* {@hide}
@@ -62,7 +65,7 @@
*/
int sampleRate(int /* audio_io_handle_t */ ioHandle);
- AudioFormat format(int /* audio_io_handle_t */ output);
+ AudioFormatDescription format(int /* audio_io_handle_t */ output);
long frameCount(int /* audio_io_handle_t */ ioHandle);
@@ -115,8 +118,8 @@
// Retrieve the audio recording buffer size in bytes.
// FIXME This API assumes a route, and so should be deprecated.
long getInputBufferSize(int sampleRate,
- AudioFormat format,
- int /* audio_channel_mask_t */ channelMask);
+ in AudioFormatDescription format,
+ in AudioChannelLayout channelMask);
OpenOutputResponse openOutput(in OpenOutputRequest request);
int /* audio_io_handle_t */ openDuplicateOutput(int /* audio_io_handle_t */ output1,
@@ -197,6 +200,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
oneway void systemReady();
+ /* Indicate audio policy service is ready */
+ oneway void audioPolicyReady();
+
// Returns the number of frames per audio HAL buffer.
long frameCountHAL(int /* audio_io_handle_t */ ioHandle);
@@ -213,4 +219,10 @@
// This usually happens when there is a dynamic policy registered.
void updateSecondaryOutputs(
in TrackSecondaryOutputInfo[] trackSecondaryOutputInfos);
+
+ AudioMMapPolicyInfo[] getMmapPolicyInfos(AudioMMapPolicyType policyType);
+
+ int getAAudioMixerBurstCount();
+
+ int getAAudioHardwareBurstMinUsec();
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 4c3955a..8e9ff86 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -18,16 +18,9 @@
import android.content.AttributionSourceState;
-import android.media.audio.common.AudioFormat;
-
import android.media.AudioAttributesEx;
import android.media.AudioAttributesInternal;
-import android.media.AudioConfig;
-import android.media.AudioConfigBase;
-import android.media.AudioDevice;
import android.media.AudioMix;
-import android.media.AudioMode;
-import android.media.AudioOffloadInfo;
import android.media.AudioOffloadMode;
import android.media.AudioPatch;
import android.media.AudioPolicyDeviceState;
@@ -38,19 +31,28 @@
import android.media.AudioPortRole;
import android.media.AudioPortType;
import android.media.AudioProductStrategy;
-import android.media.AudioSourceType;
-import android.media.AudioStreamType;
-import android.media.AudioUsage;
-import android.media.AudioUuid;
import android.media.AudioVolumeGroup;
import android.media.DeviceRole;
import android.media.EffectDescriptor;
import android.media.GetInputForAttrResponse;
import android.media.GetOutputForAttrResponse;
+import android.media.GetSpatializerResponse;
import android.media.IAudioPolicyServiceClient;
import android.media.ICaptureStateListener;
-import android.media.Int;
+import android.media.INativeSpatializerCallback;
import android.media.SoundTriggerSession;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioDeviceDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioMode;
+import android.media.audio.common.AudioOffloadInfo;
+import android.media.audio.common.AudioSource;
+import android.media.audio.common.AudioStreamType;
+import android.media.audio.common.AudioUsage;
+import android.media.audio.common.AudioUuid;
+import android.media.audio.common.Int;
/**
* IAudioPolicyService interface (see AudioPolicyInterface for method descriptions).
@@ -63,13 +65,13 @@
void setDeviceConnectionState(in AudioDevice device,
in AudioPolicyDeviceState state,
@utf8InCpp String deviceName,
- in AudioFormat encodedFormat);
+ in AudioFormatDescription encodedFormat);
AudioPolicyDeviceState getDeviceConnectionState(in AudioDevice device);
void handleDeviceConfigChange(in AudioDevice device,
@utf8InCpp String deviceName,
- in AudioFormat encodedFormat);
+ in AudioFormatDescription encodedFormat);
void setPhoneState(AudioMode state, int /* uid_t */ uid);
@@ -114,18 +116,18 @@
int indexMax);
void setStreamVolumeIndex(AudioStreamType stream,
- int /* audio_devices_t */ device,
+ in AudioDeviceDescription device,
int index);
int getStreamVolumeIndex(AudioStreamType stream,
- int /* audio_devices_t */ device);
+ in AudioDeviceDescription device);
void setVolumeIndexForAttributes(in AudioAttributesInternal attr,
- int /* audio_devices_t */ device,
+ in AudioDeviceDescription device,
int index);
int getVolumeIndexForAttributes(in AudioAttributesInternal attr,
- int /* audio_devices_t */ device);
+ in AudioDeviceDescription device);
int getMaxVolumeIndexForAttributes(in AudioAttributesInternal attr);
@@ -133,7 +135,7 @@
int /* product_strategy_t */ getStrategyForStream(AudioStreamType stream);
- int /* bitmask of audio_devices_t */ getDevicesForStream(AudioStreamType stream);
+ AudioDeviceDescription[] getDevicesForStream(AudioStreamType stream);
AudioDevice[] getDevicesForAttributes(in AudioAttributesEx attr);
@@ -155,7 +157,7 @@
boolean isStreamActiveRemotely(AudioStreamType stream, int inPastMs);
- boolean isSourceActive(AudioSourceType source);
+ boolean isSourceActive(AudioSource source);
/**
* On input, count represents the maximum length of the returned array.
@@ -170,7 +172,7 @@
@utf8InCpp String opPackageName,
in AudioUuid uuid,
int priority,
- AudioSourceType source);
+ AudioSource source);
int /* audio_unique_id_t */ addStreamDefaultEffect(in AudioUuid type,
@utf8InCpp String opPackageName,
@@ -268,7 +270,7 @@
boolean getMasterMono();
- float getStreamVolumeDB(AudioStreamType stream, int index, int /* audio_devices_t */ device);
+ float getStreamVolumeDB(AudioStreamType stream, int index, in AudioDeviceDescription device);
/**
* Populates supported surround formats and their enabled state in formats and formatsEnabled.
@@ -279,7 +281,7 @@
* number of elements without actually retrieving them.
*/
void getSurroundFormats(inout Int count,
- out AudioFormat[] formats,
+ out AudioFormatDescription[] formats,
out boolean[] formatsEnabled);
/**
@@ -291,11 +293,12 @@
* number of elements without actually retrieving them.
*/
void getReportedSurroundFormats(inout Int count,
- out AudioFormat[] formats);
+ out AudioFormatDescription[] formats);
- AudioFormat[] getHwOffloadEncodingFormatsSupportedForA2DP();
+ AudioFormatDescription[] getHwOffloadFormatsSupportedForBluetoothMedia(
+ in AudioDeviceDescription device);
- void setSurroundFormatEnabled(AudioFormat audioFormat, boolean enabled);
+ void setSurroundFormatEnabled(in AudioFormatDescription audioFormat, boolean enabled);
void setAssistantUid(int /* uid_t */ uid);
@@ -329,23 +332,48 @@
AudioDevice[] getDevicesForRoleAndStrategy(int /* product_strategy_t */ strategy,
DeviceRole role);
- void setDevicesRoleForCapturePreset(AudioSourceType audioSource,
+ void setDevicesRoleForCapturePreset(AudioSource audioSource,
DeviceRole role,
in AudioDevice[] devices);
- void addDevicesRoleForCapturePreset(AudioSourceType audioSource,
+ void addDevicesRoleForCapturePreset(AudioSource audioSource,
DeviceRole role,
in AudioDevice[] devices);
- void removeDevicesRoleForCapturePreset(AudioSourceType audioSource,
+ void removeDevicesRoleForCapturePreset(AudioSource audioSource,
DeviceRole role,
in AudioDevice[] devices);
- void clearDevicesRoleForCapturePreset(AudioSourceType audioSource,
+ void clearDevicesRoleForCapturePreset(AudioSource audioSource,
DeviceRole role);
- AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSourceType audioSource,
+ AudioDevice[] getDevicesForRoleAndCapturePreset(AudioSource audioSource,
DeviceRole role);
boolean registerSoundTriggerCaptureStateListener(ICaptureStateListener listener);
+
+ /** If a spatializer stage effect is present on the platform, this will return an
+ * ISpatializer interface (see GetSpatializerResponse.aidl) to control this
+ * feature.
+ * If no spatializer stage is present, a null interface is returned.
+ * The INativeSpatializerCallback passed must not be null.
+ * Only one ISpatializer interface can exist at a given time. The native audio policy
+ * service will reject the request if an interface was already acquired and previous owner
+ * did not die or call ISpatializer.release().
+ */
+ GetSpatializerResponse getSpatializer(INativeSpatializerCallback callback);
+
+ /** Queries if some kind of spatialization will be performed if the audio playback context
+ * described by the provided arguments is present.
+ * The context is made of:
+ * - The audio attributes describing the playback use case.
+ * - The audio configuration describing the audio format, channels, sampling rate...
+ * - The devices describing the sink audio device selected for playback.
+ * All arguments are optional and only the specified arguments are used to match against
+ * supported criteria. For instance, supplying no argument will tell if spatialization is
+ * supported or not in general.
+ */
+ boolean canBeSpatialized(in @nullable AudioAttributesInternal attr,
+ in @nullable AudioConfig config,
+ in AudioDevice[] devices);
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
index a7782b8..d93a59d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyServiceClient.aidl
@@ -16,10 +16,10 @@
package android.media;
-import android.media.AudioConfigBase;
-import android.media.AudioSourceType;
import android.media.EffectDescriptor;
import android.media.RecordClientInfo;
+import android.media.audio.common.AudioConfigBase;
+import android.media.audio.common.AudioSource;
/**
* {@hide}
@@ -43,7 +43,7 @@
in AudioConfigBase deviceConfig,
in EffectDescriptor[] effects,
int /* audio_patch_handle_t */ patchHandle,
- AudioSourceType source);
+ AudioSource source);
/** Notifies a change of audio routing */
void onRoutingUpdated();
}
diff --git a/media/libaudioclient/aidl/android/media/IEffectClient.aidl b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
index 3b6bcf1..37b442d 100644
--- a/media/libaudioclient/aidl/android/media/IEffectClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
@@ -43,4 +43,10 @@
* TODO(ytai): replace opaque byte arrays with strongly typed parameters.
*/
oneway void commandExecuted(int cmdCode, in byte[] cmdData, in byte[] replyData);
+
+ /**
+ * Called whenever audio frames have been processed by the effect engine.
+ * @param frames number of frames processed.
+ */
+ oneway void framesProcessed(int frames);
}
diff --git a/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
new file mode 100644
index 0000000..88b8108
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
+
+/**
+ * The INativeSpatializerCallback interface is a callback associated to the
+ * ISpatializer interface. The callback is used by the spatializer
+ * implementation in native audio server to communicate state changes to the
+ * client controlling the spatializer with the ISpatializer interface.
+ * {@hide}
+ */
+oneway interface INativeSpatializerCallback {
+ /** Called when the spatialization level applied by the spatializer changes
+ * (e.g. when the spatializer is enabled or disabled)
+ */
+ void onLevelChanged(SpatializationLevel level);
+
+ /** Called when the output stream the Spatializer is attached to changes.
+ * Indicates the IO Handle of the new output.
+ */
+ void onOutputChanged(int output);
+}
diff --git a/media/libaudioclient/aidl/android/media/ISpatializer.aidl b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
new file mode 100644
index 0000000..b871238
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.ISpatializerHeadTrackingCallback;
+import android.media.SpatializationLevel;
+import android.media.SpatializationMode;
+import android.media.SpatializerHeadTrackingMode;
+
+
+/**
+ * The ISpatializer interface is used to control the native audio service implementation
+ * of the spatializer stage with headtracking when present on a platform.
+ * It is intended for exclusive use by the java AudioService running in system_server.
+ * It provides APIs to discover the feature availability and options as well as control and report
+ * the active state and modes of the spatializer and head tracking effect.
+ * {@hide}
+ */
+interface ISpatializer {
+ /** Releases a ISpatializer interface previously acquired. */
+ void release();
+
+ /** Reports the list of supported spatialization levels (see SpatializationLevel.aidl).
+ * The list should never be empty if an ISpatializer interface was successfully
+ * retrieved with IAudioPolicyService.getSpatializer().
+ */
+ SpatializationLevel[] getSupportedLevels();
+
+ /** Selects the desired spatialization level (see SpatializationLevel.aidl). Selecting a level
+ * different from SpatializationLevel.NONE will create the specialized multichannel output
+ * mixer, create and enable the spatializer effect and let the audio policy attach eligible
+ * AudioTrack to this output stream.
+ */
+ void setLevel(SpatializationLevel level);
+
+ /** Gets the selected spatialization level (see SpatializationLevel.aidl) */
+ SpatializationLevel getLevel();
+
+ /** Reports if the spatializer engine supports head tracking or not.
+ * This is a pre condition independent of the fact that a head tracking sensor is
+ * registered or not.
+ */
+ boolean isHeadTrackingSupported();
+
+ /** Reports the list of supported head tracking modes (see SpatializerHeadTrackingMode.aidl).
+ * The list can be empty if the spatializer implementation does not support head tracking or if
+ * no head tracking sensor is registered (see setHeadSensor() and setScreenSensor()).
+ */
+ SpatializerHeadTrackingMode[] getSupportedHeadTrackingModes();
+
+ /** Selects the desired head tracking mode (see SpatializerHeadTrackingMode.aidl) */
+ void setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode);
+
+ /** Gets the actual head tracking mode. Can be different from the desired mode if conditions to
+ * enable the desired mode are not met (e.g if the head tracking device was removed)
+ */
+ SpatializerHeadTrackingMode getActualHeadTrackingMode();
+
+ /** Reset the head tracking algorithm to consider current head pose as neutral */
+ void recenterHeadTracker();
+
+ /** Set the screen to stage transform to use by the head tracking algorithm
+ * The screen to stage transform is conveyed as a vector of 6 elements,
+ * where the first three are a translation vector and
+ * the last three are a rotation vector.
+ */
+ void setGlobalTransform(in float[] screenToStage);
+
+ /**
+ * Set the sensor that is to be used for head-tracking.
+ * -1 can be used to disable head-tracking.
+ */
+ void setHeadSensor(int sensorHandle);
+
+ /**
+ * Set the sensor that is to be used for screen-tracking.
+ * -1 can be used to disable screen-tracking.
+ */
+ void setScreenSensor(int sensorHandle);
+
+ /**
+ * Sets the display orientation.
+ * Orientation is expressed in the angle of rotation from the physical "up" side of the screen
+ * to the logical "up" side of the content displayed on the screen. Counterclockwise angles, as
+ * viewed while facing the screen are positive.
+ */
+ void setDisplayOrientation(float physicalToLogicalAngle);
+
+ /**
+ * Sets the hinge angle for foldable devices.
+ */
+ void setHingeAngle(float hingeAngle);
+
+ /** Reports the list of supported spatialization modes (see SpatializationMode.aidl).
+ * The list should never be empty if an ISpatializer interface was successfully
+ * retrieved with IAudioPolicyService.getSpatializer().
+ */
+ SpatializationMode[] getSupportedModes();
+
+ /**
+ * Registers a callback to monitor head tracking functions.
+ * Only one callback can be registered on a Spatializer.
+ * The last callback registered wins and passing a nullptr unregisters
+ * last registered callback.
+ */
+ void registerHeadTrackingCallback(@nullable ISpatializerHeadTrackingCallback callback);
+
+ /**
+ * Sets a parameter to the spatializer engine. Used by effect implementor for vendor
+ * specific configuration.
+ */
+ void setParameter(int key, in byte[] value);
+
+ /**
+ * Gets a parameter from the spatializer engine. Used by effect implementor for vendor
+ * specific configuration.
+ */
+ void getParameter(int key, inout byte[] value);
+
+ /**
+ * Gets the io handle of the output stream the spatializer is connected to.
+ */
+ int getOutput();
+}
diff --git a/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
new file mode 100644
index 0000000..23d5e13
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
+
+/**
+ * The ISpatializerHeadTrackingCallback interface is a callback associated to the
+ * Spatializer head tracking function. It can be registered via the ISpatializer
+ * interface to monitor head tracking related states.
+ * {@hide}
+ */
+oneway interface ISpatializerHeadTrackingCallback {
+ /** Called when the head tracking mode has changed
+ */
+ void onHeadTrackingModeChanged(SpatializerHeadTrackingMode mode);
+
+ /** Called when the head to stage pose has been updated
+ * The head to stage pose is conveyed as a vector of 6 elements,
+ * where the first three are a translation vector and
+ * the last three are a rotation vector.
+ */
+ void onHeadToSoundStagePoseUpdated(in float[] headToStage);
+}
diff --git a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
index 2e55526..75ff8e9 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputRequest.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioConfig;
-import android.media.AudioDevice;
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
+import android.media.audio.common.AudioSource;
/**
* {@hide}
@@ -30,7 +30,7 @@
int input;
AudioConfig config;
AudioDevice device;
- AudioSourceType source;
+ AudioSource source;
/** Bitmask, indexed by AudioInputFlag. */
int flags;
}
diff --git a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
index b613ba5..41bc38a 100644
--- a/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenInputResponse.aidl
@@ -16,8 +16,8 @@
package android.media;
-import android.media.AudioConfig;
-import android.media.AudioDevice;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioDevice;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 06b12e9..90e7ea6 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -16,8 +16,9 @@
package android.media;
-import android.media.AudioConfig;
import android.media.AudioPort;
+import android.media.audio.common.AudioConfig;
+import android.media.audio.common.AudioConfigBase;
/**
* {@hide}
@@ -25,7 +26,8 @@
parcelable OpenOutputRequest {
/** Interpreted as audio_module_handle_t. */
int module;
- AudioConfig config;
+ AudioConfig halConfig;
+ AudioConfigBase mixerConfig;
/** Type must be DEVICE. */
AudioPort device;
/** Bitmask, indexed by AudioOutputFlag. */
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
index a051969..451a0bf 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputResponse.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioConfig;
+import android.media.audio.common.AudioConfig;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
index 3280460..7dad58d 100644
--- a/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/RecordClientInfo.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioSourceType;
+import android.media.audio.common.AudioSource;
/**
* {@hide}
@@ -28,7 +28,7 @@
int uid;
/** Interpreted as audio_session_t. */
int session;
- AudioSourceType source;
+ AudioSource source;
/** Interpreted as audio_port_handle_t. */
int portId;
boolean silenced;
diff --git a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
index a829e59..4b540a9 100644
--- a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
+++ b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
@@ -16,6 +16,8 @@
package android.media;
+import android.media.audio.common.AudioDeviceDescription;
+
/**
* {@hide}
*/
@@ -24,6 +26,6 @@
int session;
/** Interpreted as audio_io_handle_t. */
int ioHandle;
- /** Interpreted as audio_devices_t. */
- int device;
+ /** Device type. */
+ AudioDeviceDescription device;
}
diff --git a/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
new file mode 100644
index 0000000..961c5a1
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The spatialization level supported by the spatializer stage effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializationLevel {
+ /** Spatialization is disabled. */
+ NONE = 0,
+ /** The spatializer accepts audio with positional multichannel masks (e.g 5.1). */
+ SPATIALIZER_MULTICHANNEL = 1,
+ /** The spatializer accepts audio made of a channel bed of positional multichannels (e.g 5.1)
+ * and audio objects positioned independently via meta data.
+ */
+ SPATIALIZER_MCHAN_BED_PLUS_OBJECTS = 2,
+}
diff --git a/media/libaudioclient/aidl/android/media/SpatializationMode.aidl b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
new file mode 100644
index 0000000..5d8fd93
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The spatialization mode supported by the spatializer stage effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializationMode {
+ /** The spatializer supports binaural mode (over headphones type devices). */
+ SPATIALIZATER_BINAURAL = 0,
+ /** The spatializer supports transaural mode (over speaker type devices). */
+ SPATIALIZATER_TRANSAURAL = 1,
+}
diff --git a/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
new file mode 100644
index 0000000..58e0f61
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+
+/**
+ * The head tracking mode supported by the spatializer effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializerHeadTrackingMode {
+ /** Head tracking is active in a mode not listed below (forward compatibility) */
+ OTHER = 0,
+ /** Head tracking is disabled */
+ DISABLED = 1,
+ /** Head tracking is performed relative to the real world environment */
+ RELATIVE_WORLD = 2,
+ /** Head tracking is performed relative to the device's screen */
+ RELATIVE_SCREEN = 3,
+}
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index b290aa8..969e3e6 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -46,6 +46,7 @@
],
shared_libs: [
"android.hardware.audio.common-util",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index d03c6fa..4c89249 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -58,7 +58,8 @@
constexpr audio_mode_t kModes[] = {
AUDIO_MODE_INVALID, AUDIO_MODE_CURRENT, AUDIO_MODE_NORMAL, AUDIO_MODE_RINGTONE,
- AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN};
+ AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN,
+ AUDIO_MODE_CALL_REDIRECT, AUDIO_MODE_COMMUNICATION_REDIRECT};
constexpr audio_session_t kSessionId[] = {AUDIO_SESSION_NONE, AUDIO_SESSION_OUTPUT_STAGE,
AUDIO_SESSION_DEVICE};
@@ -231,7 +232,7 @@
attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
attributionSource.token = sp<BBinder>::make();
track->set(AUDIO_STREAM_DEFAULT, sampleRate, format, channelMask, frameCount, flags, nullptr,
- nullptr, notificationFrames, sharedBuffer, false, sessionId,
+ notificationFrames, sharedBuffer, false, sessionId,
((fast && sharedBuffer == 0) || offload) ? AudioTrack::TRANSFER_CALLBACK
: AudioTrack::TRANSFER_DEFAULT,
offload ? &offloadInfo : nullptr, attributionSource, &attributes, false, 1.0f,
@@ -383,6 +384,9 @@
const std::vector<uint8_t> &replyData __unused) override {
return binder::Status::ok();
}
+ binder::Status framesProcessed(int32_t frames __unused) override {
+ return binder::Status::ok();
+ }
};
status_t AudioFlingerFuzzer::invokeAudioEffect() {
@@ -424,6 +428,7 @@
request.attributionSource.packageName = opPackageName;
request.attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(getpid()));
request.probe = false;
+ request.notifyFramesProcessed = false;
media::CreateEffectResponse response{};
status_t status = af->createEffect(request, &response);
@@ -597,9 +602,10 @@
media::OpenInputRequest request{};
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
- request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+ request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
media::OpenInputResponse response{};
@@ -648,11 +654,16 @@
sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(getValue(&mFdp, kDevices));
audio_output_flags_t flags = getValue(&mFdp, kOutputFlags);
+ audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+
media::OpenOutputRequest request{};
media::OpenOutputResponse response{};
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.halConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(config, false /*isInput*/));
+ request.mixerConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(mixerConfig, false /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 4ec69c7..a6c93cf 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -23,34 +23,42 @@
#include <android/media/AudioAttributesInternal.h>
#include <android/media/AudioClient.h>
-#include <android/media/AudioConfig.h>
-#include <android/media/AudioConfigBase.h>
#include <android/media/AudioDualMonoMode.h>
-#include <android/media/AudioEncapsulationMode.h>
-#include <android/media/AudioEncapsulationMetadataType.h>
-#include <android/media/AudioEncapsulationType.h>
#include <android/media/AudioFlag.h>
-#include <android/media/AudioGain.h>
-#include <android/media/AudioGainMode.h>
-#include <android/media/AudioInputFlags.h>
#include <android/media/AudioIoConfigEvent.h>
#include <android/media/AudioIoDescriptor.h>
-#include <android/media/AudioMixLatencyClass.h>
-#include <android/media/AudioMode.h>
-#include <android/media/AudioOutputFlags.h>
#include <android/media/AudioPlaybackRate.h>
#include <android/media/AudioPort.h>
-#include <android/media/AudioPortConfigType.h>
-#include <android/media/AudioPortDeviceExt.h>
-#include <android/media/AudioPortExt.h>
-#include <android/media/AudioPortMixExt.h>
-#include <android/media/AudioPortSessionExt.h>
-#include <android/media/AudioProfile.h>
+#include <android/media/AudioPortConfig.h>
+#include <android/media/AudioPortDeviceExtSys.h>
#include <android/media/AudioTimestampInternal.h>
#include <android/media/AudioUniqueIdUse.h>
#include <android/media/EffectDescriptor.h>
-#include <android/media/ExtraAudioDescriptor.h>
#include <android/media/TrackSecondaryOutputInfo.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioConfig.h>
+#include <android/media/audio/common/AudioConfigBase.h>
+#include <android/media/audio/common/AudioContentType.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioEncapsulationMetadataType.h>
+#include <android/media/audio/common/AudioEncapsulationMode.h>
+#include <android/media/audio/common/AudioEncapsulationType.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
+#include <android/media/audio/common/AudioGain.h>
+#include <android/media/audio/common/AudioGainConfig.h>
+#include <android/media/audio/common/AudioGainMode.h>
+#include <android/media/audio/common/AudioInputFlags.h>
+#include <android/media/audio/common/AudioMode.h>
+#include <android/media/audio/common/AudioOffloadInfo.h>
+#include <android/media/audio/common/AudioOutputFlags.h>
+#include <android/media/audio/common/AudioPortExt.h>
+#include <android/media/audio/common/AudioPortMixExt.h>
+#include <android/media/audio/common/AudioProfile.h>
+#include <android/media/audio/common/AudioSource.h>
+#include <android/media/audio/common/AudioStandard.h>
+#include <android/media/audio/common/AudioUsage.h>
+#include <android/media/audio/common/AudioUuid.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
#include <android/media/SharedFileRegion.h>
#include <binder/IMemory.h>
@@ -86,19 +94,9 @@
ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
- media::AudioPortConfigType aidl);
-// The legacy enum is unnamed. Thus, we use int32_t.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
- int32_t legacy);
-
ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy);
-
ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy);
@@ -116,10 +114,10 @@
ConversionResult<std::optional<std::string_view>>
legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy);
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
media::AudioIoConfigEvent aidl);
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
- audio_io_config_event legacy);
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+ audio_io_config_event_t legacy);
ConversionResult<audio_port_role_t> aidl2legacy_AudioPortRole_audio_port_role_t(
media::AudioPortRole aidl);
@@ -131,36 +129,59 @@
ConversionResult<media::AudioPortType> legacy2aidl_audio_port_type_t_AudioPortType(
audio_port_type_t legacy);
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
- media::audio::common::AudioFormat aidl);
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
- audio_format_t legacy);
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ const media::audio::common::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioChannelLayout>
+legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+ const media::audio::common::AudioDeviceDescription& aidl);
+ConversionResult<media::audio::common::AudioDeviceDescription>
+legacy2aidl_audio_devices_t_AudioDeviceDescription(audio_devices_t legacy);
+
+status_t aidl2legacy_AudioDevice_audio_device(
+ const media::audio::common::AudioDevice& aidl,
+ audio_devices_t* legacyType, char* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+ const media::audio::common::AudioDevice& aidl,
+ audio_devices_t* legacyType, String8* legacyAddress);
+status_t aidl2legacy_AudioDevice_audio_device(
+ const media::audio::common::AudioDevice& aidl,
+ audio_devices_t* legacyType, std::string* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const char* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const String8& legacyAddress);
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+ const media::audio::common::AudioFormatDescription& aidl);
+ConversionResult<media::audio::common::AudioFormatDescription>
+legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
ConversionResult<audio_gain_mode_t>
-aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl);
-ConversionResult<media::AudioGainMode>
+aidl2legacy_AudioGainMode_audio_gain_mode_t(media::audio::common::AudioGainMode aidl);
+ConversionResult<media::audio::common::AudioGainMode>
legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy);
-
ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
- const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type);
-ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
- const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type);
+ const media::audio::common::AudioGainConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGainConfig>
+legacy2aidl_audio_gain_config_AudioGainConfig(const audio_gain_config& legacy, bool isInput);
-ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
- media::AudioInputFlags aidl);
-ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
- audio_input_flags_t legacy);
+ConversionResult<audio_input_flags_t>
+aidl2legacy_AudioInputFlags_audio_input_flags_t(media::audio::common::AudioInputFlags aidl);
+ConversionResult<media::audio::common::AudioInputFlags>
+legacy2aidl_audio_input_flags_t_AudioInputFlags(audio_input_flags_t legacy);
-ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
- media::AudioOutputFlags aidl);
-ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
- audio_output_flags_t legacy);
+ConversionResult<audio_output_flags_t>
+aidl2legacy_AudioOutputFlags_audio_output_flags_t(media::audio::common::AudioOutputFlags aidl);
+ConversionResult<media::audio::common::AudioOutputFlags>
+legacy2aidl_audio_output_flags_t_AudioOutputFlags(audio_output_flags_t legacy);
ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
int32_t aidl);
@@ -173,40 +194,43 @@
audio_output_flags_t legacy);
ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
- const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type);
-ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
- const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type);
+ const media::audio::common::AudioIoFlags& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+ const audio_io_flags& legacy, bool isInput);
ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
- const media::AudioPortConfigDeviceExt& aidl);
-ConversionResult<media::AudioPortConfigDeviceExt>
-legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
- const audio_port_config_device_ext& legacy);
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+ const media::audio::common::AudioPortDeviceExt& aidl,
+ const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+ const audio_port_config_device_ext& legacy,
+ media::audio::common::AudioPortDeviceExt* aidl,
+ media::AudioPortDeviceExtSys* aidlDeviceExt);
ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
- media::AudioStreamType aidl);
-ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
- audio_stream_type_t legacy);
+ media::audio::common::AudioStreamType aidl);
+ConversionResult<media::audio::common::AudioStreamType>
+legacy2aidl_audio_stream_type_t_AudioStreamType(audio_stream_type_t legacy);
-ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
- media::AudioSourceType aidl);
-ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+ media::audio::common::AudioSource aidl);
+ConversionResult<media::audio::common::AudioSource>
+ legacy2aidl_audio_source_t_AudioSource(
audio_source_t legacy);
ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy);
-ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
- const media::AudioPortConfigMixExt& aidl, media::AudioPortRole role);
-ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
- const audio_port_config_mix_ext& legacy, audio_port_role_t role);
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
+ const media::audio::common::AudioPortMixExt& aidl, media::AudioPortRole role,
+ const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_AudioPortMixExt(
+ const audio_port_config_mix_ext& legacy, audio_port_role_t role,
+ media::audio::common::AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt);
ConversionResult<audio_port_config_session_ext>
-aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
- const media::AudioPortConfigSessionExt& aidl);
-ConversionResult<media::AudioPortConfigSessionExt>
-legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+aidl2legacy_int32_t_audio_port_config_session_ext(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
const audio_port_config_session_ext& legacy);
ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
@@ -221,7 +245,6 @@
ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
const media::AudioIoDescriptor& aidl);
-
ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
const sp<AudioIoDescriptor>& legacy);
@@ -231,13 +254,14 @@
const AudioClient& legacy);
ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl);
-ConversionResult<media::AudioContentType>
+aidl2legacy_AudioContentType_audio_content_type_t(
+ media::audio::common::AudioContentType aidl);
+ConversionResult<media::audio::common::AudioContentType>
legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy);
ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::AudioUsage aidl);
-ConversionResult<media::AudioUsage>
+aidl2legacy_AudioUsage_audio_usage_t(media::audio::common::AudioUsage aidl);
+ConversionResult<media::audio::common::AudioUsage>
legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy);
ConversionResult<audio_flags_mask_t>
@@ -256,24 +280,27 @@
legacy2aidl_audio_attributes_t_AudioAttributesInternal(const audio_attributes_t& legacy);
ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(media::AudioEncapsulationMode aidl);
-ConversionResult<media::AudioEncapsulationMode>
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(
+ media::audio::common::AudioEncapsulationMode aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMode>
legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy);
ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const media::AudioOffloadInfo& aidl);
-ConversionResult<media::AudioOffloadInfo>
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(
+ const media::audio::common::AudioOffloadInfo& aidl);
+ConversionResult<media::audio::common::AudioOffloadInfo>
legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy);
ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl);
-ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy);
+aidl2legacy_AudioConfig_audio_config_t(const media::audio::common::AudioConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput);
ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl);
-ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy);
+aidl2legacy_AudioConfigBase_audio_config_base_t(
+ const media::audio::common::AudioConfigBase& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput);
ConversionResult<sp<IMemory>>
aidl2legacy_SharedFileRegion_IMemory(const media::SharedFileRegion& aidl);
@@ -291,8 +318,8 @@
legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy);
ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl);
-ConversionResult<media::AudioUuid>
+aidl2legacy_AudioUuid_audio_uuid_t(const media::audio::common::AudioUuid& aidl);
+ConversionResult<media::audio::common::AudioUuid>
legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy);
ConversionResult<effect_descriptor_t>
@@ -302,8 +329,8 @@
ConversionResult<audio_encapsulation_metadata_type_t>
aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
- media::AudioEncapsulationMetadataType aidl);
-ConversionResult<media::AudioEncapsulationMetadataType>
+ media::audio::common::AudioEncapsulationMetadataType aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMetadataType>
legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
audio_encapsulation_metadata_type_t legacy);
@@ -317,37 +344,39 @@
ConversionResult<int32_t>
legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy);
-ConversionResult<audio_mix_latency_class_t>
-aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
- media::AudioMixLatencyClass aidl);
-ConversionResult<media::AudioMixLatencyClass>
-legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
- audio_mix_latency_class_t legacy);
-
ConversionResult<audio_port_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl);
-ConversionResult<media::AudioPortDeviceExt>
-legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy);
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+ const media::audio::common::AudioPortDeviceExt& aidl,
+ const media::AudioPortDeviceExtSys& aidlDeviceExt);
+status_t legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+ const audio_port_device_ext& legacy,
+ media::audio::common::AudioPortDeviceExt* aidl,
+ media::AudioPortDeviceExtSys* aidlDeviceExt);
ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl);
-ConversionResult<media::AudioPortMixExt>
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy);
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+ const media::audio::common::AudioPortMixExt& aidl,
+ const media::AudioPortMixExtSys& aidlMixExt);
+status_t legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+ const audio_port_mix_ext& legacy,
+ media::audio::common::AudioPortMixExt* aidl,
+ media::AudioPortMixExtSys* aidlMixExt);
ConversionResult<audio_port_session_ext>
-aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl);
-ConversionResult<media::AudioPortSessionExt>
-legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy);
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl);
+ConversionResult<int32_t>
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy);
ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl);
-ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy);
+aidl2legacy_AudioProfile_audio_profile(
+ const media::audio::common::AudioProfile& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput);
ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl);
-ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy);
+aidl2legacy_AudioGain_audio_gain(const media::audio::common::AudioGain& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput);
ConversionResult<audio_port_v7>
aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl);
@@ -355,8 +384,8 @@
legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl);
-ConversionResult<media::AudioMode>
+aidl2legacy_AudioMode_audio_mode_t(media::audio::common::AudioMode aidl);
+ConversionResult<media::audio::common::AudioMode>
legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
ConversionResult<audio_unique_id_use_t>
@@ -390,21 +419,21 @@
legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy);
ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::AudioStandard aidl);
-ConversionResult<media::AudioStandard>
+aidl2legacy_AudioStandard_audio_standard_t(media::audio::common::AudioStandard aidl);
+ConversionResult<media::audio::common::AudioStandard>
legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy);
ConversionResult<audio_extra_audio_descriptor>
aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
- const media::ExtraAudioDescriptor& aidl);
-ConversionResult<media::ExtraAudioDescriptor>
+ const media::audio::common::ExtraAudioDescriptor& aidl);
+ConversionResult<media::audio::common::ExtraAudioDescriptor>
legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
const audio_extra_audio_descriptor& legacy);
ConversionResult<audio_encapsulation_type_t>
aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
- const media::AudioEncapsulationType& aidl);
-ConversionResult<media::AudioEncapsulationType>
+ const media::audio::common::AudioEncapsulationType& aidl);
+ConversionResult<media::audio::common::AudioEncapsulationType>
legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
const audio_encapsulation_type_t & legacy);
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/libaudioclient/include/media/AidlConversionUtil.h
index c1a2be3..dfabd55 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/libaudioclient/include/media/AidlConversionUtil.h
@@ -41,6 +41,9 @@
#define RETURN_IF_ERROR(result) \
if (status_t _tmp = (result); _tmp != OK) return base::unexpected(_tmp);
+#define RETURN_STATUS_IF_ERROR(result) \
+ if (status_t _tmp = (result); _tmp != OK) return _tmp;
+
#define VALUE_OR_RETURN_STATUS(x) \
({ \
auto _tmp = (x); \
@@ -119,6 +122,47 @@
return output;
}
+/**
+ * A generic template that helps to "zip" two input containers of the same size
+ * into a single vector of converted types. The conversion function must
+ * thus accept two arguments.
+ */
+template<typename OutputContainer, typename InputContainer1,
+ typename InputContainer2, typename Func>
+ConversionResult<OutputContainer>
+convertContainers(const InputContainer1& input1, const InputContainer2& input2,
+ const Func& itemConversion) {
+ auto iter2 = input2.begin();
+ OutputContainer output;
+ auto ins = std::inserter(output, output.begin());
+ for (const auto& item1 : input1) {
+ RETURN_IF_ERROR(iter2 != input2.end() ? OK : BAD_VALUE);
+ *ins = VALUE_OR_RETURN(itemConversion(item1, *iter2++));
+ }
+ return output;
+}
+
+/**
+ * A generic template that helps to "unzip" a per-element conversion into
+ * a pair of elements into a pair of containers. The conversion function
+ * must emit a pair of elements.
+ */
+template<typename OutputContainer1, typename OutputContainer2,
+ typename InputContainer, typename Func>
+ConversionResult<std::pair<OutputContainer1, OutputContainer2>>
+convertContainerSplit(const InputContainer& input, const Func& itemConversion) {
+ OutputContainer1 output1;
+ OutputContainer2 output2;
+ auto ins1 = std::inserter(output1, output1.begin());
+ auto ins2 = std::inserter(output2, output2.begin());
+ for (const auto& item : input) {
+ auto out_pair = VALUE_OR_RETURN(itemConversion(item));
+ *ins1 = out_pair.first;
+ *ins2 = out_pair.second;
+ }
+ return std::make_pair(output1, output2);
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
// The code below establishes:
// IntegralTypeOf<T>, which works for either integral types (in which case it evaluates to T), or
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 5dfe5fc..862a0f9 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -17,9 +17,75 @@
#pragma once
+#include <functional>
+
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioDeviceDescription.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
+#include <binder/Parcelable.h>
#include <system/audio.h>
#include <system/audio_policy.h>
-#include <binder/Parcelable.h>
+
+namespace {
+// see boost::hash_combine
+#if defined(__clang__)
+__attribute__((no_sanitize("unsigned-integer-overflow")))
+#endif
+static size_t hash_combine(size_t seed, size_t v) {
+ return std::hash<size_t>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+}
+
+namespace std {
+
+// Note: when extending the types hashed below we need to account for the
+// possibility of processing types belonging to different versions of the type,
+// e.g. a HAL may be using a previous version of the AIDL interface.
+
+template<> struct hash<android::media::audio::common::AudioChannelLayout>
+{
+ std::size_t operator()(
+ const android::media::audio::common::AudioChannelLayout& acl) const noexcept {
+ using Tag = android::media::audio::common::AudioChannelLayout::Tag;
+ const size_t seed = std::hash<Tag>{}(acl.getTag());
+ switch (acl.getTag()) {
+ case Tag::none:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::none>()));
+ case Tag::invalid:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::invalid>()));
+ case Tag::indexMask:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::indexMask>()));
+ case Tag::layoutMask:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::layoutMask>()));
+ case Tag::voiceMask:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::voiceMask>()));
+ }
+ return seed;
+ }
+};
+
+template<> struct hash<android::media::audio::common::AudioDeviceDescription>
+{
+ std::size_t operator()(
+ const android::media::audio::common::AudioDeviceDescription& add) const noexcept {
+ return hash_combine(
+ std::hash<android::media::audio::common::AudioDeviceType>{}(add.type),
+ std::hash<std::string>{}(add.connection));
+ }
+};
+
+template<> struct hash<android::media::audio::common::AudioFormatDescription>
+{
+ std::size_t operator()(
+ const android::media::audio::common::AudioFormatDescription& afd) const noexcept {
+ return hash_combine(
+ std::hash<android::media::audio::common::AudioFormatType>{}(afd.type),
+ hash_combine(
+ std::hash<android::media::audio::common::PcmType>{}(afd.pcm),
+ std::hash<std::string>{}(afd.encoding)));
+ }
+};
+} // namespace std
namespace android {
@@ -41,8 +107,43 @@
return !(lhs==rhs);
}
+constexpr bool operator==(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+ return lhs.version == rhs.version && lhs.size == rhs.size &&
+ lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format && lhs.stream_type == rhs.stream_type &&
+ lhs.bit_rate == rhs.bit_rate && lhs.duration_us == rhs.duration_us &&
+ lhs.has_video == rhs.has_video && lhs.is_streaming == rhs.is_streaming &&
+ lhs.bit_width == rhs.bit_width && lhs.offload_buffer_size == rhs.offload_buffer_size &&
+ lhs.usage == rhs.usage && lhs.encapsulation_mode == rhs.encapsulation_mode &&
+ lhs.content_id == rhs.content_id && lhs.sync_id == rhs.sync_id;
+}
+constexpr bool operator!=(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+ return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format && lhs.offload_info == rhs.offload_info;
+}
+constexpr bool operator!=(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+ return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format;
+}
+constexpr bool operator!=(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
enum volume_group_t : uint32_t;
static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
} // namespace android
-
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index 3c19ec1..ee262f3 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -40,7 +40,7 @@
// ----------------------------------------------------------------------------
-class AudioEffect : public RefBase
+class AudioEffect : public virtual RefBase
{
public:
@@ -283,7 +283,8 @@
EVENT_CONTROL_STATUS_CHANGED = 0,
EVENT_ENABLE_STATUS_CHANGED = 1,
EVENT_PARAMETER_CHANGED = 2,
- EVENT_ERROR = 3
+ EVENT_ERROR = 3,
+ EVENT_FRAMES_PROCESSED = 4,
};
/* Callback function notifying client application of a change in effect engine state or
@@ -389,7 +390,8 @@
audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE,
const AudioDeviceTypeAddr& device = {},
- bool probe = false);
+ bool probe = false,
+ bool notifyFramesProcessed = false);
/*
* Same as above but with type and uuid specified by character strings.
*/
@@ -401,7 +403,8 @@
audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE,
const AudioDeviceTypeAddr& device = {},
- bool probe = false);
+ bool probe = false,
+ bool notifyFramesProcessed = false);
/* Result of constructing the AudioEffect. This must be checked
* before using any AudioEffect API.
@@ -552,6 +555,7 @@
virtual void commandExecuted(int32_t cmdCode,
const std::vector<uint8_t>& cmdData,
const std::vector<uint8_t>& replyData);
+ virtual void framesProcessed(int32_t frames);
private:
@@ -587,6 +591,14 @@
}
return binder::Status::ok();
}
+ binder::Status framesProcessed(int32_t frames) override {
+ sp<AudioEffect> effect = mEffect.promote();
+ if (effect != 0) {
+ effect->framesProcessed(frames);
+ }
+ return binder::Status::ok();
+ }
+
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& /*who*/) {
diff --git a/media/libaudioclient/include/media/AudioIoDescriptor.h b/media/libaudioclient/include/media/AudioIoDescriptor.h
index 981d33a..405ec7d 100644
--- a/media/libaudioclient/include/media/AudioIoDescriptor.h
+++ b/media/libaudioclient/include/media/AudioIoDescriptor.h
@@ -17,9 +17,15 @@
#ifndef ANDROID_AUDIO_IO_DESCRIPTOR_H
#define ANDROID_AUDIO_IO_DESCRIPTOR_H
+#include <sstream>
+#include <string>
+
+#include <system/audio.h>
+#include <utils/RefBase.h>
+
namespace android {
-enum audio_io_config_event {
+enum audio_io_config_event_t {
AUDIO_OUTPUT_REGISTERED,
AUDIO_OUTPUT_OPENED,
AUDIO_OUTPUT_CLOSED,
@@ -33,41 +39,70 @@
// audio input/output descriptor used to cache output configurations in client process to avoid
// frequent calls through IAudioFlinger
-class AudioIoDescriptor : public RefBase {
+class AudioIoDescriptor : public virtual RefBase {
public:
- AudioIoDescriptor() :
- mIoHandle(AUDIO_IO_HANDLE_NONE),
- mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(AUDIO_CHANNEL_NONE),
- mFrameCount(0), mFrameCountHAL(0), mLatency(0), mPortId(AUDIO_PORT_HANDLE_NONE)
- {
- memset(&mPatch, 0, sizeof(struct audio_patch));
- }
+ AudioIoDescriptor() = default;
+ // For AUDIO_{INPUT|OUTPUT}_CLOSED events.
+ AudioIoDescriptor(audio_io_handle_t ioHandle) : mIoHandle(ioHandle) {}
+ // For AUDIO_CLIENT_STARTED events.
+ AudioIoDescriptor(
+ audio_io_handle_t ioHandle, const audio_patch& patch, audio_port_handle_t portId) :
+ mIoHandle(ioHandle), mPatch(patch), mPortId(portId) {}
+ // For everything else.
+ AudioIoDescriptor(
+ audio_io_handle_t ioHandle, const audio_patch& patch, bool isInput,
+ uint32_t samplingRate, audio_format_t format, audio_channel_mask_t channelMask,
+ size_t frameCount, size_t frameCountHal, uint32_t latency = 0,
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) :
+ mIoHandle(ioHandle), mPatch(patch), mIsInput(isInput),
+ mSamplingRate(samplingRate), mFormat(format), mChannelMask(channelMask),
+ mFrameCount(frameCount), mFrameCountHAL(frameCountHal), mLatency(latency),
+ mPortId(portId) {}
- virtual ~AudioIoDescriptor() {}
-
- audio_port_handle_t getDeviceId() {
+ audio_io_handle_t getIoHandle() const { return mIoHandle; }
+ const audio_patch& getPatch() const { return mPatch; }
+ bool getIsInput() const { return mIsInput; }
+ uint32_t getSamplingRate() const { return mSamplingRate; }
+ audio_format_t getFormat() const { return mFormat; }
+ audio_channel_mask_t getChannelMask() const { return mChannelMask; }
+ size_t getFrameCount() const { return mFrameCount; }
+ size_t getFrameCountHAL() const { return mFrameCountHAL; }
+ uint32_t getLatency() const { return mLatency; }
+ audio_port_handle_t getPortId() const { return mPortId; }
+ audio_port_handle_t getDeviceId() const {
if (mPatch.num_sources != 0 && mPatch.num_sinks != 0) {
- if (mPatch.sources[0].type == AUDIO_PORT_TYPE_MIX) {
- // this is an output mix
- // FIXME: the API only returns the first device in case of multiple device selection
- return mPatch.sinks[0].id;
- } else {
- // this is an input mix
- return mPatch.sources[0].id;
- }
+ // FIXME: the API only returns the first device in case of multiple device selection
+ return mIsInput ? mPatch.sources[0].id : mPatch.sinks[0].id;
}
return AUDIO_PORT_HANDLE_NONE;
}
+ void setPatch(const audio_patch& patch) { mPatch = patch; }
- audio_io_handle_t mIoHandle;
- struct audio_patch mPatch;
- uint32_t mSamplingRate;
- audio_format_t mFormat;
- audio_channel_mask_t mChannelMask;
- size_t mFrameCount;
- size_t mFrameCountHAL;
- uint32_t mLatency; // only valid for output
- audio_port_handle_t mPortId; // valid for event AUDIO_CLIENT_STARTED
+ std::string toDebugString() const {
+ std::ostringstream ss;
+ ss << mIoHandle << ", samplingRate " << mSamplingRate << ", "
+ << audio_format_to_string(mFormat) << ", "
+ << (audio_channel_mask_get_representation(mChannelMask) ==
+ AUDIO_CHANNEL_REPRESENTATION_INDEX ?
+ audio_channel_index_mask_to_string(mChannelMask) :
+ (mIsInput ? audio_channel_in_mask_to_string(mChannelMask) :
+ audio_channel_out_mask_to_string(mChannelMask)))
+ << ", frameCount " << mFrameCount << ", frameCountHAL " << mFrameCountHAL
+ << ", deviceId " << getDeviceId();
+ return ss.str();
+ }
+
+ private:
+ const audio_io_handle_t mIoHandle = AUDIO_IO_HANDLE_NONE;
+ struct audio_patch mPatch = {};
+ const bool mIsInput = false;
+ const uint32_t mSamplingRate = 0;
+ const audio_format_t mFormat = AUDIO_FORMAT_DEFAULT;
+ const audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+ const size_t mFrameCount = 0;
+ const size_t mFrameCountHAL = 0;
+ const uint32_t mLatency = 0;
+ const audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
};
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 326919a..f6faaae 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -47,7 +47,7 @@
{
public:
- /* Events used by AudioRecord callback function (callback_t).
+ /* Events used by AudioRecord callback function (legacy_callback_t).
* Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
*/
enum event_type {
@@ -65,7 +65,7 @@
};
/* Client should declare a Buffer and pass address to obtainBuffer()
- * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA.
+ * and releaseBuffer(). See also legacy_callback_t for EVENT_MORE_DATA.
*/
class Buffer
@@ -117,7 +117,28 @@
* - EVENT_NEW_IAUDIORECORD: unused.
*/
- typedef void (*callback_t)(int event, void* user, void *info);
+ typedef void (*legacy_callback_t)(int event, void* user, void *info);
+
+ class IAudioRecordCallback : public virtual RefBase {
+ friend AudioRecord;
+ protected:
+ // Request for client to read newly available data.
+ // Used for TRANSFER_CALLBACK mode.
+ // Parameters:
+ // - buffer : Buffer to read from
+ // Returns:
+ // - Number of bytes actually consumed.
+ virtual size_t onMoreData([[maybe_unused]] const AudioRecord::Buffer& buffer) { return 0; }
+ // A buffer overrun occurred.
+ virtual void onOverrun() {}
+ // Record head is at the specified marker (see setMarkerPosition()).
+ virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+ // Record head is at a new position (see setPositionUpdatePeriod()).
+ virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+ // IAudioRecord was recreated due to re-routing, server invalidation or
+ // server crash.
+ virtual void onNewIAudioRecord() {}
+ };
/* Returns the minimum frame count required for the successful creation of
* an AudioRecord object.
@@ -182,20 +203,37 @@
* pAttributes: If not NULL, supersedes inputSource for use case selection.
* threadCanCallJava: Not present in parameter list, and so is fixed at false.
*/
-
AudioRecord(audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
const android::content::AttributionSourceState& client,
size_t frameCount = 0,
- callback_t cbf = NULL,
- void* user = NULL,
+ const wp<IAudioRecordCallback> &callback = nullptr,
uint32_t notificationFrames = 0,
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
- const audio_attributes_t* pAttributes = NULL,
+ const audio_attributes_t* pAttributes = nullptr,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ audio_microphone_direction_t
+ selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+ float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
+
+
+ AudioRecord(audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const android::content::AttributionSourceState& client,
+ size_t frameCount,
+ legacy_callback_t callback,
+ void* user,
+ uint32_t notificationFrames = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+ const audio_attributes_t* pAttributes = nullptr,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
audio_microphone_direction_t
selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -223,13 +261,12 @@
*
* threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
*/
- status_t set(audio_source_t inputSource,
+ status_t set(audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount = 0,
- callback_t cbf = NULL,
- void* user = NULL,
+ const wp<IAudioRecordCallback> &callback = nullptr,
uint32_t notificationFrames = 0,
bool threadCanCallJava = false,
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
@@ -237,7 +274,28 @@
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL,
+ const audio_attributes_t* pAttributes = nullptr,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ audio_microphone_direction_t
+ selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+ float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT,
+ int32_t maxSharedAudioHistoryMs = 0);
+
+ status_t set(audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ legacy_callback_t callback,
+ void* user,
+ uint32_t notificationFrames = 0,
+ bool threadCanCallJava = false,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+ uid_t uid = AUDIO_UID_INVALID,
+ pid_t pid = -1,
+ const audio_attributes_t* pAttributes = nullptr,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
audio_microphone_direction_t
selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -264,6 +322,7 @@
size_t frameCount() const { return mFrameCount; }
size_t frameSize() const { return mFrameSize; }
audio_source_t inputSource() const { return mAttributes.source; }
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
/*
* Return the period of the notification callback in frames.
@@ -672,8 +731,9 @@
bool mActive;
// for client callback handler
- callback_t mCbf; // callback handler for events, or NULL
- void* mUserData;
+
+ wp<IAudioRecordCallback> mCallback;
+ sp<IAudioRecordCallback> mLegacyCallbackWrapper;
// for notification APIs
uint32_t mNotificationFramesReq; // requested number of frames between each
@@ -759,6 +819,13 @@
bool mTimestampRetrogradePositionReported = false; // reduce log spam
bool mTimestampRetrogradeTimeReported = false; // reduce log spam
+ // Format conversion. May be needed for adding fast tracks whose format is different from server.
+ audio_config_base_t mServerConfig;
+ size_t mServerFrameSize;
+ size_t mServerSampleSize;
+ std::unique_ptr<uint8_t[]> mFormatConversionBufRaw;
+ Buffer mFormatConversionBuffer;
+
private:
class DeathNotifier : public IBinder::DeathRecipient {
public:
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 8ba23ad..0e9d48c 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,24 +19,30 @@
#include <sys/types.h>
+#include <set>
+#include <vector>
+
+#include <android/content/AttributionSourceState.h>
#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerClient.h>
#include <android/media/BnAudioPolicyServiceClient.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/INativeSpatializerCallback.h>
+#include <android/media/ISpatializer.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
#include <media/AidlConversionUtil.h>
+#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
#include <media/AudioProductStrategy.h>
#include <media/AudioVolumeGroup.h>
#include <media/AudioIoDescriptor.h>
#include <media/MicrophoneInfo.h>
-#include <set>
#include <system/audio.h>
#include <system/audio_effect.h>
#include <system/audio_policy.h>
#include <utils/Errors.h>
#include <utils/Mutex.h>
-#include <vector>
using android::content::AttributionSourceState;
@@ -225,6 +231,9 @@
// Indicate JAVA services are ready (scheduling, power management ...)
static status_t systemReady();
+ // Indicate audio policy service is ready
+ static status_t audioPolicyReady();
+
// Returns the number of frames per audio HAL buffer.
// Corresponds to audio_stream->get_buffer_size()/audio_stream_in_frame_size() for input.
// See also getFrameCount().
@@ -318,7 +327,7 @@
static status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
static product_strategy_t getStrategyForStream(audio_stream_type_t stream);
- static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+ static DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
static status_t getDevicesForAttributes(const AudioAttributes &aa,
AudioDeviceTypeAddrVector *devices);
@@ -415,8 +424,8 @@
static status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
- static status_t getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<audio_format_t> *formats);
+ static status_t getHwOffloadFormatsSupportedForBluetoothMedia(
+ audio_devices_t device, std::vector<audio_format_t> *formats);
// numSurroundFormats holds the maximum number of formats and bool value allowed in the array.
// When numSurroundFormats is 0, surroundFormats and surroundFormatsEnabled will not be
@@ -485,8 +494,51 @@
static status_t getDeviceForStrategy(product_strategy_t strategy,
AudioDeviceTypeAddr &device);
+
+ /**
+ * If a spatializer stage effect is present on the platform, this will return an
+ * ISpatializer interface to control this feature.
+ * If no spatializer stage is present, a null interface is returned.
+ * The INativeSpatializerCallback passed must not be null.
+ * Only one ISpatializer interface can exist at a given time. The native audio policy
+ * service will reject the request if an interface was already acquired and previous owner
+ * did not die or call ISpatializer.release().
+ * @param callback in: the callback to receive state updates if the ISpatializer
+ * interface is acquired.
+ * @param spatializer out: the ISpatializer interface made available to control the
+ * platform spatializer
+ * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, PERMISSION_DENIED, BAD_VALUE
+ * in case of error.
+ */
+ static status_t getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+ sp<media::ISpatializer>* spatializer);
+
+ /**
+ * Queries if some kind of spatialization will be performed if the audio playback context
+ * described by the provided arguments is present.
+ * The context is made of:
+ * - The audio attributes describing the playback use case.
+ * - The audio configuration describing the audio format, channels, sampling rate ...
+ * - The devices describing the sink audio device selected for playback.
+ * All arguments are optional and only the specified arguments are used to match against
+ * supported criteria. For instance, supplying no argument will tell if spatialization is
+ * supported or not in general.
+ * @param attr audio attributes describing the playback use case
+ * @param config audio configuration describing the audio format, channels, sampling rate...
+ * @param devices the sink audio device selected for playback
+ * @param canBeSpatialized out: true if spatialization is enabled for this context,
+ * false otherwise
+ * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, BAD_VALUE
+ * in case of error.
+ */
+ static status_t canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices,
+ bool *canBeSpatialized);
+
+
// A listener for capture state changes.
- class CaptureStateListener : public RefBase {
+ class CaptureStateListener : public virtual RefBase {
public:
// Called whenever capture state changes.
virtual void onStateChanged(bool active) = 0;
@@ -497,11 +549,11 @@
virtual ~CaptureStateListener() = default;
};
- // Regiseters a listener for sound trigger capture state changes.
+ // Registers a listener for sound trigger capture state changes.
// There may only be one such listener registered at any point.
- // The listener onStateChanged() method will be invoked sychronously from
+ // The listener onStateChanged() method will be invoked synchronously from
// this call with the initial value.
- // The listener onServiceDied() method will be invoked sychronously from
+ // The listener onServiceDied() method will be invoked synchronously from
// this call if initial attempt to register failed.
// If the audio policy service cannot be reached, this method will return
// PERMISSION_DENIED and will not invoke the callback, otherwise, it will
@@ -511,7 +563,7 @@
// ----------------------------------------------------------------------------
- class AudioVolumeGroupCallback : public RefBase
+ class AudioVolumeGroupCallback : public virtual RefBase
{
public:
@@ -526,7 +578,7 @@
static status_t addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
static status_t removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
- class AudioPortCallback : public RefBase
+ class AudioPortCallback : public virtual RefBase
{
public:
@@ -542,7 +594,7 @@
static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
- class AudioDeviceCallback : public RefBase
+ class AudioDeviceCallback : public virtual RefBase
{
public:
@@ -564,6 +616,14 @@
static status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+ static status_t getMmapPolicyInfo(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+ static int32_t getAAudioMixerBurstCount();
+
+ static int32_t getAAudioHardwareBurstMinUsec();
+
private:
class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
@@ -640,12 +700,12 @@
binder::Status onRecordingConfigurationUpdate(
int32_t event,
const media::RecordClientInfo& clientInfo,
- const media::AudioConfigBase& clientConfig,
+ const media::audio::common::AudioConfigBase& clientConfig,
const std::vector<media::EffectDescriptor>& clientEffects,
- const media::AudioConfigBase& deviceConfig,
+ const media::audio::common::AudioConfigBase& deviceConfig,
const std::vector<media::EffectDescriptor>& effects,
int32_t patchHandle,
- media::AudioSourceType source) override;
+ media::audio::common::AudioSource source) override;
binder::Status onRoutingUpdated();
private:
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index cb00990..16e10b5 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -28,6 +28,7 @@
#include <utils/threads.h>
#include <android/content/AttributionSourceState.h>
+#include <chrono>
#include <string>
#include "android/media/BnAudioTrackCallback.h"
@@ -145,7 +146,79 @@
* - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
*/
- typedef void (*callback_t)(int event, void* user, void *info);
+ typedef void (*legacy_callback_t)(int event, void* user, void* info);
+ class IAudioTrackCallback : public virtual RefBase {
+ friend AudioTrack;
+ protected:
+ /* Request to write more data to buffer.
+ * This event only occurs for TRANSFER_CALLBACK.
+ * If this event is delivered but the callback handler does not want to write more data,
+ * the handler must ignore the event by returning zero.
+ * This might occur, for example, if the application is waiting for source data or is at
+ * the end of stream.
+ * For data filling, it is preferred that the callback does not block and instead returns
+ * a short count of the amount of data actually delivered.
+ * Parameters:
+ * - buffer: Buffer to fill
+ * Returns:
+ * Amount of data actually written in bytes.
+ */
+ virtual size_t onMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) { return 0; }
+
+ // Buffer underrun occurred. This will not occur for static tracks.
+ virtual void onUnderrun() {}
+
+ /* Sample loop end was reached; playback restarted from loop start if loop count was not 0
+ * for a static track.
+ * Parameters:
+ * - loopsRemaining: Number of loops remaining to be played. -1 if infinite looping.
+ */
+ virtual void onLoopEnd([[maybe_unused]] int32_t loopsRemaining) {}
+
+ /* Playback head is at the specified marker (See setMarkerPosition()).
+ * Parameters:
+ * - markerPosition: Marker position in frames
+ */
+ virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+
+ /* Playback head is at a new position (See setPositionUpdatePeriod()).
+ * Parameters:
+ * - newPos: New position in frames
+ */
+ virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+
+ // Playback has completed for a static track.
+ virtual void onBufferEnd() {}
+
+ // IAudioTrack was re-created, either due to re-routing and voluntary invalidation
+ // by mediaserver, or mediaserver crash.
+ virtual void onNewIAudioTrack() {}
+
+ // Sent after all the buffers queued in AF and HW are played back (after stop is called)
+ // for an offloaded track.
+ virtual void onStreamEnd() {}
+
+ /* Delivered periodically and when there's a significant change
+ * in the mapping from frame position to presentation time.
+ * See AudioTimestamp for the information included with event.
+ * TODO not yet implemented.
+ * Parameters:
+ * - timestamp: New frame position and presentation time mapping.
+ */
+ virtual void onNewTimestamp([[maybe_unused]] AudioTimestamp timestamp) {}
+
+ /* Notification that more data can be given by write()
+ * This event only occurs for TRANSFER_SYNC_NOTIF_CALLBACK.
+ * Similar to onMoreData(), return the number of bytes actually written
+ * Parameters:
+ * - buffer: Buffer to fill
+ * Returns:
+ * Amount of data actually written in bytes.
+ */
+ virtual size_t onCanWriteMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) {
+ return 0;
+ }
+ };
/* Returns the minimum frame count required for the successful creation of
* an AudioTrack object.
@@ -256,15 +329,34 @@
audio_channel_mask_t channelMask,
size_t frameCount = 0,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = NULL,
- void* user = NULL,
+ const wp<IAudioTrackCallback>& callback = nullptr,
int32_t notificationFrames = 0,
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL,
+ const audio_offload_info_t *offloadInfo = nullptr,
const AttributionSourceState& attributionSource =
AttributionSourceState(),
- const audio_attributes_t* pAttributes = NULL,
+ const audio_attributes_t* pAttributes = nullptr,
+ bool doNotReconnect = false,
+ float maxRequiredSpeed = 1.0f,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+
+ AudioTrack( audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ audio_output_flags_t flags,
+ legacy_callback_t cbf,
+ void* user = nullptr,
+ int32_t notificationFrames = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = nullptr,
+ const AttributionSourceState& attributionSource =
+ AttributionSourceState(),
+ const audio_attributes_t* pAttributes = nullptr,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -280,22 +372,39 @@
* It is recommended to pass a callback function to be notified of playback end by an
* EVENT_UNDERRUN event.
*/
-
AudioTrack( audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
const sp<IMemory>& sharedBuffer,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = NULL,
- void* user = NULL,
+ const wp<IAudioTrackCallback>& callback = nullptr,
int32_t notificationFrames = 0,
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL,
+ const audio_offload_info_t *offloadInfo = nullptr,
const AttributionSourceState& attributionSource =
AttributionSourceState(),
- const audio_attributes_t* pAttributes = NULL,
+ const audio_attributes_t* pAttributes = nullptr,
+ bool doNotReconnect = false,
+ float maxRequiredSpeed = 1.0f);
+
+
+ AudioTrack( audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const sp<IMemory>& sharedBuffer,
+ audio_output_flags_t flags,
+ legacy_callback_t cbf,
+ void* user = nullptr,
+ int32_t notificationFrames = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = nullptr,
+ const AttributionSourceState& attributionSource =
+ AttributionSourceState(),
+ const audio_attributes_t* pAttributes = nullptr,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f);
@@ -333,20 +442,41 @@
audio_channel_mask_t channelMask,
size_t frameCount = 0,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = NULL,
- void* user = NULL,
+ const wp<IAudioTrackCallback>& callback = nullptr,
int32_t notificationFrames = 0,
const sp<IMemory>& sharedBuffer = 0,
bool threadCanCallJava = false,
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL,
+ const audio_offload_info_t *offloadInfo = nullptr,
const AttributionSourceState& attributionSource =
AttributionSourceState(),
- const audio_attributes_t* pAttributes = NULL,
+ const audio_attributes_t* pAttributes = nullptr,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
+ status_t set(audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount,
+ audio_output_flags_t flags,
+ legacy_callback_t callback,
+ void * user = nullptr,
+ int32_t notificationFrames = 0,
+ const sp<IMemory>& sharedBuffer = 0,
+ bool threadCanCallJava = false,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = nullptr,
+ const AttributionSourceState& attributionSource =
+ AttributionSourceState(),
+ const audio_attributes_t* pAttributes = nullptr,
+ bool doNotReconnect = false,
+ float maxRequiredSpeed = 1.0f,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+
// FIXME(b/169889714): Vendor code depends on the old method signature at link time
status_t set(audio_stream_type_t streamType,
uint32_t sampleRate,
@@ -354,17 +484,17 @@
uint32_t channelMask,
size_t frameCount = 0,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = NULL,
- void* user = NULL,
+ legacy_callback_t cbf = nullptr,
+ void* user = nullptr,
int32_t notificationFrames = 0,
const sp<IMemory>& sharedBuffer = 0,
bool threadCanCallJava = false,
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL,
+ const audio_offload_info_t *offloadInfo = nullptr,
uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL,
+ const audio_attributes_t* pAttributes = nullptr,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
@@ -401,6 +531,7 @@
uint32_t channelCount() const { return mChannelCount; }
size_t frameCount() const { return mFrameCount; }
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
/*
* Return the period of the notification callback in frames.
@@ -427,8 +558,7 @@
* less than or equal to the getBufferCapacityInFrames().
* It may also be adjusted slightly for internal reasons.
*
- * Return the final size or a negative error if the track is unitialized
- * or does not support variable sizes.
+ * Return the final size or a negative value (NO_INIT) if the track is uninitialized.
*/
ssize_t setBufferSizeInFrames(size_t size);
@@ -510,6 +640,14 @@
*/
void pause();
+ /* Pause and wait (with timeout) for the audio track to ramp to silence.
+ *
+ * \param timeout is the time limit to wait before returning.
+ * A negative number is treated as 0.
+ * \return true if the track is ramped to silence, false if the timeout occurred.
+ */
+ bool pauseAndWait(const std::chrono::milliseconds& timeout);
+
/* Set volume for this track, mostly used for games' sound effects
* left and right volumes. Levels must be >= 0.0 and <= 1.0.
* This is the older API. New applications should use setVolume(float) when possible.
@@ -1206,9 +1344,8 @@
}
// for client callback handler
- callback_t mCbf; // callback handler for events, or NULL
- void* mUserData;
-
+ wp<IAudioTrackCallback> mCallback; // callback handler for events, or NULL
+ sp<IAudioTrackCallback> mLegacyCallbackWrapper; // wrapper for legacy callback interface
// for notification APIs
// next 2 fields are const after constructor or set()
@@ -1380,6 +1517,9 @@
std::string mMetricsId; // GUARDED_BY(mLock), could change in createTrack_l().
std::string mCallerName; // for example "aaudio"
+ // report error to mediametrics.
+ void reportError(status_t status, const char *event, const char *message) const;
+
private:
class AudioTrackCallback : public media::BnAudioTrackCallback {
public:
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 0e059f7..b4ee4dc 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -37,10 +37,12 @@
#include <string>
#include <vector>
+#include <android/content/AttributionSourceState.h>
#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerService.h>
#include <android/media/BpAudioFlingerService.h>
-#include <android/content/AttributionSourceState.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
#include "android/media/CreateEffectRequest.h"
#include "android/media/CreateEffectResponse.h"
#include "android/media/CreateRecordRequest.h"
@@ -63,7 +65,7 @@
// ----------------------------------------------------------------------------
-class IAudioFlinger : public RefBase {
+class IAudioFlinger : public virtual RefBase {
public:
static constexpr char DEFAULT_SERVICE_NAME[] = "media.audio_flinger";
@@ -166,6 +168,7 @@
sp<IMemory> buffers;
audio_port_handle_t portId;
sp<media::IAudioRecord> audioRecord;
+ audio_config_base_t serverConfig;
ConversionResult<media::CreateRecordResponse> toAidl() const;
static ConversionResult<CreateRecordOutput>
@@ -329,6 +332,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
virtual status_t systemReady() = 0;
+ // Indicate audio policy service is ready
+ virtual status_t audioPolicyReady() = 0;
+
// Returns the number of frames per audio HAL buffer.
virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
@@ -344,6 +350,14 @@
virtual status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
+
+ virtual status_t getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+
+ virtual int32_t getAAudioMixerBurstCount() = 0;
+
+ virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
};
/**
@@ -432,6 +446,8 @@
status_t setAudioPortConfig(const struct audio_port_config* config) override;
audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) override;
status_t systemReady() override;
+ status_t audioPolicyReady() override;
+
size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
@@ -439,6 +455,14 @@
status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
+ status_t getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) override;
+
+ int32_t getAAudioMixerBurstCount() override;
+
+ int32_t getAAudioHardwareBurstMinUsec() override;
+
private:
const sp<media::IAudioFlingerService> mDelegate;
};
@@ -514,6 +538,7 @@
SET_AUDIO_PORT_CONFIG = media::BnAudioFlingerService::TRANSACTION_setAudioPortConfig,
GET_AUDIO_HW_SYNC_FOR_SESSION = media::BnAudioFlingerService::TRANSACTION_getAudioHwSyncForSession,
SYSTEM_READY = media::BnAudioFlingerService::TRANSACTION_systemReady,
+ AUDIO_POLICY_READY = media::BnAudioFlingerService::TRANSACTION_audioPolicyReady,
FRAME_COUNT_HAL = media::BnAudioFlingerService::TRANSACTION_frameCountHAL,
GET_MICROPHONES = media::BnAudioFlingerService::TRANSACTION_getMicrophones,
SET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_setMasterBalance,
@@ -522,6 +547,9 @@
SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
UPDATE_SECONDARY_OUTPUTS = media::BnAudioFlingerService::TRANSACTION_updateSecondaryOutputs,
+ GET_MMAP_POLICY_INFOS = media::BnAudioFlingerService::TRANSACTION_getMmapPolicyInfos,
+ GET_AAUDIO_MIXER_BURST_COUNT = media::BnAudioFlingerService::TRANSACTION_getAAudioMixerBurstCount,
+ GET_AAUDIO_HARDWARE_BURST_MIN_USEC = media::BnAudioFlingerService::TRANSACTION_getAAudioHardwareBurstMinUsec,
};
/**
@@ -563,7 +591,8 @@
Status createRecord(const media::CreateRecordRequest& request,
media::CreateRecordResponse* _aidl_return) override;
Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
- Status format(int32_t output, media::audio::common::AudioFormat* _aidl_return) override;
+ Status format(int32_t output,
+ media::audio::common::AudioFormatDescription* _aidl_return) override;
Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
Status latency(int32_t output, int32_t* _aidl_return) override;
Status setMasterVolume(float value) override;
@@ -572,12 +601,13 @@
Status masterMute(bool* _aidl_return) override;
Status setMasterBalance(float balance) override;
Status getMasterBalance(float* _aidl_return) override;
- Status setStreamVolume(media::AudioStreamType stream, float value, int32_t output) override;
- Status setStreamMute(media::AudioStreamType stream, bool muted) override;
- Status
- streamVolume(media::AudioStreamType stream, int32_t output, float* _aidl_return) override;
- Status streamMute(media::AudioStreamType stream, bool* _aidl_return) override;
- Status setMode(media::AudioMode mode) override;
+ Status setStreamVolume(media::audio::common::AudioStreamType stream,
+ float value, int32_t output) override;
+ Status setStreamMute(media::audio::common::AudioStreamType stream, bool muted) override;
+ Status streamVolume(media::audio::common::AudioStreamType stream,
+ int32_t output, float* _aidl_return) override;
+ Status streamMute(media::audio::common::AudioStreamType stream, bool* _aidl_return) override;
+ Status setMode(media::audio::common::AudioMode mode) override;
Status setMicMute(bool state) override;
Status getMicMute(bool* _aidl_return) override;
Status setRecordSilenced(int32_t portId, bool silenced) override;
@@ -585,8 +615,10 @@
Status
getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
- Status getInputBufferSize(int32_t sampleRate, media::audio::common::AudioFormat format,
- int32_t channelMask, int64_t* _aidl_return) override;
+ Status getInputBufferSize(int32_t sampleRate,
+ const media::audio::common::AudioFormatDescription& format,
+ const media::audio::common::AudioChannelLayout& channelMask,
+ int64_t* _aidl_return) override;
Status openOutput(const media::OpenOutputRequest& request,
media::OpenOutputResponse* _aidl_return) override;
Status openDuplicateOutput(int32_t output1, int32_t output2, int32_t* _aidl_return) override;
@@ -596,7 +628,7 @@
Status openInput(const media::OpenInputRequest& request,
media::OpenInputResponse* _aidl_return) override;
Status closeInput(int32_t input) override;
- Status invalidateStream(media::AudioStreamType stream) override;
+ Status invalidateStream(media::audio::common::AudioStreamType stream) override;
Status setVoiceVolume(float volume) override;
Status getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) override;
Status getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) override;
@@ -605,7 +637,8 @@
Status releaseAudioSessionId(int32_t audioSession, int32_t pid) override;
Status queryNumberEffects(int32_t* _aidl_return) override;
Status queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) override;
- Status getEffectDescriptor(const media::AudioUuid& effectUUID, const media::AudioUuid& typeUUID,
+ Status getEffectDescriptor(const media::audio::common::AudioUuid& effectUUID,
+ const media::audio::common::AudioUuid& typeUUID,
int32_t preferredTypeFlag,
media::EffectDescriptor* _aidl_return) override;
Status createEffect(const media::CreateEffectRequest& request,
@@ -624,12 +657,18 @@
Status setAudioPortConfig(const media::AudioPortConfig& config) override;
Status getAudioHwSyncForSession(int32_t sessionId, int32_t* _aidl_return) override;
Status systemReady() override;
+ Status audioPolicyReady() override;
Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
Status setAudioHalPids(const std::vector<int32_t>& pids) override;
Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
Status updateSecondaryOutputs(
const std::vector<media::TrackSecondaryOutputInfo>& trackSecondaryOutputInfos) override;
+ Status getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *_aidl_return) override;
+ Status getAAudioMixerBurstCount(int32_t* _aidl_return) override;
+ Status getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) override;
private:
const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
diff --git a/media/libaudioclient/include/media/PolicyAidlConversion.h b/media/libaudioclient/include/media/PolicyAidlConversion.h
index 873f27a..2296fdb 100644
--- a/media/libaudioclient/include/media/PolicyAidlConversion.h
+++ b/media/libaudioclient/include/media/PolicyAidlConversion.h
@@ -23,10 +23,8 @@
#include <android/media/AudioMix.h>
#include <android/media/AudioMixCallbackFlag.h>
-#include <android/media/AudioMixLatencyClass.h>
#include <android/media/AudioMixRouteFlag.h>
#include <android/media/AudioMixType.h>
-#include <android/media/AudioMode.h>
#include <android/media/AudioOffloadMode.h>
#include <android/media/AudioPolicyForceUse.h>
#include <android/media/AudioPolicyForcedConfig.h>
diff --git a/media/libaudioclient/include/media/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
index a575616..43c0100 100644
--- a/media/libaudioclient/include/media/ToneGenerator.h
+++ b/media/libaudioclient/include/media/ToneGenerator.h
@@ -28,7 +28,7 @@
namespace android {
-class ToneGenerator {
+class ToneGenerator : public AudioTrack::IAudioTrackCallback {
public:
// List of all available tones
@@ -156,6 +156,9 @@
ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false,
std::string opPackageName = {});
+
+ void onFirstRef() override;
+
~ToneGenerator();
bool startTone(tone_type toneType, int durationMs = -1);
@@ -311,6 +314,7 @@
unsigned int mProcessSize; // Size of audio blocks generated at a time by audioCallback() (in PCM frames).
struct timespec mStartTime; // tone start time: needed to guaranty actual tone duration
+ size_t onMoreData(const AudioTrack::Buffer& buffer) override;
bool initAudioTrack();
static void audioCallback(int event, void* user, void *info);
bool prepareWave();
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index def7ca6..891293e 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -9,10 +9,35 @@
cc_defaults {
name: "libaudioclient_tests_defaults",
+ test_suites: ["device-tests"],
cflags: [
"-Wall",
"-Werror",
],
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+}
+
+cc_test {
+ name: "audio_aidl_conversion_tests",
+ defaults: ["libaudioclient_tests_defaults"],
+ srcs: ["audio_aidl_legacy_conversion_tests.cpp"],
+ shared_libs: [
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+ static_libs: [
+ "android.media.audio.common.types-V1-cpp",
+ "audioclient-types-aidl-cpp",
+ "libaudioclient_aidl_conversion",
+ "libstagefright_foundation",
+ ],
}
cc_test {
@@ -30,8 +55,10 @@
cc_test {
name: "test_create_audiotrack",
defaults: ["libaudioclient_tests_defaults"],
- srcs: ["test_create_audiotrack.cpp",
- "test_create_utils.cpp"],
+ srcs: [
+ "test_create_audiotrack.cpp",
+ "test_create_utils.cpp",
+ ],
header_libs: [
"libmedia_headers",
"libmediametrics_headers",
@@ -49,8 +76,10 @@
cc_test {
name: "test_create_audiorecord",
defaults: ["libaudioclient_tests_defaults"],
- srcs: ["test_create_audiorecord.cpp",
- "test_create_utils.cpp"],
+ srcs: [
+ "test_create_audiorecord.cpp",
+ "test_create_utils.cpp",
+ ],
header_libs: [
"libmedia_headers",
"libmediametrics_headers",
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
new file mode 100644
index 0000000..997f62a
--- /dev/null
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include <media/AudioCommonTypes.h>
+#include <media/AidlConversion.h>
+
+using namespace android;
+using namespace android::aidl_utils;
+
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
+
+namespace {
+
+template<typename T> size_t hash(const T& t) {
+ return std::hash<T>{}(t);
+}
+
+AudioChannelLayout make_ACL_None() {
+ return AudioChannelLayout{};
+}
+
+AudioChannelLayout make_ACL_Invalid() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::invalid>(0);
+}
+
+AudioChannelLayout make_ACL_Stereo() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+ AudioChannelLayout::LAYOUT_STEREO);
+}
+
+AudioChannelLayout make_ACL_LayoutArbitrary() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+ // Use channels that exist both for input and output,
+ // but that together don't form a known layout mask.
+ AudioChannelLayout::CHANNEL_FRONT_LEFT |
+ AudioChannelLayout::CHANNEL_FRONT_RIGHT |
+ AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT |
+ AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT);
+}
+
+AudioChannelLayout make_ACL_ChannelIndex2() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
+ AudioChannelLayout::INDEX_MASK_2);
+}
+
+AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
+ // Use channels 1 and 3.
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(5);
+}
+
+AudioChannelLayout make_ACL_VoiceCall() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(
+ AudioChannelLayout::VOICE_CALL_MONO);
+}
+
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
+ const std::string& connection = "") {
+ AudioDeviceDescription result;
+ result.type = type;
+ result.connection = connection;
+ return result;
+}
+
+AudioDeviceDescription make_ADD_None() {
+ return AudioDeviceDescription{};
+}
+
+AudioDeviceDescription make_ADD_DefaultIn() {
+ return make_AudioDeviceDescription(AudioDeviceType::IN_DEFAULT);
+}
+
+AudioDeviceDescription make_ADD_DefaultOut() {
+ return make_AudioDeviceDescription(AudioDeviceType::OUT_DEFAULT);
+}
+
+AudioDeviceDescription make_ADD_WiredHeadset() {
+ return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_ANALOG());
+}
+
+AudioDeviceDescription make_ADD_BtScoHeadset() {
+ return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_BT_SCO());
+}
+
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+ AudioFormatDescription result;
+ result.type = type;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+ auto result = make_AudioFormatDescription(AudioFormatType::PCM);
+ result.pcm = pcm;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ AudioFormatDescription result;
+ result.encoding = encoding;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
+ const std::string& encoding) {
+ auto result = make_AudioFormatDescription(encoding);
+ result.pcm = transport;
+ return result;
+}
+
+AudioFormatDescription make_AFD_Default() {
+ return AudioFormatDescription{};
+}
+
+AudioFormatDescription make_AFD_Invalid() {
+ return make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID);
+}
+
+AudioFormatDescription make_AFD_Pcm16Bit() {
+ return make_AudioFormatDescription(PcmType::INT_16_BIT);
+}
+
+AudioFormatDescription make_AFD_Bitstream() {
+ return make_AudioFormatDescription("example");
+}
+
+AudioFormatDescription make_AFD_Encap() {
+ return make_AudioFormatDescription(PcmType::INT_16_BIT, "example.encap");
+}
+
+AudioFormatDescription make_AFD_Encap_with_Enc() {
+ auto afd = make_AFD_Encap();
+ afd.encoding += "+example";
+ return afd;
+}
+
+} // namespace
+
+// Verify that two independently constructed ADDs/AFDs have the same hash.
+// This ensures that regardless of where the ADD/AFD instance originates
+// from, it can be correctly compared to other ADD/AFD instances. Thus,
+// for example, a 16-bit integer format description provided by HAL
+// is identical to the same format description constructed by the framework.
+class HashIdentityTest : public ::testing::Test {
+ public:
+ template<typename T> void verifyHashIdentity(const std::vector<std::function<T()>>& valueGens) {
+ for (size_t i = 0; i < valueGens.size(); ++i) {
+ for (size_t j = 0; j < valueGens.size(); ++j) {
+ if (i == j) {
+ EXPECT_EQ(hash(valueGens[i]()), hash(valueGens[i]())) << i;
+ } else {
+ EXPECT_NE(hash(valueGens[i]()), hash(valueGens[j]())) << i << ", " << j;
+ }
+ }
+ }
+ }
+};
+
+TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
+ verifyHashIdentity<AudioChannelLayout>({
+ make_ACL_None, make_ACL_Invalid, make_ACL_Stereo,
+ make_ACL_LayoutArbitrary, make_ACL_ChannelIndex2,
+ make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
+}
+
+TEST_F(HashIdentityTest, AudioDeviceDescriptionHashIdentity) {
+ verifyHashIdentity<AudioDeviceDescription>({
+ make_ADD_None, make_ADD_DefaultIn, make_ADD_DefaultOut, make_ADD_WiredHeadset,
+ make_ADD_BtScoHeadset});
+}
+
+TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
+ verifyHashIdentity<AudioFormatDescription>({
+ make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
+ make_AFD_Encap, make_AFD_Encap_with_Enc});
+}
+
+using ChannelLayoutParam = std::tuple<AudioChannelLayout, bool /*isInput*/>;
+class AudioChannelLayoutRoundTripTest :
+ public testing::TestWithParam<ChannelLayoutParam> {};
+TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
+ const auto initial = std::get<0>(GetParam());
+ const bool isInput = std::get<1>(GetParam());
+ auto conv = aidl2legacy_AudioChannelLayout_audio_channel_mask_t(initial, isInput);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(conv.value(), isInput);
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
+ AudioChannelLayoutRoundTripTest,
+ testing::Combine(
+ testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
+ make_ACL_LayoutArbitrary(), make_ACL_ChannelIndex2(),
+ make_ACL_ChannelIndexArbitrary()),
+ testing::Values(false, true)));
+INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
+ AudioChannelLayoutRoundTripTest,
+ // In legacy constants the voice call is only defined for input.
+ testing::Combine(testing::Values(make_ACL_VoiceCall()), testing::Values(true)));
+
+using ChannelLayoutEdgeCaseParam = std::tuple<int /*legacy*/, bool /*isInput*/, bool /*isValid*/>;
+class AudioChannelLayoutEdgeCaseTest :
+ public testing::TestWithParam<ChannelLayoutEdgeCaseParam> {};
+TEST_P(AudioChannelLayoutEdgeCaseTest, Legacy2Aidl) {
+ const audio_channel_mask_t legacy = static_cast<audio_channel_mask_t>(std::get<0>(GetParam()));
+ const bool isInput = std::get<1>(GetParam());
+ const bool isValid = std::get<2>(GetParam());
+ auto conv = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, isInput);
+ EXPECT_EQ(isValid, conv.ok());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutEdgeCase,
+ AudioChannelLayoutEdgeCaseTest,
+ testing::Values(
+ // Valid legacy input masks.
+ std::make_tuple(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO, true, true),
+ std::make_tuple(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO, true, true),
+ std::make_tuple(AUDIO_CHANNEL_IN_VOICE_CALL_MONO, true, true),
+ // Valid legacy output masks.
+ std::make_tuple(
+ // This has the same numerical representation as Mask 'A' below
+ AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+ AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT, false, true),
+ std::make_tuple(
+ // This has the same numerical representation as Mask 'B' below
+ AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+ AUDIO_CHANNEL_OUT_TOP_BACK_LEFT, false, true),
+ // Invalid legacy input masks.
+ std::make_tuple(AUDIO_CHANNEL_IN_6, true, false),
+ std::make_tuple(
+ AUDIO_CHANNEL_IN_6 | AUDIO_CHANNEL_IN_FRONT_PROCESSED, true, false),
+ std::make_tuple(
+ AUDIO_CHANNEL_IN_PRESSURE | AUDIO_CHANNEL_IN_X_AXIS |
+ AUDIO_CHANNEL_IN_Y_AXIS | AUDIO_CHANNEL_IN_Z_AXIS, true, false),
+ std::make_tuple( // Mask 'A'
+ AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_UPLINK, true, false),
+ std::make_tuple( // Mask 'B'
+ AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_DNLINK, true, false)));
+
+class AudioDeviceDescriptionRoundTripTest :
+ public testing::TestWithParam<AudioDeviceDescription> {};
+TEST_P(AudioDeviceDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+ const auto initial = GetParam();
+ auto conv = aidl2legacy_AudioDeviceDescription_audio_devices_t(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_devices_t_AudioDeviceDescription(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioDeviceDescriptionRoundTrip,
+ AudioDeviceDescriptionRoundTripTest,
+ testing::Values(AudioDeviceDescription{}, make_ADD_DefaultIn(),
+ make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
+
+class AudioFormatDescriptionRoundTripTest :
+ public testing::TestWithParam<AudioFormatDescription> {};
+TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+ const auto initial = GetParam();
+ auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_format_t_AudioFormatDescription(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
+ AudioFormatDescriptionRoundTripTest,
+ testing::Values(make_AFD_Invalid(), AudioFormatDescription{}, make_AFD_Pcm16Bit()));
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 3bef55b..727b86f 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -24,9 +24,11 @@
"libmedia_helper_headers",
],
static_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
],
export_static_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
],
host_supported: true,
@@ -52,6 +54,7 @@
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
"libaudioutils",
@@ -63,6 +66,7 @@
],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
],
diff --git a/media/libaudiofoundation/AudioContainers.cpp b/media/libaudiofoundation/AudioContainers.cpp
index 31257d5..117d188 100644
--- a/media/libaudiofoundation/AudioContainers.cpp
+++ b/media/libaudiofoundation/AudioContainers.cpp
@@ -63,48 +63,41 @@
return audioDeviceInAllUsbSet;
}
-bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+const DeviceTypeSet& getAudioDeviceOutAllBleSet() {
+ static const DeviceTypeSet audioDeviceOutAllBleSet = DeviceTypeSet(
+ std::begin(AUDIO_DEVICE_OUT_ALL_BLE_ARRAY),
+ std::end(AUDIO_DEVICE_OUT_ALL_BLE_ARRAY));
+ return audioDeviceOutAllBleSet;
+}
+
+std::string deviceTypesToString(const DeviceTypeSet &deviceTypes) {
if (deviceTypes.empty()) {
- str = "Empty device types";
- return true;
+ return "Empty device types";
}
- bool ret = true;
- for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
- std::string deviceTypeStr;
- ret = audio_is_output_device(*it) ?
- OutputDeviceConverter::toString(*it, deviceTypeStr) :
- InputDeviceConverter::toString(*it, deviceTypeStr);
- if (!ret) {
- break;
+ std::stringstream ss;
+ for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+ if (it != deviceTypes.begin()) {
+ ss << ", ";
}
- str.append(deviceTypeStr);
- if (++it != deviceTypes.end()) {
- str.append(" , ");
+ const char* strType = audio_device_to_string(*it);
+ if (strlen(strType) != 0) {
+ ss << strType;
+ } else {
+ ss << "unknown type:0x" << std::hex << *it;
}
}
- if (!ret) {
- str = "Unknown values";
- }
- return ret;
+ return ss.str();
}
std::string dumpDeviceTypes(const DeviceTypeSet &deviceTypes) {
- std::string ret;
- for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
- std::stringstream ss;
- ss << "0x" << std::hex << (*it);
- ret.append(ss.str());
- if (++it != deviceTypes.end()) {
- ret.append(" , ");
+ std::stringstream ss;
+ for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+ if (it != deviceTypes.begin()) {
+ ss << ", ";
}
+ ss << "0x" << std::hex << (*it);
}
- return ret;
-}
-
-std::string toString(const DeviceTypeSet& deviceTypes) {
- std::string ret;
- deviceTypesToString(deviceTypes, ret);
- return ret;
+ return ss.str();
}
} // namespace android
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index c5d7da8..4a7e956 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -25,6 +25,9 @@
namespace android {
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+
namespace {
static const std::string SUPPRESSED = "SUPPRESSED";
@@ -97,10 +100,13 @@
std::string AudioDeviceTypeAddr::toString(bool includeSensitiveInfo) const {
std::stringstream sstream;
- sstream << "type:0x" << std::hex << mType;
+ sstream << audio_device_to_string(mType);
+ if (sstream.str().empty()) {
+ sstream << "unknown type:0x" << std::hex << mType;
+ }
// IP and MAC address are sensitive information. The sensitive information will be suppressed
// is `includeSensitiveInfo` is false.
- sstream << ",@:"
+ sstream << ", @:"
<< (!includeSensitiveInfo && mIsAddressSensitive ? SUPPRESSED : mAddress);
return sstream.str();
}
@@ -157,17 +163,16 @@
}
ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl) {
- audio_devices_t type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
- return AudioDeviceTypeAddr(type, aidl.address);
+aidl2legacy_AudioDeviceTypeAddress(const AudioDevice& aidl) {
+ audio_devices_t type;
+ std::string address;
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl, &type, &address));
+ return AudioDeviceTypeAddr(type, address);
}
-ConversionResult<media::AudioDevice>
+ConversionResult<AudioDevice>
legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy) {
- media::AudioDevice aidl;
- aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mType));
- aidl.address = legacy.getAddress();
- return aidl;
+ return legacy2aidl_audio_device_AudioDevice(legacy.mType, legacy.getAddress());
}
} // namespace android
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 1dee938..47e0edb 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -24,22 +24,18 @@
#define ALOGVV(a...) do { } while(0)
#endif
+#include <math.h>
+
#include <algorithm>
#include <android-base/stringprintf.h>
#include <media/AudioGain.h>
#include <utils/Log.h>
-#include <math.h>
-
namespace android {
-AudioGain::AudioGain(int index, bool useInChannelMask)
-{
- mIndex = index;
- mUseInChannelMask = useInChannelMask;
- memset(&mGain, 0, sizeof(struct audio_gain));
-}
+AudioGain::AudioGain(int index, bool isInput)
+ : mIndex(index), mIsInput(isInput) {}
void AudioGain::getDefaultConfig(struct audio_gain_config *config)
{
@@ -49,12 +45,9 @@
if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
config->values[0] = mGain.default_value;
} else {
- uint32_t numValues;
- if (mUseInChannelMask) {
- numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
- } else {
- numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
- }
+ const uint32_t numValues = mIsInput ?
+ audio_channel_count_from_in_mask(mGain.channel_mask) :
+ audio_channel_count_from_out_mask(mGain.channel_mask);
for (size_t i = 0; i < numValues; i++) {
config->values[i] = mGain.default_value;
}
@@ -78,12 +71,9 @@
if ((config->channel_mask & ~mGain.channel_mask) != 0) {
return BAD_VALUE;
}
- uint32_t numValues;
- if (mUseInChannelMask) {
- numValues = audio_channel_count_from_in_mask(config->channel_mask);
- } else {
- numValues = audio_channel_count_from_out_mask(config->channel_mask);
- }
+ const uint32_t numValues = mIsInput ?
+ audio_channel_count_from_in_mask(config->channel_mask) :
+ audio_channel_count_from_out_mask(config->channel_mask);
for (size_t i = 0; i < numValues; i++) {
if ((config->values[i] < mGain.min_value) ||
(config->values[i] > mGain.max_value)) {
@@ -116,7 +106,7 @@
bool AudioGain::equals(const sp<AudioGain>& other) const
{
return other != nullptr &&
- mUseInChannelMask == other->mUseInChannelMask &&
+ mIsInput == other->mIsInput &&
mUseForVolume == other->mUseForVolume &&
// Compare audio gain
mGain.mode == other->mGain.mode &&
@@ -129,51 +119,24 @@
mGain.max_ramp_ms == other->mGain.max_ramp_ms;
}
-status_t AudioGain::writeToParcel(android::Parcel *parcel) const {
- media::AudioGain parcelable;
- return writeToParcelable(&parcelable)
- ?: parcelable.writeToParcel(parcel);
+ConversionResult<AudioGain::Aidl> AudioGain::toParcelable() const {
+ media::audio::common::AudioGain aidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_gain_AudioGain(mGain, mIsInput));
+ aidl.useForVolume = mUseForVolume;
+ media::AudioGainSys aidlSys;
+ aidlSys.index = VALUE_OR_RETURN(convertIntegral<int32_t>(mIndex));
+ aidlSys.isInput = mIsInput;
+ return std::make_pair(aidl, aidlSys);
}
-status_t AudioGain::writeToParcelable(media::AudioGain* parcelable) const {
- parcelable->index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mIndex));
- parcelable->useInChannelMask = mUseInChannelMask;
- parcelable->useForVolume = mUseForVolume;
- parcelable->mode = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
- parcelable->channelMask = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
- parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
- parcelable->maxValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_value));
- parcelable->defaultValue = VALUE_OR_RETURN_STATUS(
- convertIntegral<int32_t>(mGain.default_value));
- parcelable->stepValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.step_value));
- parcelable->minRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_ramp_ms));
- parcelable->maxRampMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_ramp_ms));
- return OK;
-}
-
-status_t AudioGain::readFromParcel(const android::Parcel *parcel) {
- media::AudioGain parcelable;
- return parcelable.readFromParcel(parcel)
- ?: readFromParcelable(parcelable);
-}
-
-status_t AudioGain::readFromParcelable(const media::AudioGain& parcelable) {
- mIndex = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.index));
- mUseInChannelMask = parcelable.useInChannelMask;
- mUseForVolume = parcelable.useForVolume;
- mGain.mode = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.mode));
- mGain.channel_mask = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
- mGain.min_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.minValue));
- mGain.max_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.maxValue));
- mGain.default_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.defaultValue));
- mGain.step_value = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.stepValue));
- mGain.min_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.minRampMs));
- mGain.max_ramp_ms = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.maxRampMs));
- return OK;
+ConversionResult<sp<AudioGain>> AudioGain::fromParcelable(const AudioGain::Aidl& aidl) {
+ const media::audio::common::AudioGain& hal = aidl.first;
+ const media::AudioGainSys& sys = aidl.second;
+ auto index = VALUE_OR_RETURN(convertIntegral<int>(sys.index));
+ sp<AudioGain> legacy = sp<AudioGain>::make(index, sys.isInput);
+ legacy->mGain = VALUE_OR_RETURN(aidl2legacy_AudioGain_audio_gain(hal, sys.isInput));
+ legacy->mUseForVolume = hal.useForVolume;
+ return legacy;
}
bool AudioGains::equals(const AudioGains &other) const
@@ -184,59 +147,30 @@
});
}
-status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
- status_t status = NO_ERROR;
- if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
- for (const auto &audioGain : *this) {
- if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
- break;
- }
- }
- return status;
-}
-
-status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
- status_t status = NO_ERROR;
- this->clear();
- if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
- for (size_t i = 0; i < this->size(); i++) {
- this->at(i) = new AudioGain(0, false);
- if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
- this->clear();
- break;
- }
- }
- return status;
-}
-
ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl) {
- sp<AudioGain> legacy = new AudioGain(0, false);
- status_t status = legacy->readFromParcelable(aidl);
- if (status != OK) {
- return base::unexpected(status);
- }
- return legacy;
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl) {
+ return AudioGain::fromParcelable(aidl);
}
-ConversionResult<media::AudioGain>
+ConversionResult<AudioGain::Aidl>
legacy2aidl_AudioGain(const sp<AudioGain>& legacy) {
- media::AudioGain aidl;
- status_t status = legacy->writeToParcelable(&aidl);
- if (status != OK) {
- return base::unexpected(status);
- }
- return aidl;
+ return legacy->toParcelable();
}
ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl) {
- return convertContainer<AudioGains>(aidl, aidl2legacy_AudioGain);
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl) {
+ return convertContainers<AudioGains>(aidl.first, aidl.second,
+ [](const media::audio::common::AudioGain& g,
+ const media::AudioGainSys& gs) {
+ return aidl2legacy_AudioGain(std::make_pair(g, gs));
+ });
}
-ConversionResult<std::vector<media::AudioGain>>
+ConversionResult<AudioGains::Aidl>
legacy2aidl_AudioGains(const AudioGains& legacy) {
- return convertContainer<std::vector<media::AudioGain>>(legacy, legacy2aidl_AudioGain);
+ return convertContainerSplit<
+ std::vector<media::audio::common::AudioGain>,
+ std::vector<media::AudioGainSys>>(legacy, legacy2aidl_AudioGain);
}
} // namespace android
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index fafabd9..4513323 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -18,13 +18,28 @@
#include <algorithm>
#include <utility>
-#include <android/media/ExtraAudioDescriptor.h>
#include <android-base/stringprintf.h>
#include <media/AudioPort.h>
#include <utils/Log.h>
namespace android {
+void AudioPort::setFlags(uint32_t flags)
+{
+ // force direct flag if offload flag is set: offloading implies a direct output stream
+ // and all common behaviors are driven by checking only the direct flag
+ // this should normally be set appropriately in the policy configuration file
+ if (mRole == AUDIO_PORT_ROLE_SOURCE &&
+ (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ flags |= AUDIO_OUTPUT_FLAG_DIRECT;
+ }
+ if (useInputChannelMask()) {
+ mFlags.input = static_cast<audio_input_flags_t>(flags);
+ } else {
+ mFlags.output = static_cast<audio_output_flags_t>(flags);
+ }
+}
+
void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
{
for (const auto& profileToImport : port->mProfiles) {
@@ -147,9 +162,16 @@
}
}
-void AudioPort::dump(std::string *dst, int spaces, bool verbose) const {
+void AudioPort::dump(std::string *dst, int spaces, const char* extraInfo, bool verbose) const {
if (!mName.empty()) {
- dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+ dst->append(base::StringPrintf("\"%s\"%s", mName.c_str(),
+ extraInfo != nullptr ? "; " : ""));
+ }
+ if (extraInfo != nullptr) {
+ dst->append(base::StringPrintf("%s", extraInfo));
+ }
+ if (!mName.empty() || extraInfo != nullptr) {
+ dst->append("\n");
}
if (verbose) {
std::string profilesStr;
@@ -196,39 +218,59 @@
mType == other->getType() &&
mRole == other->getRole() &&
mProfiles.equals(other->getAudioProfiles()) &&
+ getFlags() == other->getFlags() &&
mExtraAudioDescriptors == other->getExtraAudioDescriptors();
}
-status_t AudioPort::writeToParcel(Parcel *parcel) const
-{
- media::AudioPort parcelable;
- return writeToParcelable(&parcelable)
- ?: parcelable.writeToParcel(parcel);
-}
-
status_t AudioPort::writeToParcelable(media::AudioPort* parcelable) const {
- parcelable->name = mName;
- parcelable->type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_type_t_AudioPortType(mType));
- parcelable->role = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
- parcelable->profiles = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioProfileVector(mProfiles));
- parcelable->extraAudioDescriptors = mExtraAudioDescriptors;
- parcelable->gains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+ parcelable->hal.name = mName;
+ parcelable->sys.type = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_type_t_AudioPortType(mType));
+ parcelable->sys.role = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
+ auto aidlProfiles = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_AudioProfileVector(mProfiles, useInputChannelMask()));
+ parcelable->hal.profiles = aidlProfiles.first;
+ parcelable->sys.profiles = aidlProfiles.second;
+ parcelable->hal.flags = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_io_flags_AudioIoFlags(mFlags, useInputChannelMask()));
+ parcelable->hal.extraAudioDescriptors = mExtraAudioDescriptors;
+ auto aidlGains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
+ parcelable->hal.gains = aidlGains.first;
+ parcelable->sys.gains = aidlGains.second;
+ if (mType == AUDIO_PORT_TYPE_MIX) {
+ media::audio::common::AudioPortMixExt mixExt{};
+ mixExt.maxOpenStreamCount = maxOpenCount;
+ mixExt.maxActiveStreamCount = maxActiveCount;
+ mixExt.recommendedMuteDurationMs = recommendedMuteDurationMs;
+ parcelable->hal.ext = media::audio::common::AudioPortExt::make<
+ media::audio::common::AudioPortExt::mix>(mixExt);
+ }
return OK;
}
-status_t AudioPort::readFromParcel(const Parcel *parcel) {
- media::AudioPort parcelable;
- return parcelable.readFromParcel(parcel)
- ?: readFromParcelable(parcelable);
-}
-
status_t AudioPort::readFromParcelable(const media::AudioPort& parcelable) {
- mName = parcelable.name;
- mType = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortType_audio_port_type_t(parcelable.type));
- mRole = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.role));
- mProfiles = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioProfileVector(parcelable.profiles));
- mExtraAudioDescriptors = parcelable.extraAudioDescriptors;
- mGains = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioGains(parcelable.gains));
+ mName = parcelable.hal.name;
+ mType = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioPortType_audio_port_type_t(parcelable.sys.type));
+ mRole = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.sys.role));
+ mProfiles = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioProfileVector(
+ std::make_pair(parcelable.hal.profiles, parcelable.sys.profiles),
+ useInputChannelMask()));
+ mFlags = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioIoFlags_audio_io_flags(parcelable.hal.flags, useInputChannelMask()));
+ mExtraAudioDescriptors = parcelable.hal.extraAudioDescriptors;
+ mGains = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioGains(std::make_pair(parcelable.hal.gains, parcelable.sys.gains)));
+ if (mType == AUDIO_PORT_TYPE_MIX) {
+ const media::audio::common::AudioPortMixExt& mixExt =
+ parcelable.hal.ext.get<media::audio::common::AudioPortExt::mix>();
+ maxOpenCount = mixExt.maxOpenStreamCount;
+ maxActiveCount = mixExt.maxActiveStreamCount;
+ recommendedMuteDurationMs = mixExt.recommendedMuteDurationMs;
+ }
return OK;
}
@@ -250,6 +292,9 @@
if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
mGain = config->gain;
}
+ if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ mFlags = config->flags;
+ }
return NO_ERROR;
}
@@ -303,6 +348,9 @@
} else {
dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
}
+
+ updateField(mFlags, &audio_port_config::flags,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
}
bool AudioPortConfig::hasGainController(bool canUseForVolume) const
@@ -315,12 +363,14 @@
: audioport->getGains().size() > 0;
}
-bool AudioPortConfig::equals(const sp<AudioPortConfig> &other) const
+bool AudioPortConfig::equals(const sp<AudioPortConfig> &other, bool isInput) const
{
return other != nullptr &&
mSamplingRate == other->getSamplingRate() &&
mFormat == other->getFormat() &&
mChannelMask == other->getChannelMask() &&
+ (isInput ? mFlags.input == other->getFlags().input :
+ mFlags.output == other->getFlags().output )&&
// Compare audio gain config
mGain.index == other->mGain.index &&
mGain.mode == other->mGain.mode &&
@@ -330,54 +380,47 @@
mGain.ramp_duration_ms == other->mGain.ramp_duration_ms;
}
-status_t AudioPortConfig::writeToParcel(Parcel *parcel) const {
- media::AudioPortConfig parcelable;
- return writeToParcelable(&parcelable)
- ?: parcelable.writeToParcel(parcel);
-}
-
-status_t AudioPortConfig::writeToParcelable(media::AudioPortConfig* parcelable) const {
- parcelable->sampleRate = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
- parcelable->format = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+status_t AudioPortConfig::writeToParcelable(
+ media::audio::common::AudioPortConfig* parcelable, bool isInput) const {
+ media::audio::common::Int aidl_sampleRate;
+ aidl_sampleRate.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
+ parcelable->sampleRate = aidl_sampleRate;
+ parcelable->format = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
parcelable->channelMask = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_channel_mask_t_int32_t(mChannelMask));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(mChannelMask, isInput));
parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
- parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
- parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
- parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
- parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
- convertIntegral<int32_t>(mGain.ramp_duration_ms));
- parcelable->gain.values = VALUE_OR_RETURN_STATUS(convertContainer<std::vector<int32_t>>(
- mGain.values, convertIntegral<int32_t, int>));
+ media::audio::common::AudioGainConfig aidl_gain = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_gain_config_AudioGainConfig(mGain, isInput));
+ parcelable->gain = aidl_gain;
+ parcelable->flags = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_io_flags_AudioIoFlags(mFlags, isInput));
return OK;
}
-status_t AudioPortConfig::readFromParcel(const Parcel *parcel) {
- media::AudioPortConfig parcelable;
- return parcelable.readFromParcel(parcel)
- ?: readFromParcelable(parcelable);
-}
-
-status_t AudioPortConfig::readFromParcelable(const media::AudioPortConfig& parcelable) {
- mSamplingRate = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.sampleRate));
- mFormat = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
- mChannelMask = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
- mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
- mGain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.index));
- mGain.mode = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.gain.mode));
- mGain.channel_mask = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_channel_mask_t(parcelable.gain.channelMask));
- mGain.ramp_duration_ms = VALUE_OR_RETURN_STATUS(
- convertIntegral<unsigned int>(parcelable.gain.rampDurationMs));
- if (parcelable.gain.values.size() > std::size(mGain.values)) {
- return BAD_VALUE;
+status_t AudioPortConfig::readFromParcelable(
+ const media::audio::common::AudioPortConfig& parcelable, bool isInput) {
+ if (parcelable.sampleRate.has_value()) {
+ mSamplingRate = VALUE_OR_RETURN_STATUS(
+ convertIntegral<unsigned int>(parcelable.sampleRate.value().value));
}
- for (size_t i = 0; i < parcelable.gain.values.size(); ++i) {
- mGain.values[i] = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.values[i]));
+ if (parcelable.format.has_value()) {
+ mFormat = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format.value()));
+ }
+ if (parcelable.channelMask.has_value()) {
+ mChannelMask = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ parcelable.channelMask.value(), isInput));
+ }
+ mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
+ if (parcelable.gain.has_value()) {
+ mGain = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioGainConfig_audio_gain_config(parcelable.gain.value(), isInput));
+ }
+ if (parcelable.flags.has_value()) {
+ mFlags = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioIoFlags_audio_io_flags(parcelable.flags.value(), isInput));
}
return OK;
}
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 8ac3f73..9a67bb7 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -27,6 +27,8 @@
namespace android {
+using media::audio::common::AudioChannelLayout;
+
bool operator == (const AudioProfile &left, const AudioProfile &right)
{
return (left.getFormat() == right.getFormat()) &&
@@ -97,18 +99,14 @@
void AudioProfile::dump(std::string *dst, int spaces) const
{
- dst->append(base::StringPrintf("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
+ dst->append(base::StringPrintf("\"%s\"; ", mName.c_str()));
+ dst->append(base::StringPrintf("%s%s%s%s", mIsDynamicFormat ? "[dynamic format]" : "",
mIsDynamicChannels ? "[dynamic channels]" : "",
- mIsDynamicRate ? "[dynamic rates]" : ""));
- if (mName.length() != 0) {
- dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
- }
- std::string formatLiteral;
- if (FormatConverter::toString(mFormat, formatLiteral)) {
- dst->append(base::StringPrintf("%*s- format: %s\n", spaces, "", formatLiteral.c_str()));
- }
+ mIsDynamicRate ? "[dynamic rates]" : "", isDynamic() ? "; " : ""));
+ dst->append(base::StringPrintf("%s (0x%x)\n", audio_format_to_string(mFormat), mFormat));
+
if (!mSamplingRates.empty()) {
- dst->append(base::StringPrintf("%*s- sampling rates:", spaces, ""));
+ dst->append(base::StringPrintf("%*ssampling rates: ", spaces, ""));
for (auto it = mSamplingRates.begin(); it != mSamplingRates.end();) {
dst->append(base::StringPrintf("%d", *it));
dst->append(++it == mSamplingRates.end() ? "" : ", ");
@@ -117,7 +115,7 @@
}
if (!mChannelMasks.empty()) {
- dst->append(base::StringPrintf("%*s- channel masks:", spaces, ""));
+ dst->append(base::StringPrintf("%*schannel masks: ", spaces, ""));
for (auto it = mChannelMasks.begin(); it != mChannelMasks.end();) {
dst->append(base::StringPrintf("0x%04x", *it));
dst->append(++it == mChannelMasks.end() ? "" : ", ");
@@ -126,7 +124,7 @@
}
dst->append(base::StringPrintf(
- "%*s- encapsulation type: %#x\n", spaces, "", mEncapsulationType));
+ "%*s%s\n", spaces, "", audio_encapsulation_type_to_string(mEncapsulationType)));
}
bool AudioProfile::equals(const sp<AudioProfile>& other) const
@@ -154,67 +152,67 @@
return *this;
}
-status_t AudioProfile::writeToParcel(Parcel *parcel) const {
- media::AudioProfile parcelable = VALUE_OR_RETURN_STATUS(toParcelable());
- return parcelable.writeToParcel(parcel);
- }
-
-ConversionResult<media::AudioProfile>
-AudioProfile::toParcelable() const {
- media::AudioProfile parcelable;
+ConversionResult<AudioProfile::Aidl>
+AudioProfile::toParcelable(bool isInput) const {
+ media::audio::common::AudioProfile parcelable;
parcelable.name = mName;
- parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+ parcelable.format = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
+ // Note: legacy 'audio_profile' imposes a limit on the number of
+ // channel masks and sampling rates. That's why it's not used here
+ // and conversions are performed directly on the fields instead
+ // of using 'legacy2aidl_audio_profile_AudioProfile' from AidlConversion.
parcelable.channelMasks = VALUE_OR_RETURN(
- convertContainer<std::vector<int32_t>>(mChannelMasks,
- legacy2aidl_audio_channel_mask_t_int32_t));
- parcelable.samplingRates = VALUE_OR_RETURN(
+ convertContainer<std::vector<AudioChannelLayout>>(
+ mChannelMasks,
+ [isInput](audio_channel_mask_t m) {
+ return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+ }));
+ parcelable.sampleRates = VALUE_OR_RETURN(
convertContainer<std::vector<int32_t>>(mSamplingRates,
convertIntegral<int32_t, uint32_t>));
- parcelable.isDynamicFormat = mIsDynamicFormat;
- parcelable.isDynamicChannels = mIsDynamicChannels;
- parcelable.isDynamicRate = mIsDynamicRate;
parcelable.encapsulationType = VALUE_OR_RETURN(
legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(mEncapsulationType));
- return parcelable;
+ media::AudioProfileSys parcelableSys;
+ parcelableSys.isDynamicFormat = mIsDynamicFormat;
+ parcelableSys.isDynamicChannels = mIsDynamicChannels;
+ parcelableSys.isDynamicRate = mIsDynamicRate;
+ return std::make_pair(parcelable, parcelableSys);
}
-status_t AudioProfile::readFromParcel(const Parcel *parcel) {
- media::AudioProfile parcelable;
- if (status_t status = parcelable.readFromParcel(parcel); status != OK) {
- return status;
- }
- *this = *VALUE_OR_RETURN_STATUS(fromParcelable(parcelable));
- return OK;
-}
-
-ConversionResult<sp<AudioProfile>>
-AudioProfile::fromParcelable(const media::AudioProfile& parcelable) {
+ConversionResult<sp<AudioProfile>> AudioProfile::fromParcelable(
+ const AudioProfile::Aidl& aidl, bool isInput) {
sp<AudioProfile> legacy = new AudioProfile();
+ const auto& parcelable = aidl.first;
legacy->mName = parcelable.name;
- legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
+ legacy->mFormat = VALUE_OR_RETURN(
+ aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
legacy->mChannelMasks = VALUE_OR_RETURN(
convertContainer<ChannelMaskSet>(parcelable.channelMasks,
- aidl2legacy_int32_t_audio_channel_mask_t));
+ [isInput](const AudioChannelLayout& l) {
+ return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+ }));
legacy->mSamplingRates = VALUE_OR_RETURN(
- convertContainer<SampleRateSet>(parcelable.samplingRates,
+ convertContainer<SampleRateSet>(parcelable.sampleRates,
convertIntegral<uint32_t, int32_t>));
- legacy->mIsDynamicFormat = parcelable.isDynamicFormat;
- legacy->mIsDynamicChannels = parcelable.isDynamicChannels;
- legacy->mIsDynamicRate = parcelable.isDynamicRate;
legacy->mEncapsulationType = VALUE_OR_RETURN(
aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
parcelable.encapsulationType));
+ const auto& parcelableSys = aidl.second;
+ legacy->mIsDynamicFormat = parcelableSys.isDynamicFormat;
+ legacy->mIsDynamicChannels = parcelableSys.isDynamicChannels;
+ legacy->mIsDynamicRate = parcelableSys.isDynamicRate;
return legacy;
}
ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl) {
- return AudioProfile::fromParcelable(aidl);
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput) {
+ return AudioProfile::fromParcelable(aidl, isInput);
}
-ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy) {
- return legacy->toParcelable();
+ConversionResult<AudioProfile::Aidl>
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput) {
+ return legacy->toParcelable(isInput);
}
ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
@@ -319,42 +317,16 @@
void AudioProfileVector::dump(std::string *dst, int spaces) const
{
- dst->append(base::StringPrintf("%*s- Profiles:\n", spaces, ""));
+ dst->append(base::StringPrintf("%*s- Profiles (%zu):\n", spaces - 2, "", size()));
for (size_t i = 0; i < size(); i++) {
- dst->append(base::StringPrintf("%*sProfile %zu:", spaces + 4, "", i));
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->append(prefix);
std::string profileStr;
- at(i)->dump(&profileStr, spaces + 8);
+ at(i)->dump(&profileStr, prefix.size());
dst->append(profileStr);
}
}
-status_t AudioProfileVector::writeToParcel(Parcel *parcel) const
-{
- status_t status = NO_ERROR;
- if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
- for (const auto &audioProfile : *this) {
- if ((status = parcel->writeParcelable(*audioProfile)) != NO_ERROR) {
- break;
- }
- }
- return status;
-}
-
-status_t AudioProfileVector::readFromParcel(const Parcel *parcel)
-{
- status_t status = NO_ERROR;
- this->clear();
- if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
- for (size_t i = 0; i < this->size(); ++i) {
- this->at(i) = new AudioProfile(AUDIO_FORMAT_DEFAULT, AUDIO_CHANNEL_NONE, 0 /*sampleRate*/);
- if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
- this->clear();
- break;
- }
- }
- return status;
-}
-
bool AudioProfileVector::equals(const AudioProfileVector& other) const
{
return std::equal(begin(), end(), other.begin(), other.end(),
@@ -364,13 +336,22 @@
}
ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl) {
- return convertContainer<AudioProfileVector>(aidl, aidl2legacy_AudioProfile);
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput) {
+ return convertContainers<AudioProfileVector>(aidl.first, aidl.second,
+ [isInput](const media::audio::common::AudioProfile& p,
+ const media::AudioProfileSys& ps) {
+ return aidl2legacy_AudioProfile(std::make_pair(p, ps), isInput);
+ });
}
-ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy) {
- return convertContainer<std::vector<media::AudioProfile>>(legacy, legacy2aidl_AudioProfile);
+ConversionResult<AudioProfileVector::Aidl>
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput) {
+ return convertContainerSplit<
+ std::vector<media::audio::common::AudioProfile>,
+ std::vector<media::AudioProfileSys>>(legacy,
+ [isInput](const sp<AudioProfile>& p) {
+ return legacy2aidl_AudioProfile(p, isInput);
+ });
}
AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 5cfea81..5ffbffc 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -30,16 +30,20 @@
{
}
-DeviceDescriptorBase::DeviceDescriptorBase(audio_devices_t type, const std::string& address) :
- DeviceDescriptorBase(AudioDeviceTypeAddr(type, address))
+DeviceDescriptorBase::DeviceDescriptorBase(
+ audio_devices_t type, const std::string& address,
+ const FormatVector &encodedFormats) :
+ DeviceDescriptorBase(AudioDeviceTypeAddr(type, address), encodedFormats)
{
}
-DeviceDescriptorBase::DeviceDescriptorBase(const AudioDeviceTypeAddr &deviceTypeAddr) :
+DeviceDescriptorBase::DeviceDescriptorBase(
+ const AudioDeviceTypeAddr &deviceTypeAddr, const FormatVector &encodedFormats) :
AudioPort("", AUDIO_PORT_TYPE_DEVICE,
audio_is_output_device(deviceTypeAddr.mType) ? AUDIO_PORT_ROLE_SINK :
AUDIO_PORT_ROLE_SOURCE),
- mDeviceTypeAddr(deviceTypeAddr)
+ mDeviceTypeAddr(deviceTypeAddr),
+ mEncodedFormats(encodedFormats)
{
if (mDeviceTypeAddr.address().empty() && audio_is_remote_submix_device(mDeviceTypeAddr.mType)) {
mDeviceTypeAddr.setAddress("0");
@@ -106,32 +110,23 @@
return NO_ERROR;
}
-void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
+void DeviceDescriptorBase::dump(std::string *dst, int spaces,
const char* extraInfo, bool verbose) const
{
- dst->append(base::StringPrintf("%*sDevice %d:\n", spaces, "", index + 1));
if (mId != 0) {
- dst->append(base::StringPrintf("%*s- id: %2d\n", spaces, "", mId));
+ dst->append(base::StringPrintf("Port ID: %d; ", mId));
}
-
if (extraInfo != nullptr) {
- dst->append(extraInfo);
+ dst->append(base::StringPrintf("%s; ", extraInfo));
}
-
- dst->append(base::StringPrintf("%*s- type: %-48s\n",
- spaces, "", ::android::toString(mDeviceTypeAddr.mType).c_str()));
+ dst->append(base::StringPrintf("{%s}\n",
+ mDeviceTypeAddr.toString(true /*includeSensitiveInfo*/).c_str()));
dst->append(base::StringPrintf(
- "%*s- supported encapsulation modes: %u\n", spaces, "", mEncapsulationModes));
- dst->append(base::StringPrintf(
- "%*s- supported encapsulation metadata types: %u\n",
- spaces, "", mEncapsulationMetadataTypes));
+ "%*sEncapsulation modes: %u, metadata types: %u\n", spaces, "",
+ mEncapsulationModes, mEncapsulationMetadataTypes));
- if (mDeviceTypeAddr.address().size() != 0) {
- dst->append(base::StringPrintf(
- "%*s- address: %-32s\n", spaces, "", mDeviceTypeAddr.getAddress()));
- }
- AudioPort::dump(dst, spaces, verbose);
+ AudioPort::dump(dst, spaces, nullptr, verbose);
}
std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
@@ -148,60 +143,83 @@
AudioPort::log(" ");
}
+template<typename T>
+bool checkEqual(const T& f1, const T& f2)
+{
+ std::set<typename T::value_type> s1(f1.begin(), f1.end());
+ std::set<typename T::value_type> s2(f2.begin(), f2.end());
+ return s1 == s2;
+}
+
bool DeviceDescriptorBase::equals(const sp<DeviceDescriptorBase> &other) const
{
return other != nullptr &&
static_cast<const AudioPort*>(this)->equals(other) &&
- static_cast<const AudioPortConfig*>(this)->equals(other) &&
- mDeviceTypeAddr.equals(other->mDeviceTypeAddr);
+ static_cast<const AudioPortConfig*>(this)->equals(other, useInputChannelMask()) &&
+ mDeviceTypeAddr.equals(other->mDeviceTypeAddr) &&
+ checkEqual(mEncodedFormats, other->mEncodedFormats);
}
-
-status_t DeviceDescriptorBase::writeToParcel(Parcel *parcel) const
+bool DeviceDescriptorBase::supportsFormat(audio_format_t format)
{
- media::AudioPort parcelable;
- return writeToParcelable(&parcelable)
- ?: parcelable.writeToParcel(parcel);
+ if (mEncodedFormats.empty()) {
+ return true;
+ }
+
+ for (const auto& devFormat : mEncodedFormats) {
+ if (devFormat == format) {
+ return true;
+ }
+ }
+ return false;
}
status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
AudioPort::writeToParcelable(parcelable);
- AudioPortConfig::writeToParcelable(&parcelable->activeConfig);
- parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+ AudioPortConfig::writeToParcelable(&parcelable->sys.activeConfig.hal, useInputChannelMask());
+ parcelable->hal.id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+ parcelable->sys.activeConfig.hal.portId = parcelable->hal.id;
- media::AudioPortDeviceExt ext;
- ext.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
- ext.encapsulationModes = VALUE_OR_RETURN_STATUS(
+ media::audio::common::AudioPortDeviceExt deviceExt;
+ deviceExt.device = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
+ deviceExt.encodedFormats = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<media::audio::common::AudioFormatDescription>>(
+ mEncodedFormats, legacy2aidl_audio_format_t_AudioFormatDescription));
+ UNION_SET(parcelable->hal.ext, device, deviceExt);
+ media::AudioPortDeviceExtSys deviceSys;
+ deviceSys.encapsulationModes = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioEncapsulationMode_mask(mEncapsulationModes));
- ext.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
+ deviceSys.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioEncapsulationMetadataType_mask(mEncapsulationMetadataTypes));
- UNION_SET(parcelable->ext, device, std::move(ext));
+ UNION_SET(parcelable->sys.ext, device, deviceSys);
return OK;
}
-status_t DeviceDescriptorBase::readFromParcel(const Parcel *parcel) {
- media::AudioPort parcelable;
- return parcelable.readFromParcel(parcel)
- ?: readFromParcelable(parcelable);
-}
-
status_t DeviceDescriptorBase::readFromParcelable(const media::AudioPort& parcelable) {
- if (parcelable.type != media::AudioPortType::DEVICE) {
+ if (parcelable.sys.type != media::AudioPortType::DEVICE) {
return BAD_VALUE;
}
status_t status = AudioPort::readFromParcelable(parcelable)
- ?: AudioPortConfig::readFromParcelable(parcelable.activeConfig);
+ ?: AudioPortConfig::readFromParcelable(
+ parcelable.sys.activeConfig.hal, useInputChannelMask());
if (status != OK) {
return status;
}
- media::AudioPortDeviceExt ext = VALUE_OR_RETURN_STATUS(UNION_GET(parcelable.ext, device));
+ media::audio::common::AudioPortDeviceExt deviceExt = VALUE_OR_RETURN_STATUS(
+ UNION_GET(parcelable.hal.ext, device));
mDeviceTypeAddr = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioDeviceTypeAddress(ext.device));
+ aidl2legacy_AudioDeviceTypeAddress(deviceExt.device));
+ mEncodedFormats = VALUE_OR_RETURN_STATUS(
+ convertContainer<FormatVector>(deviceExt.encodedFormats,
+ aidl2legacy_AudioFormatDescription_audio_format_t));
+ media::AudioPortDeviceExtSys deviceSys = VALUE_OR_RETURN_STATUS(
+ UNION_GET(parcelable.sys.ext, device));
mEncapsulationModes = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioEncapsulationMode_mask(ext.encapsulationModes));
+ aidl2legacy_AudioEncapsulationMode_mask(deviceSys.encapsulationModes));
mEncapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioEncapsulationMetadataType_mask(ext.encapsulationMetadataTypes));
+ aidl2legacy_AudioEncapsulationMetadataType_mask(deviceSys.encapsulationMetadataTypes));
return OK;
}
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 204b365..707ab68 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -40,6 +40,7 @@
const DeviceTypeSet& getAudioDeviceOutAllUsbSet();
const DeviceTypeSet& getAudioDeviceInAllSet();
const DeviceTypeSet& getAudioDeviceInAllUsbSet();
+const DeviceTypeSet& getAudioDeviceOutAllBleSet();
template<typename T>
static std::vector<T> Intersection(const std::set<T>& a, const std::set<T>& b) {
@@ -130,14 +131,16 @@
return deviceTypes;
}
-bool deviceTypesToString(const DeviceTypeSet& deviceTypes, std::string &str);
+std::string deviceTypesToString(const DeviceTypeSet& deviceTypes);
std::string dumpDeviceTypes(const DeviceTypeSet& deviceTypes);
/**
* Return human readable string for device types.
*/
-std::string toString(const DeviceTypeSet& deviceTypes);
+inline std::string toString(const DeviceTypeSet& deviceTypes) {
+ return deviceTypesToString(deviceTypes);
+}
} // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
index 8edcc58..11aa222 100644
--- a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
+++ b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
@@ -19,7 +19,7 @@
#include <string>
#include <vector>
-#include <android/media/AudioDevice.h>
+#include <android/media/audio/common/AudioDevice.h>
#include <binder/Parcelable.h>
#include <binder/Parcel.h>
#include <media/AudioContainers.h>
@@ -32,6 +32,7 @@
class AudioDeviceTypeAddr : public Parcelable {
public:
AudioDeviceTypeAddr() = default;
+ AudioDeviceTypeAddr(const AudioDeviceTypeAddr&) = default;
AudioDeviceTypeAddr(audio_devices_t type, const std::string& address);
@@ -88,8 +89,8 @@
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<AudioDeviceTypeAddr>
-aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl);
-ConversionResult<media::AudioDevice>
+aidl2legacy_AudioDeviceTypeAddress(const media::audio::common::AudioDevice& aidl);
+ConversionResult<media::audio::common::AudioDevice>
legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy);
} // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
index a06b686..10088f2 100644
--- a/media/libaudiofoundation/include/media/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -16,23 +16,23 @@
#pragma once
-#include <android/media/AudioGain.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android/media/AudioGainSys.h>
#include <media/AidlConversion.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <system/audio.h>
-#include <string>
-#include <vector>
namespace android {
-class AudioGain: public RefBase, public Parcelable
+class AudioGain: public RefBase
{
public:
- AudioGain(int index, bool useInChannelMask);
- virtual ~AudioGain() {}
+ AudioGain(int index, bool isInput);
+ virtual ~AudioGain() = default;
void setMode(audio_gain_mode_t mode) { mGain.mode = mode; }
const audio_gain_mode_t &getMode() const { return mGain.mode; }
@@ -71,26 +71,24 @@
bool equals(const sp<AudioGain>& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
- status_t writeToParcelable(media::AudioGain* parcelable) const;
- status_t readFromParcelable(const media::AudioGain& parcelable);
+ using Aidl = std::pair<media::audio::common::AudioGain, media::AudioGainSys>;
+ ConversionResult<Aidl> toParcelable() const;
+ static ConversionResult<sp<AudioGain>> fromParcelable(const Aidl& aidl);
private:
int mIndex;
- struct audio_gain mGain;
- bool mUseInChannelMask;
+ bool mIsInput;
+ struct audio_gain mGain = {};
bool mUseForVolume = false;
};
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<sp<AudioGain>>
-aidl2legacy_AudioGain(const media::AudioGain& aidl);
-ConversionResult<media::AudioGain>
+aidl2legacy_AudioGain(const AudioGain::Aidl& aidl);
+ConversionResult<AudioGain::Aidl>
legacy2aidl_AudioGain(const sp<AudioGain>& legacy);
-class AudioGains : public std::vector<sp<AudioGain> >, public Parcelable
+class AudioGains : public std::vector<sp<AudioGain>>
{
public:
bool canUseForVolume() const
@@ -103,7 +101,7 @@
return false;
}
- int32_t add(const sp<AudioGain> gain)
+ int32_t add(const sp<AudioGain>& gain)
{
push_back(gain);
return 0;
@@ -111,14 +109,15 @@
bool equals(const AudioGains& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
+ using Aidl = std::pair<
+ std::vector<media::audio::common::AudioGain>,
+ std::vector<media::AudioGainSys>>;
};
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<AudioGains>
-aidl2legacy_AudioGains(const std::vector<media::AudioGain>& aidl);
-ConversionResult<std::vector<media::AudioGain>>
+aidl2legacy_AudioGains(const AudioGains::Aidl& aidl);
+ConversionResult<AudioGains::Aidl>
legacy2aidl_AudioGains(const AudioGains& legacy);
} // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
index 1cee1c9..d6a098f 100644
--- a/media/libaudiofoundation/include/media/AudioPort.h
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -21,7 +21,7 @@
#include <android/media/AudioPort.h>
#include <android/media/AudioPortConfig.h>
-#include <android/media/ExtraAudioDescriptor.h>
+#include <android/media/audio/common/ExtraAudioDescriptor.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
#include <media/AudioGain.h>
@@ -33,7 +33,7 @@
namespace android {
-class AudioPort : public virtual RefBase, public virtual Parcelable
+class AudioPort : public virtual RefBase
{
public:
AudioPort(const std::string& name, audio_port_type_t type, audio_port_role_t role) :
@@ -47,6 +47,9 @@
audio_port_type_t getType() const { return mType; }
audio_port_role_t getRole() const { return mRole; }
+ virtual void setFlags(uint32_t flags);
+ uint32_t getFlags() const { return useInputChannelMask() ? mFlags.input : mFlags.output; }
+
void setGains(const AudioGains &gains) { mGains = gains; }
const AudioGains &getGains() const { return mGains; }
@@ -69,10 +72,10 @@
AudioProfileVector &getAudioProfiles() { return mProfiles; }
void setExtraAudioDescriptors(
- const std::vector<media::ExtraAudioDescriptor> extraAudioDescriptors) {
+ const std::vector<media::audio::common::ExtraAudioDescriptor> extraAudioDescriptors) {
mExtraAudioDescriptors = extraAudioDescriptors;
}
- std::vector<media::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
+ std::vector<media::audio::common::ExtraAudioDescriptor> &getExtraAudioDescriptors() {
return mExtraAudioDescriptors;
}
@@ -93,19 +96,47 @@
((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
}
- void dump(std::string *dst, int spaces, bool verbose = true) const;
+ bool isDirectOutput() const
+ {
+ return (mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+ ((mFlags.output & AUDIO_OUTPUT_FLAG_DIRECT) != 0);
+ }
+
+ bool isMmap() const
+ {
+ return (mType == AUDIO_PORT_TYPE_MIX)
+ && (((mRole == AUDIO_PORT_ROLE_SOURCE) &&
+ ((mFlags.output & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0))
+ || ((mRole == AUDIO_PORT_ROLE_SINK) &&
+ ((mFlags.input & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0)));
+ }
+
+ void dump(std::string *dst, int spaces,
+ const char* extraInfo = nullptr, bool verbose = true) const;
void log(const char* indent) const;
bool equals(const sp<AudioPort>& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
status_t writeToParcelable(media::AudioPort* parcelable) const;
status_t readFromParcelable(const media::AudioPort& parcelable);
AudioGains mGains; // gain controllers
+ // Maximum number of input or output streams that can be simultaneously
+ // opened for this profile. By convention 0 means no limit. To respect
+ // legacy behavior, initialized to 1 for output profiles and 0 for input
+ // profiles
+ // FIXME: IOProfile code used the same value for both cases.
+ uint32_t maxOpenCount = 1;
+ // Maximum number of input or output streams that can be simultaneously
+ // active for this profile. By convention 0 means no limit. To respect
+ // legacy behavior, initialized to 0 for output profiles and 1 for input
+ // profiles
+ // FIXME: IOProfile code used the same value for both cases.
+ uint32_t maxActiveCount = 1;
+ // Mute duration while changing device on this output profile.
+ uint32_t recommendedMuteDurationMs = 0;
+
protected:
std::string mName;
audio_port_type_t mType;
@@ -114,7 +145,8 @@
// Audio capabilities that are defined by hardware descriptors when the format is unrecognized
// by the platform, e.g. short audio descriptor in EDID for HDMI.
- std::vector<media::ExtraAudioDescriptor> mExtraAudioDescriptors;
+ std::vector<media::audio::common::ExtraAudioDescriptor> mExtraAudioDescriptors;
+ union audio_io_flags mFlags = { .output = AUDIO_OUTPUT_FLAG_NONE };
private:
template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
|| std::is_same<T, struct audio_port_v7>::value, int> = 0>
@@ -130,7 +162,7 @@
};
-class AudioPortConfig : public virtual RefBase, public virtual Parcelable
+class AudioPortConfig : public virtual RefBase
{
public:
virtual ~AudioPortConfig() = default;
@@ -147,15 +179,16 @@
audio_format_t getFormat() const { return mFormat; }
audio_channel_mask_t getChannelMask() const { return mChannelMask; }
audio_port_handle_t getId() const { return mId; }
+ audio_io_flags getFlags() const { return mFlags; }
bool hasGainController(bool canUseForVolume = false) const;
- bool equals(const sp<AudioPortConfig>& other) const;
+ bool equals(const sp<AudioPortConfig>& other, bool isInput) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
- status_t writeToParcelable(media::AudioPortConfig* parcelable) const;
- status_t readFromParcelable(const media::AudioPortConfig& parcelable);
+ status_t writeToParcelable(
+ media::audio::common::AudioPortConfig* parcelable, bool isInput) const;
+ status_t readFromParcelable(
+ const media::audio::common::AudioPortConfig& parcelable, bool isInput);
protected:
unsigned int mSamplingRate = 0u;
@@ -163,6 +196,7 @@
audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
struct audio_gain_config mGain = { .index = -1 };
+ union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
};
} // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
index 6a36e78..d7cddb7 100644
--- a/media/libaudiofoundation/include/media/AudioProfile.h
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -17,11 +17,10 @@
#pragma once
#include <string>
+#include <utility>
#include <vector>
-#include <android/media/AudioProfile.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <android/media/AudioProfileSys.h>
#include <media/AidlConversion.h>
#include <media/AudioContainers.h>
#include <system/audio.h>
@@ -29,7 +28,7 @@
namespace android {
-class AudioProfile final : public RefBase, public Parcelable
+class AudioProfile final : public RefBase
{
public:
static sp<AudioProfile> createFullDynamic(audio_format_t dynamicFormat = AUDIO_FORMAT_DEFAULT);
@@ -70,7 +69,7 @@
void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
bool isDynamicFormat() const { return mIsDynamicFormat; }
- bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
+ bool isDynamic() const { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
audio_encapsulation_type_t getEncapsulationType() const { return mEncapsulationType; }
void setEncapsulationType(audio_encapsulation_type_t encapsulationType) {
@@ -81,11 +80,10 @@
bool equals(const sp<AudioProfile>& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
- ConversionResult<media::AudioProfile> toParcelable() const;
- static ConversionResult<sp<AudioProfile>> fromParcelable(const media::AudioProfile& parcelable);
+ using Aidl = std::pair<media::audio::common::AudioProfile, media::AudioProfileSys>;
+ ConversionResult<Aidl> toParcelable(bool isInput) const;
+ static ConversionResult<sp<AudioProfile>> fromParcelable(
+ const Aidl& aidl, bool isInput);
private:
@@ -106,11 +104,11 @@
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl);
-ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy);
+aidl2legacy_AudioProfile(const AudioProfile::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfile::Aidl>
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput);
-class AudioProfileVector : public std::vector<sp<AudioProfile>>, public Parcelable
+class AudioProfileVector : public std::vector<sp<AudioProfile>>
{
public:
virtual ~AudioProfileVector() = default;
@@ -137,17 +135,18 @@
bool equals(const AudioProfileVector& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
+ using Aidl = std::pair<
+ std::vector<media::audio::common::AudioProfile>,
+ std::vector<media::AudioProfileSys>>;
};
bool operator == (const AudioProfile &left, const AudioProfile &right);
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl);
-ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy);
+aidl2legacy_AudioProfileVector(const AudioProfileVector::Aidl& aidl, bool isInput);
+ConversionResult<AudioProfileVector::Aidl>
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput);
AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
const AudioProfileVector& profiles2);
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index 140ce36..1f0c768 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -36,16 +36,21 @@
public:
// Note that empty name refers by convention to a generic device.
explicit DeviceDescriptorBase(audio_devices_t type);
- DeviceDescriptorBase(audio_devices_t type, const std::string& address);
- explicit DeviceDescriptorBase(const AudioDeviceTypeAddr& deviceTypeAddr);
+ DeviceDescriptorBase(audio_devices_t type, const std::string& address,
+ const FormatVector &encodedFormats = FormatVector{});
+ DeviceDescriptorBase(const AudioDeviceTypeAddr& deviceTypeAddr,
+ const FormatVector &encodedFormats = FormatVector{});
- virtual ~DeviceDescriptorBase() {}
+ virtual ~DeviceDescriptorBase() = default;
audio_devices_t type() const { return mDeviceTypeAddr.mType; }
const std::string& address() const { return mDeviceTypeAddr.address(); }
void setAddress(const std::string &address);
const AudioDeviceTypeAddr& getDeviceTypeAddr() const { return mDeviceTypeAddr; }
+ const FormatVector& encodedFormats() const { return mEncodedFormats; }
+ bool supportsFormat(audio_format_t format);
+
// AudioPortConfig
virtual sp<AudioPort> getAudioPort() const {
return static_cast<AudioPort*>(const_cast<DeviceDescriptorBase*>(this));
@@ -60,7 +65,7 @@
status_t setEncapsulationModes(uint32_t encapsulationModes);
status_t setEncapsulationMetadataTypes(uint32_t encapsulationMetadataTypes);
- void dump(std::string *dst, int spaces, int index,
+ void dump(std::string *dst, int spaces,
const char* extraInfo = nullptr, bool verbose = true) const;
void log() const;
@@ -74,14 +79,12 @@
bool equals(const sp<DeviceDescriptorBase>& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
status_t writeToParcelable(media::AudioPort* parcelable) const;
status_t readFromParcelable(const media::AudioPort& parcelable);
protected:
AudioDeviceTypeAddr mDeviceTypeAddr;
+ FormatVector mEncodedFormats;
uint32_t mEncapsulationModes = 0;
uint32_t mEncapsulationMetadataTypes = 0;
private:
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index bb9a5f2..3f1fbea 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -11,12 +11,20 @@
name: "audiofoundation_parcelable_test",
shared_libs: [
- "libaudiofoundation",
+ "libbase",
"libbinder",
"liblog",
"libutils",
],
+ static_libs: [
+ "android.media.audio.common.types-V1-cpp",
+ "audioclient-types-aidl-cpp",
+ "libaudioclient_aidl_conversion",
+ "libaudiofoundation",
+ "libstagefright_foundation",
+ ],
+
header_libs: [
"libaudio_system_headers",
],
diff --git a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
index 068b5d8..50d8dc8 100644
--- a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
+++ b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
@@ -53,7 +53,7 @@
AudioGains getAudioGainsForTest() {
AudioGains audioGains;
- sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*useInChannelMask*/);
+ sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*isInput*/);
audioGain->setMode(AUDIO_GAIN_MODE_JOINT);
audioGain->setChannelMask(AUDIO_CHANNEL_OUT_STEREO);
audioGain->setMinValueInMb(-3200);
@@ -75,57 +75,74 @@
return audioProfiles;
}
-TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
- Parcel data;
- AudioGains audioGains = getAudioGainsForTest();
-
- ASSERT_EQ(data.writeParcelable(audioGains), NO_ERROR);
- data.setDataPosition(0);
- AudioGains audioGainsFromParcel;
- ASSERT_EQ(data.readParcelable(&audioGainsFromParcel), NO_ERROR);
- ASSERT_TRUE(audioGainsFromParcel.equals(audioGains));
+TEST(AudioFoundationParcelableTest, ParcelingAudioProfile) {
+ sp<AudioProfile> profile = getAudioProfileVectorForTest()[0];
+ auto conv = legacy2aidl_AudioProfile(profile, false /*isInput*/);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = aidl2legacy_AudioProfile(conv.value(), false /*isInput*/);
+ ASSERT_TRUE(convBack.ok());
+ ASSERT_TRUE(profile->equals(convBack.value()));
}
TEST(AudioFoundationParcelableTest, ParcelingAudioProfileVector) {
- Parcel data;
- AudioProfileVector audioProfiles = getAudioProfileVectorForTest();
+ AudioProfileVector profiles = getAudioProfileVectorForTest();
+ auto conv = legacy2aidl_AudioProfileVector(profiles, false /*isInput*/);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = aidl2legacy_AudioProfileVector(conv.value(), false /*isInput*/);
+ ASSERT_TRUE(convBack.ok());
+ ASSERT_TRUE(profiles.equals(convBack.value()));
+}
- ASSERT_EQ(data.writeParcelable(audioProfiles), NO_ERROR);
- data.setDataPosition(0);
- AudioProfileVector audioProfilesFromParcel;
- ASSERT_EQ(data.readParcelable(&audioProfilesFromParcel), NO_ERROR);
- ASSERT_TRUE(audioProfilesFromParcel.equals(audioProfiles));
+TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
+ sp<AudioGain> audioGain = getAudioGainsForTest()[0];
+ auto conv = legacy2aidl_AudioGain(audioGain);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = aidl2legacy_AudioGain(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ ASSERT_TRUE(audioGain->equals(convBack.value()));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioGains) {
+ AudioGains audioGains = getAudioGainsForTest();
+ auto conv = legacy2aidl_AudioGains(audioGains);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = aidl2legacy_AudioGains(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ ASSERT_TRUE(audioGains.equals(convBack.value()));
}
TEST(AudioFoundationParcelableTest, ParcelingAudioPort) {
- Parcel data;
sp<AudioPort> audioPort = new AudioPort(
"AudioPortName", AUDIO_PORT_TYPE_DEVICE, AUDIO_PORT_ROLE_SINK);
audioPort->setGains(getAudioGainsForTest());
audioPort->setAudioProfiles(getAudioProfileVectorForTest());
- ASSERT_EQ(data.writeParcelable(*audioPort), NO_ERROR);
- data.setDataPosition(0);
+ media::AudioPort parcelable;
+ ASSERT_EQ(NO_ERROR, audioPort->writeToParcelable(&parcelable));
sp<AudioPort> audioPortFromParcel = new AudioPort(
"", AUDIO_PORT_TYPE_NONE, AUDIO_PORT_ROLE_NONE);
- ASSERT_EQ(data.readParcelable(audioPortFromParcel.get()), NO_ERROR);
+ ASSERT_EQ(NO_ERROR, audioPortFromParcel->readFromParcelable(parcelable));
ASSERT_TRUE(audioPortFromParcel->equals(audioPort));
}
TEST(AudioFoundationParcelableTest, ParcelingAudioPortConfig) {
+ const bool isInput = false;
Parcel data;
sp<AudioPortConfig> audioPortConfig = new AudioPortConfigTestStub();
audioPortConfig->applyAudioPortConfig(&TEST_AUDIO_PORT_CONFIG);
-
- ASSERT_EQ(data.writeParcelable(*audioPortConfig), NO_ERROR);
+ media::audio::common::AudioPortConfig parcelable{};
+ ASSERT_EQ(NO_ERROR, audioPortConfig->writeToParcelable(&parcelable, isInput));
+ ASSERT_EQ(NO_ERROR, data.writeParcelable(parcelable));
data.setDataPosition(0);
+ media::audio::common::AudioPortConfig parcelableFromParcel{};
+ ASSERT_EQ(NO_ERROR, data.readParcelable(&parcelableFromParcel));
sp<AudioPortConfig> audioPortConfigFromParcel = new AudioPortConfigTestStub();
- ASSERT_EQ(data.readParcelable(audioPortConfigFromParcel.get()), NO_ERROR);
- ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig));
+ ASSERT_EQ(NO_ERROR, audioPortConfigFromParcel->readFromParcelable(
+ parcelableFromParcel, isInput));
+ ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig, isInput));
}
TEST(AudioFoundationParcelableTest, ParcelingDeviceDescriptorBase) {
- Parcel data;
sp<DeviceDescriptorBase> desc = new DeviceDescriptorBase(AUDIO_DEVICE_OUT_SPEAKER);
desc->setGains(getAudioGainsForTest());
desc->setAudioProfiles(getAudioProfileVectorForTest());
@@ -135,10 +152,10 @@
ASSERT_EQ(desc->setEncapsulationMetadataTypes(
AUDIO_ENCAPSULATION_METADATA_TYPE_ALL_POSITION_BITS), NO_ERROR);
- ASSERT_EQ(data.writeParcelable(*desc), NO_ERROR);
- data.setDataPosition(0);
+ media::AudioPort parcelable;
+ ASSERT_EQ(NO_ERROR, desc->writeToParcelable(&parcelable));
sp<DeviceDescriptorBase> descFromParcel = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
- ASSERT_EQ(data.readParcelable(descFromParcel.get()), NO_ERROR);
+ ASSERT_EQ(NO_ERROR, descFromParcel->readFromParcelable(parcelable));
ASSERT_TRUE(descFromParcel->equals(desc));
}
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index bd24c84..5fe74f9 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -30,6 +30,7 @@
],
shared_libs: [
+ "audioclient-types-aidl-cpp",
"libdl",
"libhidlbase",
"liblog",
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
index e420d07..c19d2c2 100644
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ b/media/libaudiohal/FactoryHalHidl.cpp
@@ -94,7 +94,7 @@
} // namespace
void* createPreferredImpl(const std::string& package, const std::string& interface) {
- for (auto version = detail::sAudioHALVersions; version != nullptr; ++version) {
+ for (auto version = detail::sAudioHALVersions; *version != nullptr; ++version) {
void* rawInterface = nullptr;
if (hasHalService(package, *version, interface)
&& createHalService(*version, interface, &rawInterface)) {
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index a2c6e8a..d6576f5 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -50,6 +50,7 @@
"libmedia_helper",
"libmediautils",
"libutils",
+ "audioclient-types-aidl-cpp",
],
header_libs: [
"android.hardware.audio.common.util@all-versions",
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index 32eaa31..0503698 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -105,6 +105,15 @@
}
// static
+void ConversionHelperHidl::argsFromHal(
+ const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs) {
+ hidlArgs->resize(args.size());
+ for (size_t i = 0; i < args.size(); ++i) {
+ (*hidlArgs)[i] = String8(args[i]).c_str();
+ }
+}
+
+// static
status_t ConversionHelperHidl::analyzeResult(const Result& result) {
switch (result) {
case Result::OK: return OK;
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
index 59122c7..2909013 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -21,6 +21,8 @@
#include <hidl/HidlSupport.h>
#include <system/audio.h>
#include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
using ::android::hardware::audio::CPP_VERSION::ParameterValue;
using CoreResult = ::android::hardware::audio::CPP_VERSION::Result;
@@ -37,6 +39,7 @@
static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+ static void argsFromHal(const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs);
ConversionHelperHidl(const char* className);
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 02d66ae..47acb19 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -20,6 +20,7 @@
//#define LOG_NDEBUG 0
#include <cutils/native_handle.h>
+#include <cutils/properties.h>
#include <hwbinder/IPCThreadState.h>
#include <media/AudioContainers.h>
#include <utils/Log.h>
@@ -457,11 +458,13 @@
}
#endif
-status_t DeviceHalHidl::dump(int fd) {
+status_t DeviceHalHidl::dump(int fd, const Vector<String16>& args) {
if (mDevice == 0) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
hidlHandle->data[0] = fd;
- Return<void> ret = mDevice->debug(hidlHandle, {} /* options */);
+ hidl_vec<hidl_string> hidlArgs;
+ argsFromHal(args, &hidlArgs);
+ Return<void> ret = mDevice->debug(hidlHandle, hidlArgs);
native_handle_delete(hidlHandle);
// TODO(b/111997867, b/177271958) Workaround - remove when fixed.
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 2c847cf..9fd0ac0 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -119,7 +119,24 @@
status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
- virtual status_t dump(int fd);
+ status_t getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType __unused,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+ // TODO: Implement the HAL query when moving to AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
+ int32_t getAAudioMixerBurstCount() override {
+ // TODO: Implement the HAL query when moving to AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
+ int32_t getAAudioHardwareBurstMinUsec() override {
+ // TODO: Implement the HAL query when moving to AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
+ status_t dump(int fd, const Vector<String16>& args) override;
private:
friend class DevicesFactoryHalHidl;
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index af7dc1a..e0304af 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -233,7 +233,7 @@
return INVALID_OPERATION;
}
-status_t DeviceHalLocal::dump(int fd) {
+status_t DeviceHalLocal::dump(int fd, const Vector<String16>& /* args */) {
return mDev->dump(mDev, fd);
}
diff --git a/media/libaudiohal/impl/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
index 46b510b..ee1d2c5 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -112,7 +112,24 @@
status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
- virtual status_t dump(int fd);
+ status_t getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType __unused,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos __unused) override {
+ // This function will only be available on AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
+ int32_t getAAudioMixerBurstCount() override {
+ // This function will only be available on AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
+ int32_t getAAudioHardwareBurstMinUsec() override {
+ // This function will only be available on AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
+ status_t dump(int fd, const Vector<String16>& args) override;
void closeOutputStream(struct audio_stream_out *stream_out);
void closeInputStream(struct audio_stream_in *stream_in);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 6f84efe..b46259b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -46,6 +46,8 @@
status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
+ float getHalVersion() const override { return MAJOR_VERSION + (float)MINOR_VERSION / 10; }
+
private:
friend class ServiceNotificationListener;
void addDeviceFactory(sp<IDevicesFactory> factory, bool needToNotify);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 568a1fb..5baefa4b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -40,6 +40,10 @@
status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
+ float getHalVersion() const override {
+ return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+ }
+
private:
sp<DevicesFactoryHalInterface> mLocalFactory;
sp<DevicesFactoryHalInterface> mHidlFactory;
diff --git a/media/libaudiohal/impl/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
index 32bf362..d2b9104 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -41,6 +41,10 @@
return INVALID_OPERATION;
}
+ float getHalVersion() const override {
+ return MAJOR_VERSION + (float)MINOR_VERSION / 10;
+ }
+
private:
friend class DevicesFactoryHalHybrid;
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index f042b92..ffe0d72 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -73,7 +73,9 @@
uint32_t index, effect_descriptor_t *pDescriptor) {
// TODO: We need somehow to track the changes on the server side
// or figure out how to convert everybody to query all the descriptors at once.
- // TODO: check for nullptr
+ if (pDescriptor == nullptr) {
+ return BAD_VALUE;
+ }
if (mLastDescriptors.size() == 0) {
status_t queryResult = queryAllDescriptors();
if (queryResult != OK) return queryResult;
@@ -85,7 +87,9 @@
status_t EffectsFactoryHalHidl::getDescriptor(
const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- // TODO: check for nullptr
+ if (pDescriptor == nullptr || pEffectUuid == nullptr) {
+ return BAD_VALUE;
+ }
if (mEffectsFactory == 0) return NO_INIT;
Uuid hidlUuid;
UuidUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
@@ -105,6 +109,33 @@
return processReturn(__FUNCTION__, ret);
}
+status_t EffectsFactoryHalHidl::getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors) {
+ if (pEffectType == nullptr || descriptors == nullptr) {
+ return BAD_VALUE;
+ }
+
+ uint32_t numEffects = 0;
+ status_t status = queryNumberEffects(&numEffects);
+ if (status != NO_ERROR) {
+ ALOGW("%s error %d from FactoryHal queryNumberEffects", __func__, status);
+ return status;
+ }
+
+ for (uint32_t i = 0; i < numEffects; i++) {
+ effect_descriptor_t descriptor;
+ status = getDescriptor(i, &descriptor);
+ if (status != NO_ERROR) {
+ ALOGW("%s error %d from FactoryHal getDescriptor", __func__, status);
+ continue;
+ }
+ if (memcmp(&descriptor.type, pEffectType, sizeof(effect_uuid_t)) == 0) {
+ descriptors->push_back(descriptor);
+ }
+ }
+ return descriptors->empty() ? NAME_NOT_FOUND : NO_ERROR;
+}
+
status_t EffectsFactoryHalHidl::createEffect(
const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
int32_t deviceId __unused, sp<EffectHalInterface> *effect) {
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 5fa85e7..ff26d9f 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -45,6 +45,9 @@
virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
effect_descriptor_t *pDescriptor);
+ virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors);
+
// Creates an effect engine of the specified type.
// To release the effect engine, it is necessary to release references
// to the returned effect object.
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 129b1c1..e63aded 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -152,11 +152,13 @@
return processReturn("standby", mStream->standby());
}
-status_t StreamHalHidl::dump(int fd) {
+status_t StreamHalHidl::dump(int fd, const Vector<String16>& args) {
if (!mStream) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
hidlHandle->data[0] = fd;
- Return<void> ret = mStream->debug(hidlHandle, {} /* options */);
+ hidl_vec<hidl_string> hidlArgs;
+ argsFromHal(args, &hidlArgs);
+ Return<void> ret = mStream->debug(hidlHandle, hidlArgs);
native_handle_delete(hidlHandle);
// TODO(b/111997867, b/177271958) Workaround - remove when fixed.
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 970903b..6f5dd04 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -71,7 +71,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby();
- virtual status_t dump(int fd);
+ virtual status_t dump(int fd, const Vector<String16>& args) override;
// Start a stream operating in mmap mode.
virtual status_t start();
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 34bd5df..11fac61 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -87,7 +87,8 @@
return mStream->standby(mStream);
}
-status_t StreamHalLocal::dump(int fd) {
+status_t StreamHalLocal::dump(int fd, const Vector<String16>& args) {
+ (void) args;
status_t status = mStream->dump(mStream, fd);
mStreamPowerLog.dump(fd);
return status;
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index b260495..493c521 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -50,7 +50,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby();
- virtual status_t dump(int fd);
+ virtual status_t dump(int fd, const Vector<String16>& args) override;
// Start a stream operating in mmap mode.
virtual status_t start() = 0;
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 29ef011..70c3199 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
#define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/MicrophoneInfo.h>
#include <system/audio.h>
@@ -120,7 +122,13 @@
virtual status_t removeDeviceEffect(
audio_port_handle_t device, sp<EffectHalInterface> effect) = 0;
- virtual status_t dump(int fd) = 0;
+ virtual status_t getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+ virtual int32_t getAAudioMixerBurstCount() = 0;
+ virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
+
+ virtual status_t dump(int fd, const Vector<String16>& args) = 0;
protected:
// Subclasses can not be constructed directly by clients.
diff --git a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
index 5091558..17010e6 100644
--- a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
@@ -43,6 +43,8 @@
// The callback can be only set once.
virtual status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) = 0;
+ virtual float getHalVersion() const = 0;
+
static sp<DevicesFactoryHalInterface> create();
protected:
diff --git a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
index 9fb56ae..3e505bd 100644
--- a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -37,6 +37,9 @@
virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
effect_descriptor_t *pDescriptor) = 0;
+ virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors) = 0;
+
// Creates an effect engine of the specified type.
// To release the effect engine, it is necessary to release references
// to the returned effect object.
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 2be12fb..2b5b2db 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -25,6 +25,7 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
+#include <utils/Vector.h>
namespace android {
@@ -69,7 +70,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby() = 0;
- virtual status_t dump(int fd) = 0;
+ virtual status_t dump(int fd, const Vector<String16>& args = {}) = 0;
// Start a stream operating in mmap mode.
virtual status_t start() = 0;
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index d85e2e9..e6fdb1d 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -108,15 +108,11 @@
if (track->mHapticChannelCount > 0) {
track->mAdjustInChannelCount = track->channelCount + track->mHapticChannelCount;
- track->mAdjustOutChannelCount = track->channelCount + track->mMixerHapticChannelCount;
- track->mAdjustNonDestructiveInChannelCount = track->mAdjustOutChannelCount;
- track->mAdjustNonDestructiveOutChannelCount = track->channelCount;
+ track->mAdjustOutChannelCount = track->channelCount;
track->mKeepContractedChannels = track->mHapticPlaybackEnabled;
} else {
track->mAdjustInChannelCount = 0;
track->mAdjustOutChannelCount = 0;
- track->mAdjustNonDestructiveInChannelCount = 0;
- track->mAdjustNonDestructiveOutChannelCount = 0;
track->mKeepContractedChannels = false;
}
@@ -131,8 +127,7 @@
// do it after downmix since track format may change!
track->prepareForReformat();
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
- track->prepareForAdjustChannels();
+ track->prepareForAdjustChannels(mFrameCount);
// Resampler channels may have changed.
track->recreateResampler(mSampleRate);
@@ -193,6 +188,24 @@
// mDownmixerBufferProvider reset below.
}
+ // See if we should use our built-in non-effect downmixer.
+ if (mMixerInFormat == AUDIO_FORMAT_PCM_FLOAT
+ && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO
+ && audio_channel_mask_get_representation(channelMask)
+ == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+ mDownmixerBufferProvider.reset(new ChannelMixBufferProvider(channelMask,
+ mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
+ if (static_cast<ChannelMixBufferProvider *>(mDownmixerBufferProvider.get())
+ ->isValid()) {
+ mDownmixRequiresFormat = mMixerInFormat;
+ reconfigureBufferProviders();
+ ALOGD("%s: Fallback using ChannelMix", __func__);
+ return NO_ERROR;
+ } else {
+ ALOGD("%s: ChannelMix not supported for channel mask %#x", __func__, channelMask);
+ }
+ }
+
// Effect downmixer does not accept the channel conversion. Let's use our remixer.
mDownmixerBufferProvider.reset(new RemixBufferProvider(channelMask,
mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
@@ -265,48 +278,20 @@
}
}
-status_t AudioMixer::Track::prepareForAdjustChannels()
+status_t AudioMixer::Track::prepareForAdjustChannels(size_t frames)
{
ALOGV("AudioMixer::prepareForAdjustChannels(%p) with inChannelCount: %u, outChannelCount: %u",
this, mAdjustInChannelCount, mAdjustOutChannelCount);
unprepareForAdjustChannels();
if (mAdjustInChannelCount != mAdjustOutChannelCount) {
- mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
- mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, kCopyBufferFrameCount));
- reconfigureBufferProviders();
- }
- return NO_ERROR;
-}
-
-void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
-{
- ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
- if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
- reconfigureBufferProviders();
- }
-}
-
-status_t AudioMixer::Track::prepareForAdjustChannelsNonDestructive(size_t frames)
-{
- ALOGV("AudioMixer::prepareForAdjustChannelsNonDestructive(%p) with inChannelCount: %u, "
- "outChannelCount: %u, keepContractedChannels: %d",
- this, mAdjustNonDestructiveInChannelCount, mAdjustNonDestructiveOutChannelCount,
- mKeepContractedChannels);
- unprepareForAdjustChannelsNonDestructive();
- if (mAdjustNonDestructiveInChannelCount != mAdjustNonDestructiveOutChannelCount) {
uint8_t* buffer = mKeepContractedChannels
? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
mMixerChannelCount, mMixerFormat)
- : NULL;
- mContractChannelsNonDestructiveBufferProvider.reset(
- new AdjustChannelsBufferProvider(
- mFormat,
- mAdjustNonDestructiveInChannelCount,
- mAdjustNonDestructiveOutChannelCount,
- frames,
- mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
- buffer));
+ : nullptr;
+ mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
+ mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, frames,
+ mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
+ buffer, mMixerHapticChannelCount));
reconfigureBufferProviders();
}
return NO_ERROR;
@@ -314,9 +299,9 @@
void AudioMixer::Track::clearContractedBuffer()
{
- if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ if (mAdjustChannelsBufferProvider.get() != nullptr) {
static_cast<AdjustChannelsBufferProvider*>(
- mContractChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+ mAdjustChannelsBufferProvider.get())->clearContractedFrames();
}
}
@@ -328,10 +313,6 @@
mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
bufferProvider = mAdjustChannelsBufferProvider.get();
}
- if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mContractChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mContractChannelsNonDestructiveBufferProvider.get();
- }
if (mReformatBufferProvider.get() != nullptr) {
mReformatBufferProvider->setBufferProvider(bufferProvider);
bufferProvider = mReformatBufferProvider.get();
@@ -377,7 +358,7 @@
track->mainBuffer = valueBuf;
ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
if (track->mKeepContractedChannels) {
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ track->prepareForAdjustChannels(mFrameCount);
}
invalidate();
}
@@ -405,7 +386,7 @@
track->mMixerFormat = format;
ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
if (track->mKeepContractedChannels) {
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ track->prepareForAdjustChannels(mFrameCount);
}
}
} break;
@@ -424,8 +405,7 @@
if (track->mHapticPlaybackEnabled != hapticPlaybackEnabled) {
track->mHapticPlaybackEnabled = hapticPlaybackEnabled;
track->mKeepContractedChannels = hapticPlaybackEnabled;
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
- track->prepareForAdjustChannels();
+ track->prepareForAdjustChannels(mFrameCount);
}
} break;
case HAPTIC_INTENSITY: {
@@ -434,6 +414,12 @@
track->mHapticIntensity = hapticIntensity;
}
} break;
+ case HAPTIC_MAX_AMPLITUDE: {
+ const float hapticMaxAmplitude = *reinterpret_cast<float*>(value);
+ if (track->mHapticMaxAmplitude != hapticMaxAmplitude) {
+ track->mHapticMaxAmplitude = hapticMaxAmplitude;
+ }
+ } break;
default:
LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
}
@@ -512,8 +498,6 @@
track->mDownmixerBufferProvider->reset();
} else if (track->mReformatBufferProvider.get() != nullptr) {
track->mReformatBufferProvider->reset();
- } else if (track->mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
- track->mContractChannelsNonDestructiveBufferProvider->reset();
} else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
track->mAdjustChannelsBufferProvider->reset();
}
@@ -553,12 +537,11 @@
// haptic
t->mHapticPlaybackEnabled = false;
t->mHapticIntensity = os::HapticScale::NONE;
+ t->mHapticMaxAmplitude = NAN;
t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
t->mMixerHapticChannelCount = 0;
t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
- t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
- t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
- t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+ t->mAdjustOutChannelCount = t->channelCount;
t->mKeepContractedChannels = false;
// Check the downmixing (or upmixing) requirements.
status_t status = t->prepareForDownmix();
@@ -569,8 +552,7 @@
// prepareForDownmix() may change mDownmixRequiresFormat
ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
t->prepareForReformat();
- t->prepareForAdjustChannelsNonDestructive(mFrameCount);
- t->prepareForAdjustChannels();
+ t->prepareForAdjustChannels(mFrameCount);
return OK;
}
@@ -602,7 +584,8 @@
switch (t->mMixerFormat) {
// Mixer format should be AUDIO_FORMAT_PCM_FLOAT.
case AUDIO_FORMAT_PCM_FLOAT: {
- os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity);
+ os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity,
+ t->mHapticMaxAmplitude);
} break;
default:
LOG_ALWAYS_FATAL("bad mMixerFormat: %#x", t->mMixerFormat);
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index cd47dc6..ab6a8b6 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_MIXER_OPS_H
#define ANDROID_AUDIO_MIXER_OPS_H
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
#include <system/audio.h>
namespace android {
@@ -229,15 +231,26 @@
* complexity of working on interleaved streams is now getting
* too high, and likely limits compiler optimization.
*/
-template <int MIXTYPE, int NCHAN,
+
+// compile-time function.
+constexpr inline bool usesCenterChannel(audio_channel_mask_t mask) {
+ using namespace audio_utils::channels;
+ for (size_t i = 0; i < std::size(kSideFromChannelIdx); ++i) {
+ if ((mask & (1 << i)) != 0 && kSideFromChannelIdx[i] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Applies stereo volume to the audio data based on proper left right channel affinity
+ * (templated channel MASK parameter).
+ */
+template <int MIXTYPE, audio_channel_mask_t MASK,
typename TO, typename TI, typename TV,
typename F>
-void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
- static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
- static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
- || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
- || MIXTYPE == MIXTYPE_STEREOEXPAND
- || MIXTYPE == MIXTYPE_MONOEXPAND);
+void stereoVolumeHelperWithChannelMask(TO*& out, const TI*& in, const TV *vol, F f) {
auto proc = [](auto& a, const auto& b) {
if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
|| MIXTYPE == MIXTYPE_STEREOEXPAND
@@ -250,59 +263,113 @@
auto inp = [&in]() -> const TI& {
if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND
|| MIXTYPE == MIXTYPE_MONOEXPAND) {
- return *in;
+ return *in; // note STEREOEXPAND assumes replicated L/R channels (see doc below).
} else {
return *in++;
}
};
- // HALs should only expose the canonical channel masks.
- proc(*out++, f(inp(), vol[0])); // front left
- if constexpr (NCHAN == 1) return;
- proc(*out++, f(inp(), vol[1])); // front right
- if constexpr (NCHAN == 2) return;
- if constexpr (NCHAN == 4) {
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- return;
- }
-
- // TODO: Precompute center volume if not ramping.
std::decay_t<TV> center;
- if constexpr (std::is_floating_point_v<TV>) {
- center = (vol[0] + vol[1]) * 0.5; // do not use divide
- } else {
- center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
- }
- proc(*out++, f(inp(), center)); // center (or 2.1 LFE)
- if constexpr (NCHAN == 3) return;
- if constexpr (NCHAN == 5) {
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- return;
- }
-
- proc(*out++, f(inp(), center)); // lfe
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- if constexpr (NCHAN == 6) return;
- if constexpr (NCHAN == 7) {
- proc(*out++, f(inp(), center)); // back center
- return;
- }
- // NCHAN == 8
- proc(*out++, f(inp(), vol[0])); // side left
- proc(*out++, f(inp(), vol[1])); // side right
- if constexpr (NCHAN > FCC_8) {
- // Mutes to zero extended surround channels.
- // 7.1.4 has the correct behavior.
- // 22.2 has the behavior that FLC and FRC will be mixed instead
- // of SL and SR and LFE will be center, not left.
- for (int i = 8; i < NCHAN; ++i) {
- // TODO: Consider using android::audio_utils::channels::kSideFromChannelIdx
- proc(*out++, f(inp(), 0.f));
+ constexpr bool USES_CENTER_CHANNEL = usesCenterChannel(MASK);
+ if constexpr (USES_CENTER_CHANNEL) {
+ if constexpr (std::is_floating_point_v<TV>) {
+ center = (vol[0] + vol[1]) * 0.5; // do not use divide
+ } else {
+ center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
}
}
+
+ using namespace audio_utils::channels;
+
+ // if LFE and LFE2 are both present, they take left and right volume respectively.
+ constexpr unsigned LFE_LFE2 = \
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+ constexpr bool has_LFE_LFE2 = (MASK & LFE_LFE2) == LFE_LFE2;
+
+#pragma push_macro("DO_CHANNEL_POSITION")
+#undef DO_CHANNEL_POSITION
+#define DO_CHANNEL_POSITION(BIT_INDEX) \
+ if constexpr ((MASK & (1 << BIT_INDEX)) != 0) { \
+ constexpr auto side = kSideFromChannelIdx[BIT_INDEX]; \
+ if constexpr (side == AUDIO_GEOMETRY_SIDE_LEFT || \
+ has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) { \
+ proc(*out++, f(inp(), vol[0])); \
+ } else if constexpr (side == AUDIO_GEOMETRY_SIDE_RIGHT || \
+ has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) { \
+ proc(*out++, f(inp(), vol[1])); \
+ } else /* constexpr */ { \
+ proc(*out++, f(inp(), center)); \
+ } \
+ }
+
+ DO_CHANNEL_POSITION(0);
+ DO_CHANNEL_POSITION(1);
+ DO_CHANNEL_POSITION(2);
+ DO_CHANNEL_POSITION(3);
+ DO_CHANNEL_POSITION(4);
+ DO_CHANNEL_POSITION(5);
+ DO_CHANNEL_POSITION(6);
+ DO_CHANNEL_POSITION(7);
+
+ DO_CHANNEL_POSITION(8);
+ DO_CHANNEL_POSITION(9);
+ DO_CHANNEL_POSITION(10);
+ DO_CHANNEL_POSITION(11);
+ DO_CHANNEL_POSITION(12);
+ DO_CHANNEL_POSITION(13);
+ DO_CHANNEL_POSITION(14);
+ DO_CHANNEL_POSITION(15);
+
+ DO_CHANNEL_POSITION(16);
+ DO_CHANNEL_POSITION(17);
+ DO_CHANNEL_POSITION(18);
+ DO_CHANNEL_POSITION(19);
+ DO_CHANNEL_POSITION(20);
+ DO_CHANNEL_POSITION(21);
+ DO_CHANNEL_POSITION(22);
+ DO_CHANNEL_POSITION(23);
+ DO_CHANNEL_POSITION(24);
+ DO_CHANNEL_POSITION(25);
+ static_assert(FCC_LIMIT <= FCC_26); // Note: this may need to change.
+#pragma pop_macro("DO_CHANNEL_POSITION")
+}
+
+// These are the channel position masks we expect from the HAL.
+// See audio_channel_out_mask_from_count() but this is constexpr
+constexpr inline audio_channel_mask_t canonicalChannelMaskFromCount(size_t channelCount) {
+ constexpr audio_channel_mask_t canonical[] = {
+ [0] = AUDIO_CHANNEL_NONE,
+ [1] = AUDIO_CHANNEL_OUT_MONO,
+ [2] = AUDIO_CHANNEL_OUT_STEREO,
+ [3] = AUDIO_CHANNEL_OUT_2POINT1,
+ [4] = AUDIO_CHANNEL_OUT_QUAD,
+ [5] = AUDIO_CHANNEL_OUT_PENTA,
+ [6] = AUDIO_CHANNEL_OUT_5POINT1,
+ [7] = AUDIO_CHANNEL_OUT_6POINT1,
+ [8] = AUDIO_CHANNEL_OUT_7POINT1,
+ [12] = AUDIO_CHANNEL_OUT_7POINT1POINT4,
+ [14] = AUDIO_CHANNEL_OUT_9POINT1POINT4,
+ [16] = AUDIO_CHANNEL_OUT_9POINT1POINT6,
+ [24] = AUDIO_CHANNEL_OUT_22POINT2,
+ };
+ return channelCount < std::size(canonical) ? canonical[channelCount] : AUDIO_CHANNEL_NONE;
+}
+
+template <int MIXTYPE, int NCHAN,
+ typename TO, typename TI, typename TV,
+ typename F>
+void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
+ static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
+ static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
+ || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+ || MIXTYPE == MIXTYPE_STEREOEXPAND
+ || MIXTYPE == MIXTYPE_MONOEXPAND);
+ constexpr audio_channel_mask_t MASK{canonicalChannelMaskFromCount(NCHAN)};
+ if constexpr (MASK == AUDIO_CHANNEL_NONE) {
+ ALOGE("%s: Invalid position count %d", __func__, NCHAN);
+ return; // not a valid system mask, ignore.
+ }
+ stereoVolumeHelperWithChannelMask<MIXTYPE, MASK, TO, TI, TV, F>(out, in, vol, f);
}
/*
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index 6d31c12..4658db8 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -364,6 +364,29 @@
src, mInputChannels, mIdxAry, mSampleSize, frames);
}
+ChannelMixBufferProvider::ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ size_t bufferFrameCount) :
+ CopyBufferProvider(
+ audio_bytes_per_sample(format)
+ * audio_channel_count_from_out_mask(inputChannelMask),
+ audio_bytes_per_sample(format)
+ * audio_channel_count_from_out_mask(outputChannelMask),
+ bufferFrameCount)
+{
+ ALOGV("ChannelMixBufferProvider(%p)(%#x, %#x, %#x)",
+ this, format, inputChannelMask, outputChannelMask);
+ if (outputChannelMask == AUDIO_CHANNEL_OUT_STEREO && format == AUDIO_FORMAT_PCM_FLOAT) {
+ mIsValid = mChannelMix.setInputChannelMask(inputChannelMask);
+ }
+}
+
+void ChannelMixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+ mChannelMix.process(static_cast<const float *>(src), static_cast<float *>(dst),
+ frames, false /* accumulate */);
+}
+
ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
audio_format_t inputFormat, audio_format_t outputFormat,
size_t bufferFrameCount) :
@@ -630,7 +653,8 @@
AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(
audio_format_t format, size_t inChannelCount, size_t outChannelCount,
- size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer) :
+ size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer,
+ size_t contractedOutChannelCount) :
CopyBufferProvider(
audio_bytes_per_frame(inChannelCount, format),
audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
@@ -640,15 +664,22 @@
mOutChannelCount(outChannelCount),
mSampleSizeInBytes(audio_bytes_per_sample(format)),
mFrameCount(frameCount),
- mContractedChannelCount(inChannelCount - outChannelCount),
- mContractedFormat(contractedFormat),
+ mContractedFormat(inChannelCount > outChannelCount
+ ? contractedFormat : AUDIO_FORMAT_INVALID),
+ mContractedInChannelCount(inChannelCount > outChannelCount
+ ? inChannelCount - outChannelCount : 0),
+ mContractedOutChannelCount(contractedOutChannelCount),
+ mContractedSampleSizeInBytes(audio_bytes_per_sample(contractedFormat)),
+ mContractedInputFrameSize(mContractedInChannelCount * mContractedSampleSizeInBytes),
mContractedBuffer(contractedBuffer),
mContractedWrittenFrames(0)
{
- ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p)", this, format,
- inChannelCount, outChannelCount, frameCount, contractedFormat, contractedBuffer);
+ ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p, %zu)",
+ this, format, inChannelCount, outChannelCount, frameCount, contractedFormat,
+ contractedBuffer, contractedOutChannelCount);
if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
- mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
+ mContractedOutputFrameSize =
+ audio_bytes_per_frame(mContractedOutChannelCount, mContractedFormat);
}
}
@@ -667,25 +698,39 @@
void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
{
- if (mInChannelCount > mOutChannelCount) {
- // For case multi to mono, adjust_channels has special logic that will mix first two input
- // channels into a single output channel. In that case, use adjust_channels_non_destructive
- // to keep only one channel data even when contracting to mono.
- adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
- mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
- if (mContractedFormat != AUDIO_FORMAT_INVALID
- && mContractedBuffer != nullptr) {
- const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+ // For the multi-to-mono case, adjust_channels has special logic that mixes the first two
+ // input channels into a single output channel. In that case, use
+ // adjust_channels_non_destructive to keep only one channel's data even when contracting to mono.
+ adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
+ mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+ if (mContractedFormat != AUDIO_FORMAT_INVALID
+ && mContractedBuffer != nullptr) {
+ const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+ uint8_t* oriBuf = (uint8_t*) dst + contractedIdx;
+ uint8_t* buf = (uint8_t*) mContractedBuffer
+ + mContractedWrittenFrames * mContractedOutputFrameSize;
+ if (mContractedInChannelCount > mContractedOutChannelCount) {
+ // Adjust the channels first as the contracted buffer may not have enough
+ // space for the data.
+ // Use adjust_channels_non_destructive to avoid mixing the first two channels into a
+ // single output channel in the multi-to-mono case.
+ adjust_channels_non_destructive(
+ oriBuf, mContractedInChannelCount, oriBuf, mContractedOutChannelCount,
+ mSampleSizeInBytes, frames * mContractedInChannelCount * mSampleSizeInBytes);
memcpy_by_audio_format(
- (uint8_t*) mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
- mContractedFormat, (uint8_t*) dst + contractedIdx, mFormat,
- mContractedChannelCount * frames);
- mContractedWrittenFrames += frames;
+ buf, mContractedFormat, oriBuf, mFormat, mContractedOutChannelCount * frames);
+ } else {
+ // Copy the data first, as the dst buffer may not have enough space for the extra channel(s).
+ memcpy_by_audio_format(
+ buf, mContractedFormat, oriBuf, mFormat, mContractedInChannelCount * frames);
+ // Note that if the contracted data is from MONO to MULTICHANNEL, the first 2 channels
+ // will be duplicated with the original single input channel and all the other channels
+ // will be 0-filled.
+ adjust_channels(
+ buf, mContractedInChannelCount, buf, mContractedOutChannelCount,
+ mContractedSampleSizeInBytes, mContractedInputFrameSize * frames);
}
- } else {
- // Prefer expanding data from the end of each audio frame.
- adjust_channels(src, mInChannelCount, dst, mOutChannelCount,
- mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+ mContractedWrittenFrames += frames;
}
}
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
index 70eafe3..2993a60 100644
--- a/media/libaudioprocessing/include/media/AudioMixer.h
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -50,6 +50,7 @@
// for haptic
HAPTIC_ENABLED = 0x4007, // Set haptic data from this track should be played or not.
HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+ HAPTIC_MAX_AMPLITUDE = 0x4009, // Set the max amplitude allowed for haptic data.
// for target TIMESTRETCH
PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
// parameter 'value' is a pointer to the new playback rate.
@@ -79,7 +80,6 @@
mPostDownmixReformatBufferProvider.reset(nullptr);
mDownmixerBufferProvider.reset(nullptr);
mReformatBufferProvider.reset(nullptr);
- mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
mAdjustChannelsBufferProvider.reset(nullptr);
}
@@ -94,10 +94,8 @@
void unprepareForDownmix();
status_t prepareForReformat();
void unprepareForReformat();
- status_t prepareForAdjustChannels();
+ status_t prepareForAdjustChannels(size_t frames);
void unprepareForAdjustChannels();
- status_t prepareForAdjustChannelsNonDestructive(size_t frames);
- void unprepareForAdjustChannelsNonDestructive();
void clearContractedBuffer();
bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
void reconfigureBufferProviders();
@@ -113,24 +111,18 @@
* 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
* channel format to another. Expanded channels are filled with zeros and put at the end
* of each audio frame. Contracted channels are copied to the end of the buffer.
- * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
- * This is currently using at audio-haptic coupled playback to separate audio and haptic
- * data. Contracted channels could be written to given buffer.
- * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
+ * 3) mReformatBufferProvider: If not NULL, performs the audio reformat to
* match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
* requires reformat. For example, it may convert floating point input to
* PCM_16_bit if that's required by the downmixer.
- * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * 4) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
* the number of channels required by the mixer sink.
- * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+ * 5) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
* the downmixer requirements to the mixer engine input requirements.
- * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
+ * 6) mTimestretchBufferProvider: Adds timestretching for playback rate
*/
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
- // TODO: combine mAdjustChannelsBufferProvider and
- // mContractChannelsNonDestructiveBufferProvider
std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
@@ -145,14 +137,13 @@
// Haptic
bool mHapticPlaybackEnabled;
os::HapticScale mHapticIntensity;
+ float mHapticMaxAmplitude;
audio_channel_mask_t mHapticChannelMask;
uint32_t mHapticChannelCount;
audio_channel_mask_t mMixerHapticChannelMask;
uint32_t mMixerHapticChannelCount;
uint32_t mAdjustInChannelCount;
uint32_t mAdjustOutChannelCount;
- uint32_t mAdjustNonDestructiveInChannelCount;
- uint32_t mAdjustNonDestructiveOutChannelCount;
bool mKeepContractedChannels;
};
diff --git a/media/libaudioprocessing/include/media/BufferProviders.h b/media/libaudioprocessing/include/media/BufferProviders.h
index b038854..b3ab8a5 100644
--- a/media/libaudioprocessing/include/media/BufferProviders.h
+++ b/media/libaudioprocessing/include/media/BufferProviders.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include <sys/types.h>
+#include <audio_utils/ChannelMix.h>
#include <media/AudioBufferProvider.h>
#include <media/AudioResamplerPublic.h>
#include <system/audio.h>
@@ -129,6 +130,23 @@
static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
};
+// ChannelMixBufferProvider derives from CopyBufferProvider to perform an
+// downmix to the proper channel count and mask.
+class ChannelMixBufferProvider : public CopyBufferProvider {
+public:
+ ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ size_t bufferFrameCount);
+
+ void copyFrames(void *dst, const void *src, size_t frames) override;
+
+ bool isValid() const { return mIsValid; }
+
+protected:
+ audio_utils::channels::ChannelMix mChannelMix;
+ bool mIsValid = false;
+};
+
// RemixBufferProvider derives from CopyBufferProvider to perform an
// upmix or downmix to the proper channel count and mask.
class RemixBufferProvider : public CopyBufferProvider {
@@ -223,17 +241,22 @@
// Extra expanded channels are filled with zeros and put at the end of each audio frame.
// Contracted channels are copied to the end of the output buffer(storage should be
// allocated appropriately).
-// Contracted channels could be written to output buffer.
+// Contracted channels can be written to the output buffer and adjusted. When the contracted
+// channels are adjusted in the contracted buffer, the input channel count will be calculated
+// as `inChannelCount - outChannelCount`. The output channel count is provided by caller, which
+// is `contractedOutChannelCount`. Currently, adjusting contracted channels is used for audio
+// coupled haptic playback. If the device supports two haptic channels while apps only provide
+// a single haptic channel, the second haptic channel will be duplicated with the first haptic
+// channel's data. If the device supports a single haptic channel while apps provide two haptic
+// channels, the second channel will be contracted.
class AdjustChannelsBufferProvider : public CopyBufferProvider {
public:
- AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, size_t frameCount) : AdjustChannelsBufferProvider(
- format, inChannelCount, outChannelCount,
- frameCount, AUDIO_FORMAT_INVALID, nullptr) { }
// Contracted data is converted to contractedFormat and put into contractedBuffer.
AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, size_t frameCount, audio_format_t contractedFormat,
- void* contractedBuffer);
+ size_t outChannelCount, size_t frameCount,
+ audio_format_t contractedFormat = AUDIO_FORMAT_INVALID,
+ void* contractedBuffer = nullptr,
+ size_t contractedOutChannelCount = 0);
//Overrides
status_t getNextBuffer(Buffer* pBuffer) override;
void copyFrames(void *dst, const void *src, size_t frames) override;
@@ -247,11 +270,14 @@
const size_t mOutChannelCount;
const size_t mSampleSizeInBytes;
const size_t mFrameCount;
- const size_t mContractedChannelCount;
const audio_format_t mContractedFormat;
+ const size_t mContractedInChannelCount;
+ const size_t mContractedOutChannelCount;
+ const size_t mContractedSampleSizeInBytes;
+ const size_t mContractedInputFrameSize; // contracted input frame size
void *mContractedBuffer;
size_t mContractedWrittenFrames;
- size_t mContractedFrameSize;
+ size_t mContractedOutputFrameSize; // contracted output frame size
};
// ----------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index 3856817..ad402db 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -76,6 +76,7 @@
//
cc_binary {
name: "mixerops_objdump",
+ header_libs: ["libaudioutils_headers"],
srcs: ["mixerops_objdump.cpp"],
}
@@ -84,6 +85,16 @@
//
cc_benchmark {
name: "mixerops_benchmark",
+ header_libs: ["libaudioutils_headers"],
srcs: ["mixerops_benchmark.cpp"],
static_libs: ["libgoogle-benchmark"],
}
+
+//
+// mixerops unit test
+//
+cc_test {
+ name: "mixerops_tests",
+ defaults: ["libaudioprocessing_test_defaults"],
+ srcs: ["mixerops_tests.cpp"],
+}
diff --git a/media/libaudioprocessing/tests/mixerops_benchmark.cpp b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
index 7a4c5c7..f866b1a 100644
--- a/media/libaudioprocessing/tests/mixerops_benchmark.cpp
+++ b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
@@ -16,11 +16,9 @@
#include <inttypes.h>
#include <type_traits>
-#include "../../../../system/media/audio_utils/include/audio_utils/primitives.h"
#define LOG_ALWAYS_FATAL(...)
#include <../AudioMixerOps.h>
-
#include <benchmark/benchmark.h>
using namespace android;
diff --git a/media/libaudioprocessing/tests/mixerops_tests.cpp b/media/libaudioprocessing/tests/mixerops_tests.cpp
new file mode 100644
index 0000000..2500ba9
--- /dev/null
+++ b/media/libaudioprocessing/tests/mixerops_tests.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "mixerop_tests"
+#include <log/log.h>
+
+#include <inttypes.h>
+#include <type_traits>
+
+#include <../AudioMixerOps.h>
+#include <gtest/gtest.h>
+
+using namespace android;
+
+// Note: gtest templated tests require typenames, not integers.
+template <int MIXTYPE, int NCHAN>
+class MixerOpsBasicTest {
+public:
+ static void testStereoVolume() {
+ using namespace android::audio_utils::channels;
+
+ constexpr size_t FRAME_COUNT = 1000;
+ constexpr size_t SAMPLE_COUNT = FRAME_COUNT * NCHAN;
+
+ const float in[SAMPLE_COUNT] = {[0 ... (SAMPLE_COUNT - 1)] = 1.f};
+
+ AUDIO_GEOMETRY_SIDE sides[NCHAN];
+ size_t i = 0;
+ unsigned channel = canonicalChannelMaskFromCount(NCHAN);
+ constexpr unsigned LFE_LFE2 =
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+ bool has_LFE_LFE2 = (channel & LFE_LFE2) == LFE_LFE2;
+ while (channel != 0) {
+ const int index = __builtin_ctz(channel);
+ if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+ sides[i++] = AUDIO_GEOMETRY_SIDE_LEFT; // special case
+ } else if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ sides[i++] = AUDIO_GEOMETRY_SIDE_RIGHT; // special case
+ } else {
+ sides[i++] = sideFromChannelIdx(index);
+ }
+ channel &= ~(1 << index);
+ }
+
+ float vola[2] = {1.f, 0.f}; // left volume at max.
+ float out[SAMPLE_COUNT]{};
+ float aux[FRAME_COUNT]{};
+ float volaux = 0.5;
+ {
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vola, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ const float left = vola[0];
+ const float center = (vola[0] + vola[1]) * 0.5;
+ const float right = vola[1];
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ for (size_t j = 0; j < NCHAN; ++j) {
+ const float audio = *outp++;
+ if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+ EXPECT_EQ(left, audio);
+ } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ EXPECT_EQ(center, audio);
+ } else {
+ EXPECT_EQ(right, audio);
+ }
+ }
+ EXPECT_EQ(volaux, *auxp++); // works if all channels contain 1.f
+ }
+ }
+ float volb[2] = {0.f, 0.5f}; // right volume at half max.
+ {
+ // this accumulates into out, aux.
+ // float out[SAMPLE_COUNT]{};
+ // float aux[FRAME_COUNT]{};
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, volb, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ const float left = vola[0] + volb[0];
+ const float center = (vola[0] + vola[1] + volb[0] + volb[1]) * 0.5;
+ const float right = vola[1] + volb[1];
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ for (size_t j = 0; j < NCHAN; ++j) {
+ const float audio = *outp++;
+ if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+ EXPECT_EQ(left, audio);
+ } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ EXPECT_EQ(center, audio);
+ } else {
+ EXPECT_EQ(right, audio);
+ }
+ }
+ // aux is accumulated so 2x the amplitude
+ EXPECT_EQ(volaux * 2.f, *auxp++); // works if all channels contain 1.f
+ }
+ }
+
+ { // test aux as derived from out.
+ // AUX channel is the weighted sum of all of the output channels prior to volume
+ // adjustment. We must set L and R to the same volume to allow computation
+ // of AUX from the output values.
+ const float volmono = 0.25f;
+ const float vollr[2] = {volmono, volmono}; // all the same.
+ float out[SAMPLE_COUNT]{};
+ float aux[FRAME_COUNT]{};
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vollr, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ float accum = 0.f;
+ for (size_t j = 0; j < NCHAN; ++j) {
+ accum += *outp++;
+ }
+ EXPECT_EQ(accum / NCHAN * volaux / volmono, *auxp++);
+ }
+ }
+ }
+};
+
+TEST(mixerops, stereovolume_1) { // Note: mono not used for output sinks yet.
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 1>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_2) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 2>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_3) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 3>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_4) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 4>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_5) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 5>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_6) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 6>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_7) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 7>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_8) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 8>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_12) {
+ if constexpr (FCC_LIMIT >= 12) { // NOTE: FCC_LIMIT is an enum, so can't #if
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 12>::testStereoVolume();
+ }
+}
+TEST(mixerops, stereovolume_24) {
+ if constexpr (FCC_LIMIT >= 24) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 24>::testStereoVolume();
+ }
+}
+TEST(mixerops, channel_equivalence) {
+ // we must match the constexpr function with the system determined channel mask from count.
+ for (size_t i = 0; i < FCC_LIMIT; ++i) {
+ const audio_channel_mask_t actual = canonicalChannelMaskFromCount(i);
+ const audio_channel_mask_t system = audio_channel_out_mask_from_count(i);
+ if (system == AUDIO_CHANNEL_INVALID) continue;
+ EXPECT_EQ(system, actual);
+ }
+}
diff --git a/media/libeffects/downmix/EffectDownmix.cpp b/media/libeffects/downmix/EffectDownmix.cpp
index f500bc3..d8f5787 100644
--- a/media/libeffects/downmix/EffectDownmix.cpp
+++ b/media/libeffects/downmix/EffectDownmix.cpp
@@ -19,7 +19,7 @@
#include <log/log.h>
#include "EffectDownmix.h"
-#include <math.h>
+#include <audio_utils/ChannelMix.h>
// Do not submit with DOWNMIX_TEST_CHANNEL_INDEX defined, strictly for testing
//#define DOWNMIX_TEST_CHANNEL_INDEX 0
@@ -35,12 +35,13 @@
} downmix_state_t;
/* parameters for each downmixer */
-typedef struct {
+struct downmix_object_t {
downmix_state_t state;
downmix_type_t type;
bool apply_volume_correction;
uint8_t input_channel_count;
-} downmix_object_t;
+ android::audio_utils::channels::ChannelMix channelMix;
+};
typedef struct downmix_module_s {
const struct effect_interface_s *itfe;
@@ -77,11 +78,6 @@
downmix_object_t *pDownmixer, int32_t param, uint32_t size, void *pValue);
static int Downmix_getParameter(
downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
-static void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static bool Downmix_foldGeneric(
- uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate);
// effect_handle_t interface implementation for downmix effect
const struct effect_interface_s gDownmixInterface = {
@@ -192,9 +188,11 @@
if (!mask) {
return false;
}
- // check against unsupported channels
- if (mask & ~AUDIO_CHANNEL_OUT_22POINT2) {
- ALOGE("Unsupported channels in %u", mask & ~AUDIO_CHANNEL_OUT_22POINT2);
+ // check against unsupported channels (up to FCC_26)
+ constexpr uint32_t MAXIMUM_CHANNEL_MASK = AUDIO_CHANNEL_OUT_22POINT2
+ | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT;
+ if (mask & ~MAXIMUM_CHANNEL_MASK) {
+ ALOGE("Unsupported channels in %#x", mask & ~MAXIMUM_CHANNEL_MASK);
return false;
}
return true;
@@ -315,7 +313,8 @@
audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
downmix_object_t *pDownmixer;
- float *pSrc, *pDst;
+ const float *pSrc;
+ float *pDst;
downmix_module_t *pDwmModule = (downmix_module_t *)self;
if (pDwmModule == NULL) {
@@ -344,7 +343,8 @@
const bool accumulate =
(pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
- const uint32_t downmixInputChannelMask = pDwmModule->config.inputCfg.channels;
+ const audio_channel_mask_t downmixInputChannelMask =
+ (audio_channel_mask_t)pDwmModule->config.inputCfg.channels;
switch(pDownmixer->type) {
@@ -368,38 +368,13 @@
}
break;
- case DOWNMIX_TYPE_FOLD:
-#ifdef DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER
- // bypass the optimized downmix routines for the common formats
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
- ALOGE("Multichannel configuration %#x is not supported",
- downmixInputChannelMask);
- return -EINVAL;
- }
- break;
-#endif
- // optimize for the common formats
- switch (downmixInputChannelMask) {
- case AUDIO_CHANNEL_OUT_QUAD_BACK:
- case AUDIO_CHANNEL_OUT_QUAD_SIDE:
- Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
- break;
- case AUDIO_CHANNEL_OUT_5POINT1_BACK:
- case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
- Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
- break;
- case AUDIO_CHANNEL_OUT_7POINT1:
- Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
- break;
- default:
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
+ case DOWNMIX_TYPE_FOLD: {
+ if (!pDownmixer->channelMix.process(
+ pSrc, pDst, numFrames, accumulate, downmixInputChannelMask)) {
ALOGE("Multichannel configuration %#x is not supported",
downmixInputChannelMask);
return -EINVAL;
}
- break;
}
break;
@@ -674,6 +649,12 @@
ALOGE("Downmix_Configure error: invalid config");
return -EINVAL;
}
+ // when configuring the effect, do not allow a blank or unsupported channel mask
+ if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
+ ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+ pConfig->inputCfg.channels);
+ return -EINVAL;
+ }
if (&pDwmModule->config != pConfig) {
memcpy(&pDwmModule->config, pConfig, sizeof(effect_config_t));
@@ -684,12 +665,6 @@
pDownmixer->apply_volume_correction = false;
pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
} else {
- // when configuring the effect, do not allow a blank or unsupported channel mask
- if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
- ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
- pConfig->inputCfg.channels);
- return -EINVAL;
- }
pDownmixer->input_channel_count =
audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
}
@@ -780,7 +755,6 @@
return 0;
} /* end Downmix_setParameter */
-
/*----------------------------------------------------------------------------
* Downmix_getParameter()
*----------------------------------------------------------------------------
@@ -829,299 +803,3 @@
return 0;
} /* end Downmix_getParameter */
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFromQuad()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a quad signal to stereo
- *
- * Inputs:
- * pSrc quad audio samples to downmix
- * numFrames the number of quad frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is RL
- // sample at index 3 is RR
- if (accumulate) {
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp_float(pDst[0] + ((pSrc[0] + pSrc[2]) / 2.0f));
- // FR + RR
- pDst[1] = clamp_float(pDst[1] + ((pSrc[1] + pSrc[3]) / 2.0f));
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp_float((pSrc[0] + pSrc[2]) / 2.0f);
- // FR + RR
- pDst[1] = clamp_float((pSrc[1] + pSrc[3]) / 2.0f);
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom5Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 5.1 signal to stereo
- *
- * Inputs:
- * pSrc 5.1 audio samples to downmix
- * numFrames the number of 5.1 frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
- float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
- // FR + centerPlusLfeContrib + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
- // accumulate in destination
- pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
- pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
- // FR + centerPlusLfeContrib + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
- // store in destination
- pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
- pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom7Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 7.1 signal to stereo
- *
- * Inputs:
- * pSrc 7.1 audio samples to downmix
- * numFrames the number of 7.1 frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
- float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // sample at index 6 is SL
- // sample at index 7 is SR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + SL + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
- // FR + centerPlusLfeContrib + SR + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
- //accumulate in destination
- pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
- pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + SL + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
- // FR + centerPlusLfeContrib + SR + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
- // store in destination
- pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
- pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldGeneric()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix to stereo a multichannel signal of arbitrary channel position mask.
- *
- * Inputs:
- * mask the channel mask of pSrc
- * pSrc multichannel audio buffer to downmix
- * numFrames the number of multichannel frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- * Returns: false if multichannel format is not supported
- *
- *----------------------------------------------------------------------------
- */
-bool Downmix_foldGeneric(
- uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-
- if (!Downmix_validChannelMask(mask)) {
- return false;
- }
- const int numChan = audio_channel_count_from_out_mask(mask);
-
- // compute at what index each channel is: samples will be in the following order:
- // FL FR FC LFE BL BR BC SL SR
- //
- // (transfer matrix)
- // FL FR FC LFE BL BR BC SL SR
- // 0.5 0.353 0.353 0.5 0.353 0.5
- // 0.5 0.353 0.353 0.5 0.353 0.5
-
- // derive the indices for the transfer matrix columns that have non-zero values.
- int indexFL = -1;
- int indexFR = -1;
- int indexFC = -1;
- int indexLFE = -1;
- int indexBL = -1;
- int indexBR = -1;
- int indexBC = -1;
- int indexSL = -1;
- int indexSR = -1;
- int index = 0;
- for (unsigned tmp = mask;
- (tmp & (AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER)) != 0;
- ++index) {
- const unsigned lowestBit = tmp & -(signed)tmp;
- switch (lowestBit) {
- case AUDIO_CHANNEL_OUT_FRONT_LEFT:
- indexFL = index;
- break;
- case AUDIO_CHANNEL_OUT_FRONT_RIGHT:
- indexFR = index;
- break;
- case AUDIO_CHANNEL_OUT_FRONT_CENTER:
- indexFC = index;
- break;
- case AUDIO_CHANNEL_OUT_LOW_FREQUENCY:
- indexLFE = index;
- break;
- case AUDIO_CHANNEL_OUT_BACK_LEFT:
- indexBL = index;
- break;
- case AUDIO_CHANNEL_OUT_BACK_RIGHT:
- indexBR = index;
- break;
- case AUDIO_CHANNEL_OUT_BACK_CENTER:
- indexBC = index;
- break;
- case AUDIO_CHANNEL_OUT_SIDE_LEFT:
- indexSL = index;
- break;
- case AUDIO_CHANNEL_OUT_SIDE_RIGHT:
- indexSR = index;
- break;
- }
- tmp ^= lowestBit;
- }
-
- // With good branch prediction, this should run reasonably fast.
- // Also consider using a transfer matrix form.
- while (numFrames) {
- // compute contribution of FC, BC and LFE
- float centersLfeContrib = 0;
- if (indexFC >= 0) centersLfeContrib = pSrc[indexFC];
- if (indexLFE >= 0) centersLfeContrib += pSrc[indexLFE];
- if (indexBC >= 0) centersLfeContrib += pSrc[indexBC];
- centersLfeContrib *= MINUS_3_DB_IN_FLOAT;
-
- float ch[2];
- ch[0] = centersLfeContrib;
- ch[1] = centersLfeContrib;
-
- // mix in left / right channels
- if (indexFL >= 0) ch[0] += pSrc[indexFL];
- if (indexFR >= 0) ch[1] += pSrc[indexFR];
-
- if (indexSL >= 0) ch[0] += pSrc[indexSL];
- if (indexSR >= 0) ch[1] += pSrc[indexSR]; // note pair checks enforce this if indexSL != 0
-
- if (indexBL >= 0) ch[0] += pSrc[indexBL];
- if (indexBR >= 0) ch[1] += pSrc[indexBR]; // note pair checks enforce this if indexBL != 0
-
- // scale to prevent overflow.
- ch[0] *= 0.5f;
- ch[1] *= 0.5f;
-
- if (accumulate) {
- ch[0] += pDst[0];
- ch[1] += pDst[1];
- }
-
- pDst[0] = clamp_float(ch[0]);
- pDst[1] = clamp_float(ch[1]);
- pSrc += numChan;
- pDst += 2;
- numFrames--;
- }
- return true;
-}
diff --git a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
index ee169c2..d9d40ed 100644
--- a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
+++ b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
@@ -35,16 +35,14 @@
AUDIO_CHANNEL_OUT_STEREO,
AUDIO_CHANNEL_OUT_2POINT1,
AUDIO_CHANNEL_OUT_2POINT0POINT2,
- AUDIO_CHANNEL_OUT_QUAD,
- AUDIO_CHANNEL_OUT_QUAD_BACK,
+ AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
AUDIO_CHANNEL_OUT_QUAD_SIDE,
AUDIO_CHANNEL_OUT_SURROUND,
AUDIO_CHANNEL_OUT_2POINT1POINT2,
AUDIO_CHANNEL_OUT_3POINT0POINT2,
AUDIO_CHANNEL_OUT_PENTA,
AUDIO_CHANNEL_OUT_3POINT1POINT2,
- AUDIO_CHANNEL_OUT_5POINT1,
- AUDIO_CHANNEL_OUT_5POINT1_BACK,
+ AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
AUDIO_CHANNEL_OUT_5POINT1_SIDE,
AUDIO_CHANNEL_OUT_6POINT1,
AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -62,58 +60,34 @@
static constexpr size_t kFrameCount = 1000;
/*
-Pixel 3XL
-downmix_benchmark:
- #BM_Downmix/0 4723 ns 4708 ns 148694
- #BM_Downmix/1 4717 ns 4702 ns 148873
- #BM_Downmix/2 4803 ns 4788 ns 145893
- #BM_Downmix/3 5056 ns 5041 ns 139110
- #BM_Downmix/4 4710 ns 4696 ns 149625
- #BM_Downmix/5 1514 ns 1509 ns 463694
- #BM_Downmix/6 1513 ns 1509 ns 463451
- #BM_Downmix/7 1516 ns 1511 ns 463899
- #BM_Downmix/8 4445 ns 4431 ns 157831
- #BM_Downmix/9 5081 ns 5065 ns 138412
- #BM_Downmix/10 4354 ns 4341 ns 161247
- #BM_Downmix/11 4411 ns 4397 ns 158893
- #BM_Downmix/12 4434 ns 4420 ns 157992
- #BM_Downmix/13 4845 ns 4830 ns 144873
- #BM_Downmix/14 4851 ns 4835 ns 144954
- #BM_Downmix/15 4884 ns 4870 ns 144233
- #BM_Downmix/16 5832 ns 5813 ns 120565
- #BM_Downmix/17 5241 ns 5224 ns 133927
- #BM_Downmix/18 5044 ns 5028 ns 139131
- #BM_Downmix/19 5244 ns 5227 ns 132315
- #BM_Downmix/20 5943 ns 5923 ns 117759
- #BM_Downmix/21 5990 ns 5971 ns 117263
- #BM_Downmix/22 4468 ns 4454 ns 156689
- #BM_Downmix/23 7306 ns 7286 ns 95911
---
-downmix_benchmark: (generic fold)
- #BM_Downmix/0 4722 ns 4707 ns 149847
- #BM_Downmix/1 4714 ns 4698 ns 148748
- #BM_Downmix/2 4794 ns 4779 ns 145661
- #BM_Downmix/3 5053 ns 5035 ns 139172
- #BM_Downmix/4 4695 ns 4678 ns 149762
- #BM_Downmix/5 4381 ns 4368 ns 159675
- #BM_Downmix/6 4387 ns 4373 ns 160267
- #BM_Downmix/7 4732 ns 4717 ns 148514
- #BM_Downmix/8 4430 ns 4415 ns 158133
- #BM_Downmix/9 5101 ns 5084 ns 138353
- #BM_Downmix/10 4356 ns 4343 ns 160821
- #BM_Downmix/11 4397 ns 4383 ns 159995
- #BM_Downmix/12 4438 ns 4424 ns 158117
- #BM_Downmix/13 5243 ns 5226 ns 133863
- #BM_Downmix/14 5259 ns 5242 ns 131855
- #BM_Downmix/15 5245 ns 5228 ns 133686
- #BM_Downmix/16 5829 ns 5809 ns 120543
- #BM_Downmix/17 5245 ns 5228 ns 133533
- #BM_Downmix/18 5935 ns 5916 ns 118282
- #BM_Downmix/19 5263 ns 5245 ns 133657
- #BM_Downmix/20 5998 ns 5978 ns 114693
- #BM_Downmix/21 5989 ns 5969 ns 117450
- #BM_Downmix/22 4442 ns 4431 ns 157913
- #BM_Downmix/23 7309 ns 7290 ns 95797
+Pixel 4XL
+$ adb shell /data/benchmarktest/downmix_benchmark/vendor/downmix_benchmark
+
+--------------------------------------------------------
+Benchmark Time CPU Iterations
+--------------------------------------------------------
+BM_Downmix/0 3638 ns 3624 ns 197517 AUDIO_CHANNEL_OUT_MONO
+BM_Downmix/1 4040 ns 4024 ns 178766
+BM_Downmix/2 4759 ns 4740 ns 134741 AUDIO_CHANNEL_OUT_STEREO
+BM_Downmix/3 6042 ns 6017 ns 129546 AUDIO_CHANNEL_OUT_2POINT1
+BM_Downmix/4 6897 ns 6868 ns 96316 AUDIO_CHANNEL_OUT_2POINT0POINT2
+BM_Downmix/5 2117 ns 2109 ns 331705 AUDIO_CHANNEL_OUT_QUAD
+BM_Downmix/6 2097 ns 2088 ns 335421 AUDIO_CHANNEL_OUT_QUAD_SIDE
+BM_Downmix/7 7291 ns 7263 ns 96256 AUDIO_CHANNEL_OUT_SURROUND
+BM_Downmix/8 8246 ns 8206 ns 84318 AUDIO_CHANNEL_OUT_2POINT1POINT2
+BM_Downmix/9 8341 ns 8303 ns 84298 AUDIO_CHANNEL_OUT_3POINT0POINT2
+BM_Downmix/10 7549 ns 7517 ns 84293 AUDIO_CHANNEL_OUT_PENTA
+BM_Downmix/11 9395 ns 9354 ns 75209 AUDIO_CHANNEL_OUT_3POINT1POINT2
+BM_Downmix/12 3267 ns 3253 ns 215596 AUDIO_CHANNEL_OUT_5POINT1
+BM_Downmix/13 3178 ns 3163 ns 220132 AUDIO_CHANNEL_OUT_5POINT1_SIDE
+BM_Downmix/14 10245 ns 10199 ns 67486 AUDIO_CHANNEL_OUT_6POINT1
+BM_Downmix/15 10975 ns 10929 ns 61359 AUDIO_CHANNEL_OUT_5POINT1POINT2
+BM_Downmix/16 3796 ns 3780 ns 184728 AUDIO_CHANNEL_OUT_7POINT1
+BM_Downmix/17 13562 ns 13503 ns 51823 AUDIO_CHANNEL_OUT_5POINT1POINT4
+BM_Downmix/18 13573 ns 13516 ns 51800 AUDIO_CHANNEL_OUT_7POINT1POINT2
+BM_Downmix/19 15502 ns 15435 ns 47147 AUDIO_CHANNEL_OUT_7POINT1POINT4
+BM_Downmix/20 16693 ns 16624 ns 42109 AUDIO_CHANNEL_OUT_13POINT_360RA
+BM_Downmix/21 28267 ns 28116 ns 24982 AUDIO_CHANNEL_OUT_22POINT2
*/
static void BM_Downmix(benchmark::State& state) {
@@ -125,7 +99,7 @@
std::minstd_rand gen(channelMask);
std::uniform_real_distribution<> dis(-1.0f, 1.0f);
std::vector<float> input(kFrameCount * channelCount);
- std::vector<float> output(kFrameCount * 2);
+ std::vector<float> output(kFrameCount * FCC_2);
for (auto& in : input) {
in = dis(gen);
}
@@ -187,7 +161,8 @@
benchmark::ClobberMemory();
}
- state.SetComplexityN(state.range(0));
+ state.SetComplexityN(channelCount);
+ state.SetLabel(audio_channel_out_mask_to_string(channelMask));
if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle); status != 0) {
ALOGE("release_effect returned an error = %d\n", status);
diff --git a/media/libeffects/downmix/tests/downmix_tests.cpp b/media/libeffects/downmix/tests/downmix_tests.cpp
index d4b7a3a..20e19a3 100644
--- a/media/libeffects/downmix/tests/downmix_tests.cpp
+++ b/media/libeffects/downmix/tests/downmix_tests.cpp
@@ -33,16 +33,14 @@
AUDIO_CHANNEL_OUT_STEREO,
AUDIO_CHANNEL_OUT_2POINT1,
AUDIO_CHANNEL_OUT_2POINT0POINT2,
- AUDIO_CHANNEL_OUT_QUAD,
- AUDIO_CHANNEL_OUT_QUAD_BACK,
+ AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
AUDIO_CHANNEL_OUT_QUAD_SIDE,
AUDIO_CHANNEL_OUT_SURROUND,
AUDIO_CHANNEL_OUT_2POINT1POINT2,
AUDIO_CHANNEL_OUT_3POINT0POINT2,
AUDIO_CHANNEL_OUT_PENTA,
AUDIO_CHANNEL_OUT_3POINT1POINT2,
- AUDIO_CHANNEL_OUT_5POINT1,
- AUDIO_CHANNEL_OUT_5POINT1_BACK,
+ AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
AUDIO_CHANNEL_OUT_5POINT1_SIDE,
AUDIO_CHANNEL_OUT_6POINT1,
AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -52,10 +50,72 @@
AUDIO_CHANNEL_OUT_7POINT1POINT4,
AUDIO_CHANNEL_OUT_13POINT_360RA,
AUDIO_CHANNEL_OUT_22POINT2,
+ audio_channel_mask_t(AUDIO_CHANNEL_OUT_22POINT2
+ | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT),
};
-static constexpr audio_channel_mask_t kConsideredChannels =
- (audio_channel_mask_t)(AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER);
+constexpr float COEF_25 = 0.2508909536f;
+constexpr float COEF_35 = 0.3543928915f;
+constexpr float COEF_36 = 0.3552343859f;
+constexpr float COEF_61 = 0.6057043428f;
+
+constexpr inline float kScaleFromChannelIdxLeft[] = {
+ 1.f, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
+ 0.5f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
+ 0.f, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
+ COEF_61, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
+ COEF_25, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+ 0.5f, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
+ 0.f, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
+ COEF_36, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
+ 1.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
+ COEF_35, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
+ COEF_61, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u,
+ 1.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT = 0x100000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
+ 0.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
+ 0.f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u,
+};
+
+constexpr inline float kScaleFromChannelIdxRight[] = {
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
+ 1.f, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
+ 0.5f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
+ 0.f, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
+ COEF_25, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
+ COEF_61, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+ 0.5f, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
+ 0.f, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
+ COEF_36, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
+ 1.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
+ COEF_35, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
+ COEF_61, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u,
+ 0.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT = 0x100000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
+ 1.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u,
+};
// Downmix doesn't change with sample rate
static constexpr size_t kSampleRates[] = {
@@ -93,8 +153,8 @@
void testBalance(int sampleRate, audio_channel_mask_t channelMask) {
using namespace ::android::audio_utils::channels;
- size_t frames = 100;
- unsigned outChannels = 2;
+ size_t frames = 100; // set to an even number (2, 4, 6 ... ) stream alternates +1, -1.
+ constexpr unsigned outChannels = 2;
unsigned inChannels = audio_channel_count_from_out_mask(channelMask);
std::vector<float> input(frames * inChannels);
std::vector<float> output(frames * outChannels);
@@ -102,7 +162,7 @@
double savedPower[32][2]{};
for (unsigned i = 0, channel = channelMask; channel != 0; ++i) {
const int index = __builtin_ctz(channel);
- ASSERT_LT(index, FCC_24);
+ ASSERT_LT(index, FCC_26);
const int pairIndex = pairIdxFromChannelIdx(index);
const AUDIO_GEOMETRY_SIDE side = sideFromChannelIdx(index);
const int channelBit = 1 << index;
@@ -119,7 +179,7 @@
auto stats = channelStatistics(output, 2 /* channels */);
// printf("power: %s %s\n", stats[0].toString().c_str(), stats[1].toString().c_str());
- double power[2] = { stats[0].getVariance(), stats[1].getVariance() };
+ double power[2] = { stats[0].getPopVariance(), stats[1].getPopVariance() };
// Check symmetric power for pair channels on exchange of left/right position.
// to do this, we save previous power measurements.
@@ -130,28 +190,39 @@
savedPower[index][0] = power[0];
savedPower[index][1] = power[1];
- // Confirm exactly the mix amount prescribed by the existing downmix effect.
- // For future changes to the downmix effect, the nearness needs to be relaxed
- // to compare behavior S or earlier.
- if ((channelBit & kConsideredChannels) == 0) {
- // for channels not considered, expect 0 power for legacy downmix
- EXPECT_EQ(0.f, power[0]);
- EXPECT_EQ(0.f, power[1]);
- continue;
- }
- constexpr float POWER_TOLERANCE = 0.01; // for variance sum error.
+ constexpr float POWER_TOLERANCE = 0.001;
+ const float expectedPower =
+ kScaleFromChannelIdxLeft[index] * kScaleFromChannelIdxLeft[index]
+ + kScaleFromChannelIdxRight[index] * kScaleFromChannelIdxRight[index];
+ EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
switch (side) {
case AUDIO_GEOMETRY_SIDE_LEFT:
- EXPECT_NEAR(0.25f, power[0], POWER_TOLERANCE);
+ if (channelBit == AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) {
+ break;
+ }
EXPECT_EQ(0.f, power[1]);
break;
case AUDIO_GEOMETRY_SIDE_RIGHT:
+ if (channelBit == AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) {
+ break;
+ }
EXPECT_EQ(0.f, power[0]);
- EXPECT_NEAR(0.25f, power[1], POWER_TOLERANCE);
break;
case AUDIO_GEOMETRY_SIDE_CENTER:
- EXPECT_NEAR(0.125f, power[0], POWER_TOLERANCE);
- EXPECT_NEAR(0.125f, power[1], POWER_TOLERANCE);
+ if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+ if (channelMask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ EXPECT_EQ(0.f, power[1]);
+ break;
+ } else {
+ EXPECT_NEAR_EPSILON(power[0], power[1]); // always true
+ EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
+ break;
+ }
+ } else if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ EXPECT_EQ(0.f, power[0]);
+ EXPECT_NEAR(expectedPower, power[1], POWER_TOLERANCE);
+ break;
+ }
EXPECT_NEAR_EPSILON(power[0], power[1]);
break;
}
@@ -178,6 +249,7 @@
handle_, EFFECT_CMD_SET_CONFIG,
sizeof(effect_config_t), &config_, &replySize, &reply);
ASSERT_EQ(0, err);
+ ASSERT_EQ(0, reply);
err = (downmixApi->command)(
handle_, EFFECT_CMD_ENABLE,
0, nullptr, &replySize, &reply);
@@ -188,6 +260,27 @@
ASSERT_EQ(0, err);
}
+ // This test assumes the channel mask is invalid.
+ void testInvalidChannelMask(audio_channel_mask_t invalidChannelMask) {
+ reconfig(48000 /* sampleRate */, invalidChannelMask);
+ const int32_t sessionId = 0;
+ const int32_t ioId = 0;
+ int32_t err = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(
+ &downmix_uuid_, sessionId, ioId, &handle_);
+ ASSERT_EQ(0, err);
+
+ const struct effect_interface_s * const downmixApi = *handle_;
+ int32_t reply = 0;
+ uint32_t replySize = (uint32_t)sizeof(reply);
+ err = (downmixApi->command)(
+ handle_, EFFECT_CMD_SET_CONFIG,
+ sizeof(effect_config_t), &config_, &replySize, &reply);
+ ASSERT_EQ(0, err);
+ ASSERT_NE(0, reply); // error has occurred.
+ err = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(handle_);
+ ASSERT_EQ(0, err);
+ }
+
private:
void reconfig(int sampleRate, audio_channel_mask_t channelMask) {
config_.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
@@ -234,6 +327,16 @@
int inputChannelCount_{};
};
+TEST(DownmixTestSimple, invalidChannelMask) {
+ // Fill in a dummy test method to use DownmixTest outside of a parameterized test.
+ class DownmixTestComplete : public DownmixTest {
+ void TestBody() override {}
+ } downmixtest;
+
+ constexpr auto INVALID_CHANNEL_MASK = audio_channel_mask_t(1 << 31);
+ downmixtest.testInvalidChannelMask(INVALID_CHANNEL_MASK);
+}
+
TEST_P(DownmixTest, basic) {
testBalance(kSampleRates[std::get<0>(GetParam())],
kChannelPositionMasks[std::get<1>(GetParam())]);
@@ -244,10 +347,11 @@
::testing::Combine(
::testing::Range(0, (int)std::size(kSampleRates)),
::testing::Range(0, (int)std::size(kChannelPositionMasks))
- ));
-
-int main(int argc, /* const */ char** argv) {
- ::testing::InitGoogleTest(&argc, argv);
- int status = RUN_ALL_TESTS();
- return status;
-}
+ ),
+ [](const testing::TestParamInfo<DownmixTest::ParamType>& info) {
+ const int index = std::get<1>(info.param);
+ const audio_channel_mask_t channelMask = kChannelPositionMasks[index];
+ const std::string name = std::string(audio_channel_out_mask_to_string(channelMask))
+ + "_" + std::to_string(std::get<0>(info.param)) + "_" + std::to_string(index);
+ return name;
+ });
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index a660957..03ce329 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -45,6 +45,7 @@
shared_libs: [
"libaudioutils",
+ "libbase",
"libbinder",
"liblog",
"libutils",
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index 65a20a7..3137e13 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -22,12 +22,15 @@
#include <algorithm>
#include <memory>
+#include <string>
#include <utility>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
+#include <android-base/parsedouble.h>
+#include <android-base/properties.h>
#include <audio_effects/effect_hapticgenerator.h>
#include <audio_utils/format.h>
#include <system/audio.h>
@@ -35,6 +38,7 @@
static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+static constexpr float DEFAULT_DISTORTION_OUTPUT_GAIN = 1.5f;
// This is the only symbol that needs to be exported
__attribute__ ((visibility ("default")))
@@ -81,6 +85,15 @@
namespace {
+float getFloatProperty(const std::string& key, float defaultValue) {
+ float result;
+ std::string value = android::base::GetProperty(key, "");
+ if (!value.empty() && android::base::ParseFloat(value, &result)) {
+ return result;
+ }
+ return defaultValue;
+}
+
int HapticGenerator_Init(struct HapticGeneratorContext *context) {
context->itfe = &gHapticGeneratorInterface;
@@ -114,7 +127,9 @@
context->param.distortionCornerFrequency = 300.0f;
context->param.distortionInputGain = 0.3f;
context->param.distortionCubeThreshold = 0.1f;
- context->param.distortionOutputGain = 1.5f;
+ context->param.distortionOutputGain = getFloatProperty(
+ "vendor.audio.hapticgenerator.distortion.output.gain", DEFAULT_DISTORTION_OUTPUT_GAIN);
+ ALOGD("Using distortion output gain as %f", context->param.distortionOutputGain);
context->state = HAPTICGENERATOR_STATE_INITIALIZED;
return 0;
@@ -287,15 +302,17 @@
break;
}
case HG_PARAM_VIBRATOR_INFO: {
- if (value == nullptr || size != 2 * sizeof(float)) {
+ if (value == nullptr || size != 3 * sizeof(float)) {
return -EINVAL;
}
const float resonantFrequency = *(float*) value;
const float qFactor = *((float *) value + 1);
+ const float maxAmplitude = *((float *) value + 2);
context->param.resonantFrequency =
isnan(resonantFrequency) ? DEFAULT_RESONANT_FREQUENCY : resonantFrequency;
context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_POLE_Q : qFactor;
context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
+ context->param.maxHapticAmplitude = maxAmplitude;
if (context->processorsRecord.bpf != nullptr) {
context->processorsRecord.bpf->setCoefficients(
@@ -448,7 +465,8 @@
float* hapticOutBuffer = HapticGenerator_runProcessingChain(
context->processingChain, context->inputBuffer.data(),
context->outputBuffer.data(), inBuffer->frameCount);
- os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity);
+ os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity,
+ context->param.maxHapticAmplitude);
// For haptic data, the haptic playback thread will copy the data from effect input buffer,
// which contains haptic data at the end of the buffer, directly to sink buffer.
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index 96b744a..85e961f 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -51,6 +51,7 @@
// A map from track id to haptic intensity.
std::map<int, os::HapticScale> id2Intensity;
os::HapticScale maxHapticIntensity; // max intensity will be used to scale haptic data.
+ float maxHapticAmplitude; // max amplitude will be used to limit haptic data absolute values.
float resonantFrequency;
float bpfQ;
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
index 7e5caed..ccef5ab 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
@@ -135,7 +135,6 @@
LVM_UINT32 fs =
(LVM_UINT32)LVEQNB_SampleRateTab[(LVM_UINT16)pParams->SampleRate]; /* Sample rate */
LVM_UINT32 fc; /* Filter centre frequency */
- LVM_INT16 QFactor; /* Filter Q factor */
pInstance->NBands = pParams->NBands;
@@ -144,7 +143,6 @@
* Get the filter settings
*/
fc = (LVM_UINT32)pParams->pBandDefinition[i].Frequency; /* Get the band centre frequency */
- QFactor = (LVM_INT16)pParams->pBandDefinition[i].QFactor; /* Get the band Q factor */
pInstance->pBiquadType[i] = LVEQNB_SinglePrecision_Float; /* Default to single precision */
@@ -313,9 +311,9 @@
*/
pInstance->eqBiquad.resize(pParams->NBands,
android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
- LVEQNB_ClearFilterHistory(pInstance);
if (bChange || modeChange) {
+ LVEQNB_ClearFilterHistory(pInstance);
/*
* If the sample rate has changed clear the history
*/
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
index 8e63502..ffed6d4 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Control.cpp
@@ -421,7 +421,6 @@
* Intermediate variables and temporary values
*/
LVM_FLOAT T0;
- LVM_FLOAT D;
LVM_FLOAT A0;
LVM_FLOAT B1;
LVM_FLOAT B2;
@@ -444,9 +443,6 @@
* Calculating the intermediate values
*/
T0 = Frequency * LVPSA_Float_TwoPiOnFsTable[Fs]; /* T0 = 2 * Pi * Fc / Fs */
- D = 3200; /* Floating point value 1.000000 (1*100*2^5) */
- /* Force D = 1 : the function was originally used for a peaking filter.
- The D parameter do not exist for a BandPass filter coefficients */
/*
* Calculate the B2 coefficient
@@ -535,7 +531,6 @@
* Intermediate variables and temporary values
*/
LVM_FLOAT T0;
- LVM_FLOAT D;
LVM_FLOAT A0;
LVM_FLOAT B1;
LVM_FLOAT B2;
@@ -558,9 +553,6 @@
* Calculating the intermediate values
*/
T0 = Frequency * LVPSA_Float_TwoPiOnFsTable[Fs]; /* T0 = 2 * Pi * Fc / Fs */
- D = 3200; /* Floating point value 1.000000 (1*100*2^5) */
- /* Force D = 1 : the function was originally used for a peaking filter.
- The D parameter do not exist for a BandPass filter coefficients */
/*
* Calculate the B2 coefficient
diff --git a/media/libeffects/lvm/tests/EffectBundleTest.cpp b/media/libeffects/lvm/tests/EffectBundleTest.cpp
index 881ffb1..018cb7c 100644
--- a/media/libeffects/lvm/tests/EffectBundleTest.cpp
+++ b/media/libeffects/lvm/tests/EffectBundleTest.cpp
@@ -14,29 +14,39 @@
* limitations under the License.
*/
+#include <system/audio_effects/effect_bassboost.h>
+#include <system/audio_effects/effect_equalizer.h>
+#include <system/audio_effects/effect_virtualizer.h>
#include "EffectTestHelper.h"
-using namespace android;
-// Update isBassBoost, if the order of effects is updated
-constexpr effect_uuid_t kEffectUuids[] = {
+using namespace android;
+typedef enum {
+ EFFECT_BASS_BOOST,
+ EFFECT_EQUALIZER,
+ EFFECT_VIRTUALIZER,
+ EFFECT_VOLUME
+} effect_type_t;
+
+const std::map<effect_type_t, effect_uuid_t> kEffectUuids = {
// NXP SW BassBoost
- {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
- // NXP SW Virtualizer
- {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ {EFFECT_BASS_BOOST,
+ {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
// NXP SW Equalizer
- {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ {EFFECT_EQUALIZER,
+ {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
+ // NXP SW Virtualizer
+ {EFFECT_VIRTUALIZER,
+ {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
// NXP SW Volume
- {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ {EFFECT_VOLUME, {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
};
-static bool isBassBoost(const effect_uuid_t* uuid) {
- // Update this, if the order of effects in kEffectUuids is updated
- return uuid == &kEffectUuids[0];
-}
+const size_t kNumEffectUuids = std::size(kEffectUuids);
-constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
+constexpr float kMinAmplitude = -1.0f;
+constexpr float kMaxAmplitude = 1.0f;
-typedef std::tuple<int, int, int, int, int> SingleEffectTestParam;
+using SingleEffectTestParam = std::tuple<int, int, int, int, int>;
class SingleEffectTest : public ::testing::TestWithParam<SingleEffectTestParam> {
public:
SingleEffectTest()
@@ -46,7 +56,8 @@
mFrameCount(EffectTestHelper::kFrameCounts[std::get<2>(GetParam())]),
mLoopCount(EffectTestHelper::kLoopCounts[std::get<3>(GetParam())]),
mTotalFrameCount(mFrameCount * mLoopCount),
- mUuid(&kEffectUuids[std::get<4>(GetParam())]) {}
+ mEffectType((effect_type_t)std::get<4>(GetParam())),
+ mUuid(kEffectUuids.at(mEffectType)) {}
const size_t mChMask;
const size_t mChannelCount;
@@ -54,7 +65,8 @@
const size_t mFrameCount;
const size_t mLoopCount;
const size_t mTotalFrameCount;
- const effect_uuid_t* mUuid;
+ const effect_type_t mEffectType;
+ const effect_uuid_t mUuid;
};
// Tests applying a single effect
@@ -63,7 +75,7 @@
<< "chMask: " << mChMask << " sampleRate: " << mSampleRate
<< " frameCount: " << mFrameCount << " loopCount: " << mLoopCount);
- EffectTestHelper effect(mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+ EffectTestHelper effect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
ASSERT_NO_FATAL_FAILURE(effect.createEffect());
ASSERT_NO_FATAL_FAILURE(effect.setConfig());
@@ -72,7 +84,7 @@
std::vector<float> input(mTotalFrameCount * mChannelCount);
std::vector<float> output(mTotalFrameCount * mChannelCount);
std::minstd_rand gen(mChMask);
- std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+ std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
for (auto& in : input) {
in = dis(gen);
}
@@ -88,7 +100,7 @@
::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
::testing::Range(0, (int)kNumEffectUuids)));
-typedef std::tuple<int, int, int, int> SingleEffectComparisonTestParam;
+using SingleEffectComparisonTestParam = std::tuple<int, int, int, int>;
class SingleEffectComparisonTest
: public ::testing::TestWithParam<SingleEffectComparisonTestParam> {
public:
@@ -97,13 +109,15 @@
mFrameCount(EffectTestHelper::kFrameCounts[std::get<1>(GetParam())]),
mLoopCount(EffectTestHelper::kLoopCounts[std::get<2>(GetParam())]),
mTotalFrameCount(mFrameCount * mLoopCount),
- mUuid(&kEffectUuids[std::get<3>(GetParam())]) {}
+ mEffectType((effect_type_t)std::get<3>(GetParam())),
+ mUuid(kEffectUuids.at(mEffectType)) {}
const size_t mSampleRate;
const size_t mFrameCount;
const size_t mLoopCount;
const size_t mTotalFrameCount;
- const effect_uuid_t* mUuid;
+ const effect_type_t mEffectType;
+ const effect_uuid_t mUuid;
};
// Compares first two channels in multi-channel output to stereo output when same effect is applied
@@ -115,7 +129,7 @@
std::vector<float> monoInput(mTotalFrameCount);
std::minstd_rand gen(mSampleRate);
- std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+ std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
for (auto& in : monoInput) {
in = dis(gen);
}
@@ -126,7 +140,7 @@
mTotalFrameCount * sizeof(float) * FCC_1);
// Apply effect on stereo channels
- EffectTestHelper stereoEffect(mUuid, AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_STEREO,
+ EffectTestHelper stereoEffect(&mUuid, AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_STEREO,
mSampleRate, mFrameCount, mLoopCount);
ASSERT_NO_FATAL_FAILURE(stereoEffect.createEffect());
@@ -142,7 +156,7 @@
for (size_t chMask : EffectTestHelper::kChMasks) {
size_t channelCount = audio_channel_count_from_out_mask(chMask);
- EffectTestHelper testEffect(mUuid, chMask, chMask, mSampleRate, mFrameCount, mLoopCount);
+ EffectTestHelper testEffect(&mUuid, chMask, chMask, mSampleRate, mFrameCount, mLoopCount);
ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
ASSERT_NO_FATAL_FAILURE(testEffect.setConfig());
@@ -170,7 +184,7 @@
memcpy_to_i16_from_float(stereoTestI16.data(), stereoTestOutput.data(),
mTotalFrameCount * FCC_2);
- if (isBassBoost(mUuid)) {
+ if (EFFECT_BASS_BOOST == mEffectType) {
// SNR must be above the threshold
float snr = computeSnr<int16_t>(stereoRefI16.data(), stereoTestI16.data(),
mTotalFrameCount * FCC_2);
@@ -191,6 +205,135 @@
::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
::testing::Range(0, (int)kNumEffectUuids)));
+using SingleEffectDefaultSetParamTestParam = std::tuple<int, int, int>;
+class SingleEffectDefaultSetParamTest
+ : public ::testing::TestWithParam<SingleEffectDefaultSetParamTestParam> {
+ public:
+ SingleEffectDefaultSetParamTest()
+ : mChMask(EffectTestHelper::kChMasks[std::get<0>(GetParam())]),
+ mChannelCount(audio_channel_count_from_out_mask(mChMask)),
+ mSampleRate(16000),
+ mFrameCount(EffectTestHelper::kFrameCounts[std::get<1>(GetParam())]),
+ mLoopCount(1),
+ mTotalFrameCount(mFrameCount * mLoopCount),
+ mEffectType((effect_type_t)std::get<2>(GetParam())),
+ mUuid(kEffectUuids.at(mEffectType)) {}
+
+ const size_t mChMask;
+ const size_t mChannelCount;
+ const size_t mSampleRate;
+ const size_t mFrameCount;
+ const size_t mLoopCount;
+ const size_t mTotalFrameCount;
+ const effect_type_t mEffectType;
+ const effect_uuid_t mUuid;
+};
+
+// Tests verifying that redundant setParam calls do not alter output
+TEST_P(SingleEffectDefaultSetParamTest, SimpleProcess) {
+ SCOPED_TRACE(testing::Message()
+ << "chMask: " << mChMask << " sampleRate: " << mSampleRate
+ << " frameCount: " << mFrameCount << " loopCount: " << mLoopCount);
+ // effect.process() handles mTotalFrameCount * mChannelCount samples in each call.
+ // This test calls process() twice per effect, hence total samples when allocating
+ // input and output vectors is twice the number of samples processed in one call.
+ size_t totalNumSamples = 2 * mTotalFrameCount * mChannelCount;
+ // Initialize input buffer with deterministic pseudo-random values
+ std::vector<float> input(totalNumSamples);
+ std::minstd_rand gen(mChMask);
+ std::uniform_real_distribution<> dis(kMinAmplitude, kMaxAmplitude);
+ for (auto& in : input) {
+ in = dis(gen);
+ }
+
+ uint32_t key;
+ int32_t value1, value2;
+ switch (mEffectType) {
+ case EFFECT_BASS_BOOST:
+ key = BASSBOOST_PARAM_STRENGTH;
+ value1 = 1;
+ value2 = 14;
+ break;
+ case EFFECT_VIRTUALIZER:
+ key = VIRTUALIZER_PARAM_STRENGTH;
+ value1 = 0;
+ value2 = 100;
+ break;
+ case EFFECT_EQUALIZER:
+ key = EQ_PARAM_CUR_PRESET;
+ value1 = 0;
+ value2 = 1;
+ break;
+ case EFFECT_VOLUME:
+ key = 0 /* VOLUME_PARAM_LEVEL */;
+ value1 = 0;
+ value2 = -100;
+ break;
+ default:
+ FAIL() << "Unsupported effect type : " << mEffectType;
+ }
+
+ EffectTestHelper refEffect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+
+ ASSERT_NO_FATAL_FAILURE(refEffect.createEffect());
+ ASSERT_NO_FATAL_FAILURE(refEffect.setConfig());
+
+ if (EFFECT_BASS_BOOST == mEffectType) {
+ ASSERT_NO_FATAL_FAILURE(refEffect.setParam<int16_t>(key, value1));
+ } else {
+ ASSERT_NO_FATAL_FAILURE(refEffect.setParam<int32_t>(key, value1));
+ }
+ std::vector<float> refOutput(totalNumSamples);
+ float* pInput = input.data();
+ float* pOutput = refOutput.data();
+ ASSERT_NO_FATAL_FAILURE(refEffect.process(pInput, pOutput));
+
+ pInput += totalNumSamples / 2;
+ pOutput += totalNumSamples / 2;
+ ASSERT_NO_FATAL_FAILURE(refEffect.process(pInput, pOutput));
+ ASSERT_NO_FATAL_FAILURE(refEffect.releaseEffect());
+
+ EffectTestHelper testEffect(&mUuid, mChMask, mChMask, mSampleRate, mFrameCount, mLoopCount);
+
+ ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
+ ASSERT_NO_FATAL_FAILURE(testEffect.setConfig());
+
+ if (EFFECT_BASS_BOOST == mEffectType) {
+ ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value1));
+ } else {
+ ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value1));
+ }
+
+ std::vector<float> testOutput(totalNumSamples);
+ pInput = input.data();
+ pOutput = testOutput.data();
+ ASSERT_NO_FATAL_FAILURE(testEffect.process(pInput, pOutput));
+
+ // Call setParam once to change the parameters, and then call setParam again
+ // to restore the parameters to the initial state, making the first setParam
+ // call redundant
+ if (EFFECT_BASS_BOOST == mEffectType) {
+ ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value2));
+ ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int16_t>(key, value1));
+ } else {
+ ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value2));
+ ASSERT_NO_FATAL_FAILURE(testEffect.setParam<int32_t>(key, value1));
+ }
+
+ pInput += totalNumSamples / 2;
+ pOutput += totalNumSamples / 2;
+ ASSERT_NO_FATAL_FAILURE(testEffect.process(pInput, pOutput));
+ ASSERT_NO_FATAL_FAILURE(testEffect.releaseEffect());
+ ASSERT_TRUE(areNearlySame(refOutput.data(), testOutput.data(), totalNumSamples))
+ << "Outputs do not match with default setParam calls";
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ EffectBundleTestAll, SingleEffectDefaultSetParamTest,
+ ::testing::Combine(::testing::Range(0, (int)EffectTestHelper::kNumChMasks),
+ ::testing::Range(0, (int)EffectTestHelper::kNumFrameCounts),
+ ::testing::Range(0, (int)kNumEffectUuids)));
+
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
int status = RUN_ALL_TESTS();
diff --git a/media/libeffects/lvm/tests/EffectTestHelper.cpp b/media/libeffects/lvm/tests/EffectTestHelper.cpp
index 625c15a..ec727c7 100644
--- a/media/libeffects/lvm/tests/EffectTestHelper.cpp
+++ b/media/libeffects/lvm/tests/EffectTestHelper.cpp
@@ -50,23 +50,6 @@
ASSERT_EQ(reply, 0) << "cmd_enable reply non zero " << reply;
}
-void EffectTestHelper::setParam(uint32_t type, uint32_t value) {
- int reply = 0;
- uint32_t replySize = sizeof(reply);
- uint32_t paramData[2] = {type, value};
- auto effectParam = new effect_param_t[sizeof(effect_param_t) + sizeof(paramData)];
- memcpy(&effectParam->data[0], ¶mData[0], sizeof(paramData));
- effectParam->psize = sizeof(paramData[0]);
- effectParam->vsize = sizeof(paramData[1]);
- int status = (*mEffectHandle)
- ->command(mEffectHandle, EFFECT_CMD_SET_PARAM,
- sizeof(effect_param_t) + sizeof(paramData), effectParam,
- &replySize, &reply);
- delete[] effectParam;
- ASSERT_EQ(status, 0) << "set_param returned an error " << status;
- ASSERT_EQ(reply, 0) << "set_param reply non zero " << reply;
-}
-
void EffectTestHelper::process(float* input, float* output) {
audio_buffer_t inBuffer = {.frameCount = mFrameCount, .f32 = input};
audio_buffer_t outBuffer = {.frameCount = mFrameCount, .f32 = output};
diff --git a/media/libeffects/lvm/tests/EffectTestHelper.h b/media/libeffects/lvm/tests/EffectTestHelper.h
index 3854d46..bcee84e 100644
--- a/media/libeffects/lvm/tests/EffectTestHelper.h
+++ b/media/libeffects/lvm/tests/EffectTestHelper.h
@@ -50,6 +50,23 @@
return snr;
}
+template <typename T>
+static float areNearlySame(const T* ref, const T* tst, size_t count) {
+ T delta;
+ if constexpr (std::is_floating_point_v<T>) {
+ delta = std::numeric_limits<T>::epsilon();
+ } else {
+ delta = 1;
+ }
+ for (size_t i = 0; i < count; ++i) {
+ const double diff(tst[i] - ref[i]);
+ if (abs(diff) > delta) {
+ return false;
+ }
+ }
+ return true;
+}
+
class EffectTestHelper {
public:
EffectTestHelper(const effect_uuid_t* uuid, size_t inChMask, size_t outChMask,
@@ -65,7 +82,25 @@
void createEffect();
void releaseEffect();
void setConfig();
- void setParam(uint32_t type, uint32_t val);
+ template <typename VALUE_DTYPE>
+ void setParam(uint32_t type, VALUE_DTYPE const value) {
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+
+ uint8_t paramData[sizeof(effect_param_t) + sizeof(type) + sizeof(value)];
+ auto effectParam = (effect_param_t*)paramData;
+
+ memcpy(&effectParam->data[0], &type, sizeof(type));
+ memcpy(&effectParam->data[sizeof(type)], &value, sizeof(value));
+ effectParam->psize = sizeof(type);
+ effectParam->vsize = sizeof(value);
+ int status = (*mEffectHandle)
+ ->command(mEffectHandle, EFFECT_CMD_SET_PARAM,
+ sizeof(effect_param_t) + sizeof(type) + sizeof(value),
+ effectParam, &replySize, &reply);
+ ASSERT_EQ(status, 0) << "set_param returned an error " << status;
+ ASSERT_EQ(reply, 0) << "set_param reply non zero " << reply;
+ };
void process(float* input, float* output);
// Corresponds to SNR for 1 bit difference between two int16_t signals
diff --git a/media/libeffects/preprocessing/.clang-format b/media/libeffects/preprocessing/.clang-format
deleted file mode 120000
index f1b4f69..0000000
--- a/media/libeffects/preprocessing/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/libeffects/preprocessing/tests/correlation.cpp b/media/libeffects/preprocessing/tests/correlation.cpp
index eb56fc3..0853673 100644
--- a/media/libeffects/preprocessing/tests/correlation.cpp
+++ b/media/libeffects/preprocessing/tests/correlation.cpp
@@ -36,7 +36,7 @@
const int16_t* sigY, int len,
int16_t enableCrossCorr) {
float maxCorrVal = 0.f, prevCorrVal = 0.f;
- int delay = 0, peakIndex = 0, flag = 0;
+ int peakIndex = 0, flag = 0;
int loopLim = (1 == enableCrossCorr) ? len : kMinLoopLimitValue;
std::vector<int> peakIndexVect(kNumPeaks, 0);
std::vector<float> peakValueVect(kNumPeaks, 0.f);
@@ -47,7 +47,6 @@
}
corrVal /= len - i;
if (corrVal > maxCorrVal) {
- delay = i;
maxCorrVal = corrVal;
}
// Correlation peaks are expected to be observed at equal intervals. The interval length is
diff --git a/media/libeffects/testlibs/Android.bp b/media/libeffects/testlibs/Android.bp
new file mode 100644
index 0000000..5ba56bb
--- /dev/null
+++ b/media/libeffects/testlibs/Android.bp
@@ -0,0 +1,77 @@
+// Test Reverb library
+package {
+ default_applicable_licenses: [
+ "frameworks_av_media_libeffects_testlibs_license",
+ ],
+}
+
+license {
+ name: "frameworks_av_media_libeffects_testlibs_license",
+ visibility: [":__subpackages__"],
+ license_kinds: [
+ "SPDX-license-identifier-Apache-2.0",
+ ],
+ license_text: [
+ "NOTICE",
+ ],
+}
+
+cc_library {
+ name: "libreverbtest",
+ host_supported: true,
+ vendor: true,
+ srcs: [
+ "EffectReverb.c",
+ "EffectsMath.c",
+ ],
+
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ ],
+
+ relative_install_path: "soundfx",
+
+ cflags: [
+ "-fvisibility=hidden",
+ "-Wall",
+ "-Werror",
+ "-Wno-address-of-packed-member",
+ ],
+
+ header_libs: [
+ "libaudioeffects",
+ ],
+}
+
+cc_library {
+ name: "libequalizertest",
+ host_supported: true,
+ vendor: true,
+ srcs: [
+ "AudioBiquadFilter.cpp",
+ "AudioCoefInterpolator.cpp",
+ "AudioEqualizer.cpp",
+ "AudioPeakingFilter.cpp",
+ "AudioShelvingFilter.cpp",
+ "EffectEqualizer.cpp",
+ "EffectsMath.c",
+ ],
+
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ ],
+
+ relative_install_path: "soundfx",
+
+ cflags: [
+ "-fvisibility=hidden",
+ "-Wall",
+ "-Werror",
+ ],
+
+ header_libs: [
+ "libaudioeffects",
+ ],
+}
diff --git a/media/libeffects/testlibs/Android.mk_ b/media/libeffects/testlibs/Android.mk_
deleted file mode 100644
index 14c373f..0000000
--- a/media/libeffects/testlibs/Android.mk_
+++ /dev/null
@@ -1,55 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-# Test Reverb library
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- EffectReverb.c.arm \
- EffectsMath.c.arm
-
-LOCAL_CFLAGS := -O2
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libdl
-
-LOCAL_MODULE_RELATIVE_PATH := soundfx
-LOCAL_MODULE := libreverbtest
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-effects) \
- $(call include-path-for, graphics corecg)
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-
-# Test Equalizer library
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- EffectsMath.c.arm \
- EffectEqualizer.cpp \
- AudioBiquadFilter.cpp.arm \
- AudioCoefInterpolator.cpp.arm \
- AudioPeakingFilter.cpp.arm \
- AudioShelvingFilter.cpp.arm \
- AudioEqualizer.cpp.arm
-
-LOCAL_CFLAGS := -O2
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libdl
-
-LOCAL_MODULE_RELATIVE_PATH := soundfx
-LOCAL_MODULE := libequalizertest
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, graphics corecg) \
- $(call include-path-for, audio-effects)
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/media/libeffects/testlibs/AudioEqualizer.cpp b/media/libeffects/testlibs/AudioEqualizer.cpp
index 4f3a308..141750b 100644
--- a/media/libeffects/testlibs/AudioEqualizer.cpp
+++ b/media/libeffects/testlibs/AudioEqualizer.cpp
@@ -19,7 +19,7 @@
#include <assert.h>
#include <stdlib.h>
#include <new>
-#include <utils/Log.h>
+#include <log/log.h>
#include "AudioEqualizer.h"
#include "AudioPeakingFilter.h"
diff --git a/media/libeffects/testlibs/AudioPeakingFilter.cpp b/media/libeffects/testlibs/AudioPeakingFilter.cpp
index 99323ac..4257eca 100644
--- a/media/libeffects/testlibs/AudioPeakingFilter.cpp
+++ b/media/libeffects/testlibs/AudioPeakingFilter.cpp
@@ -87,9 +87,9 @@
void AudioPeakingFilter::commit(bool immediate) {
audio_coef_t coefs[5];
int intCoord[3] = {
- mFrequency >> FREQ_PRECISION_BITS,
+ (int)(mFrequency >> FREQ_PRECISION_BITS),
mGain >> GAIN_PRECISION_BITS,
- mBandwidth >> BANDWIDTH_PRECISION_BITS
+ (int)(mBandwidth >> BANDWIDTH_PRECISION_BITS)
};
uint32_t fracCoord[3] = {
mFrequency << (32 - FREQ_PRECISION_BITS),
diff --git a/media/libeffects/testlibs/AudioShelvingFilter.cpp b/media/libeffects/testlibs/AudioShelvingFilter.cpp
index e031287..ad43c5a 100644
--- a/media/libeffects/testlibs/AudioShelvingFilter.cpp
+++ b/media/libeffects/testlibs/AudioShelvingFilter.cpp
@@ -89,8 +89,8 @@
void AudioShelvingFilter::commit(bool immediate) {
audio_coef_t coefs[5];
int intCoord[2] = {
- mFrequency >> FREQ_PRECISION_BITS,
- mGain >> GAIN_PRECISION_BITS
+ (int)(mFrequency >> FREQ_PRECISION_BITS),
+ (int)(mGain >> GAIN_PRECISION_BITS)
};
uint32_t fracCoord[2] = {
mFrequency << (32 - FREQ_PRECISION_BITS),
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index db4d009..72b530d 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -131,7 +131,8 @@
int32_t ioId,
effect_handle_t *pHandle) {
int ret;
- int i;
+ (void)sessionId;
+ (void)ioId;
ALOGV("EffectLibCreateEffect start");
@@ -160,7 +161,7 @@
pContext->state = EQUALIZER_STATE_INITIALIZED;
ALOGV("EffectLibCreateEffect %p, size %d",
- pContext, AudioEqualizer::GetInstanceSize(kNumBands)+sizeof(EqualizerContext));
+ pContext, (int)(AudioEqualizer::GetInstanceSize(kNumBands)+sizeof(EqualizerContext)));
return 0;
@@ -294,7 +295,6 @@
int Equalizer_init(EqualizerContext *pContext)
{
- int status;
ALOGV("Equalizer_init start");
@@ -630,7 +630,6 @@
void *pCmdData, uint32_t *replySize, void *pReplyData) {
android::EqualizerContext * pContext = (android::EqualizerContext *) self;
- int retsize;
if (pContext == NULL || pContext->state == EQUALIZER_STATE_UNINITIALIZED) {
return -EINVAL;
@@ -750,13 +749,13 @@
NULL
};
-
+__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
- tag : AUDIO_EFFECT_LIBRARY_TAG,
- version : EFFECT_LIBRARY_API_VERSION,
- name : "Test Equalizer Library",
- implementor : "The Android Open Source Project",
- create_effect : android::EffectCreate,
- release_effect : android::EffectRelease,
- get_descriptor : android::EffectGetDescriptor,
+ .tag = AUDIO_EFFECT_LIBRARY_TAG,
+ .version = EFFECT_LIBRARY_API_VERSION,
+ .name = "Test Equalizer Library",
+ .implementor = "The Android Open Source Project",
+ .create_effect = android::EffectCreate,
+ .release_effect = android::EffectRelease,
+ .get_descriptor = android::EffectGetDescriptor,
};
diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c
index fce9bed..efba4f4 100644
--- a/media/libeffects/testlibs/EffectReverb.c
+++ b/media/libeffects/testlibs/EffectReverb.c
@@ -107,6 +107,8 @@
const effect_descriptor_t *desc;
int aux = 0;
int preset = 0;
+ (void)sessionId;
+ (void)ioId;
ALOGV("EffectLibCreateEffect start");
@@ -149,7 +151,7 @@
module->context.mState = REVERB_STATE_INITIALIZED;
- ALOGV("EffectLibCreateEffect %p ,size %d", module, sizeof(reverb_module_t));
+ ALOGV("EffectLibCreateEffect %p ,size %zu", module, sizeof(reverb_module_t));
return 0;
}
@@ -283,7 +285,6 @@
void *pCmdData, uint32_t *replySize, void *pReplyData) {
reverb_module_t *pRvbModule = (reverb_module_t *) self;
reverb_object_t *pReverb;
- int retsize;
if (pRvbModule == NULL ||
pRvbModule->context.mState == REVERB_STATE_UNINITIALIZED) {
@@ -758,7 +759,6 @@
int32_t *pValue32;
int16_t *pValue16;
t_reverb_settings *pProperties;
- int32_t i;
int32_t temp;
int32_t temp2;
uint32_t size;
@@ -1654,7 +1654,6 @@
int32_t nApOut;
int32_t j;
- int32_t nEarlyOut;
int32_t tempValue;
@@ -2203,6 +2202,7 @@
return 0;
}
+__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
.tag = AUDIO_EFFECT_LIBRARY_TAG,
.version = EFFECT_LIBRARY_API_VERSION,
diff --git a/media/libeffects/testlibs/EffectReverb.h b/media/libeffects/testlibs/EffectReverb.h
index 756c5ea..8f405d4 100644
--- a/media/libeffects/testlibs/EffectReverb.h
+++ b/media/libeffects/testlibs/EffectReverb.h
@@ -443,7 +443,4 @@
*/
static int ReverbUpdateRoom(reverb_object_t* pReverbData, bool fullUpdate);
-
-static int ReverbComputeConstants(reverb_object_t *pReverbData, uint32_t samplingRate);
-
#endif /*ANDROID_EFFECTREVERB_H_*/
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
new file mode 100644
index 0000000..63b769e
--- /dev/null
+++ b/media/libheadtracking/Android.bp
@@ -0,0 +1,78 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_library {
+ name: "libheadtracking",
+ host_supported: true,
+ srcs: [
+ "HeadTrackingProcessor.cpp",
+ "ModeSelector.cpp",
+ "Pose.cpp",
+ "PoseDriftCompensator.cpp",
+ "PoseRateLimiter.cpp",
+ "QuaternionUtil.cpp",
+ "ScreenHeadFusion.cpp",
+ "Twist.cpp",
+ ],
+ export_include_dirs: [
+ "include",
+ ],
+ header_libs: [
+ "libeigen",
+ ],
+ export_header_lib_headers: [
+ "libeigen",
+ ],
+}
+
+cc_library {
+ name: "libheadtracking-binding",
+ srcs: [
+ "SensorPoseProvider.cpp",
+ ],
+ shared_libs: [
+ "libheadtracking",
+ "liblog",
+ "libsensor",
+ "libutils",
+ ],
+ export_shared_lib_headers: [
+ "libheadtracking",
+ ],
+}
+
+cc_binary {
+ name: "SensorPoseProvider-example",
+ srcs: [
+ "SensorPoseProvider-example.cpp",
+ ],
+ shared_libs: [
+ "libheadtracking",
+ "libheadtracking-binding",
+ "libsensor",
+ "libutils",
+ ],
+}
+
+cc_test_host {
+ name: "libheadtracking-test",
+ srcs: [
+ "HeadTrackingProcessor-test.cpp",
+ "ModeSelector-test.cpp",
+ "Pose-test.cpp",
+ "PoseDriftCompensator-test.cpp",
+ "PoseRateLimiter-test.cpp",
+ "QuaternionUtil-test.cpp",
+ "ScreenHeadFusion-test.cpp",
+ "Twist-test.cpp",
+ ],
+ shared_libs: [
+ "libheadtracking",
+ ],
+}
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
new file mode 100644
index 0000000..299192f
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = HeadTrackingProcessor::Options;
+
+TEST(HeadTrackingProcessor, Initial) {
+ for (auto mode : {HeadTrackingMode::STATIC, HeadTrackingMode::WORLD_RELATIVE,
+ HeadTrackingMode::SCREEN_RELATIVE}) {
+ std::unique_ptr<HeadTrackingProcessor> processor =
+ createHeadTrackingProcessor(Options{}, mode);
+ processor->calculate(0);
+ EXPECT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+ }
+}
+
+TEST(HeadTrackingProcessor, BasicComposition) {
+ const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+ const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+ const Pose3f screenToStage{{7, 8, 9}, Quaternionf::UnitRandom()};
+ const float physicalToLogical = M_PI_2;
+
+ std::unique_ptr<HeadTrackingProcessor> processor =
+ createHeadTrackingProcessor(Options{}, HeadTrackingMode::SCREEN_RELATIVE);
+
+ // Establish a baseline for the drift compensators.
+ processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+ processor->setWorldToScreenPose(0, Pose3f());
+
+ processor->setDisplayOrientation(physicalToLogical);
+ processor->setWorldToHeadPose(0, worldToHead, Twist3f());
+ processor->setWorldToScreenPose(0, worldToScreen);
+ processor->setScreenToStagePose(screenToStage);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * worldToScreen *
+ Pose3f(rotateY(-physicalToLogical)) *
+ screenToStage);
+
+ processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+
+ processor->setDesiredMode(HeadTrackingMode::STATIC);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), screenToStage);
+}
+
+TEST(HeadTrackingProcessor, Prediction) {
+ const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+ const Twist3f headTwist{{4, 5, 6}, quaternionToRotationVector(Quaternionf::UnitRandom()) / 10};
+ const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+
+ std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
+ Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE);
+
+ // Establish a baseline for the drift compensators.
+ processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+ processor->setWorldToScreenPose(0, Pose3f());
+
+ processor->setWorldToHeadPose(0, worldToHead, headTwist);
+ processor->setWorldToScreenPose(0, worldToScreen);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), (worldToHead * integrate(headTwist, 2.f)).inverse());
+
+ processor->setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(),
+ (worldToHead * integrate(headTwist, 2.f)).inverse() * worldToScreen);
+
+ processor->setDesiredMode(HeadTrackingMode::STATIC);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+TEST(HeadTrackingProcessor, SmoothModeSwitch) {
+ const Pose3f targetHeadToWorld = Pose3f({4, 0, 0}, rotateZ(M_PI / 2));
+
+ std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
+ Options{.maxTranslationalVelocity = 1}, HeadTrackingMode::STATIC);
+
+ // Establish a baseline for the drift compensators.
+ processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+ processor->setWorldToScreenPose(0, Pose3f());
+
+ processor->calculate(0);
+
+ processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ processor->setWorldToHeadPose(0, targetHeadToWorld.inverse(), Twist3f());
+
+ // We're expecting a gradual move to the target.
+ processor->calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+
+ processor->calculate(2);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f({2, 0, 0}, rotateZ(M_PI / 4)));
+
+ processor->calculate(4);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), targetHeadToWorld);
+
+ // Now that we've reached the target, we should no longer be rate limiting.
+ processor->setWorldToHeadPose(4, Pose3f(), Twist3f());
+ processor->calculate(5);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
new file mode 100644
index 0000000..47f7cf0
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include "ModeSelector.h"
+#include "PoseDriftCompensator.h"
+#include "QuaternionUtil.h"
+#include "ScreenHeadFusion.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+class HeadTrackingProcessorImpl : public HeadTrackingProcessor {
+ public:
+ HeadTrackingProcessorImpl(const Options& options, HeadTrackingMode initialMode)
+ : mOptions(options),
+ mHeadPoseDriftCompensator(PoseDriftCompensator::Options{
+ .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
+ .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
+ }),
+ mScreenPoseDriftCompensator(PoseDriftCompensator::Options{
+ .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
+ .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
+ }),
+ mModeSelector(ModeSelector::Options{.freshnessTimeout = options.freshnessTimeout},
+ initialMode),
+ mRateLimiter(PoseRateLimiter::Options{
+ .maxTranslationalVelocity = options.maxTranslationalVelocity,
+ .maxRotationalVelocity = options.maxRotationalVelocity}) {}
+
+ void setDesiredMode(HeadTrackingMode mode) override { mModeSelector.setDesiredMode(mode); }
+
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+ const Twist3f& headTwist) override {
+ Pose3f predictedWorldToHead =
+ worldToHead * integrate(headTwist, mOptions.predictionDuration);
+ mHeadPoseDriftCompensator.setInput(timestamp, predictedWorldToHead);
+ mWorldToHeadTimestamp = timestamp;
+ }
+
+ void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) override {
+ if (mPhysicalToLogicalAngle != mPendingPhysicalToLogicalAngle) {
+ // We're introducing an artificial discontinuity. Enable the rate limiter.
+ mRateLimiter.enable();
+ mPhysicalToLogicalAngle = mPendingPhysicalToLogicalAngle;
+ }
+
+ mScreenPoseDriftCompensator.setInput(
+ timestamp, worldToScreen * Pose3f(rotateY(-mPhysicalToLogicalAngle)));
+ mWorldToScreenTimestamp = timestamp;
+ }
+
+ void setScreenToStagePose(const Pose3f& screenToStage) override {
+ mModeSelector.setScreenToStagePose(screenToStage);
+ }
+
+ void setDisplayOrientation(float physicalToLogicalAngle) override {
+ mPendingPhysicalToLogicalAngle = physicalToLogicalAngle;
+ }
+
+ void calculate(int64_t timestamp) override {
+ if (mWorldToHeadTimestamp.has_value()) {
+ const Pose3f worldToHead = mHeadPoseDriftCompensator.getOutput();
+ mScreenHeadFusion.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+ mModeSelector.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+ }
+
+ if (mWorldToScreenTimestamp.has_value()) {
+ const Pose3f worldToLogicalScreen = mScreenPoseDriftCompensator.getOutput();
+ mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
+ worldToLogicalScreen);
+ }
+
+ auto maybeScreenToHead = mScreenHeadFusion.calculate();
+ if (maybeScreenToHead.has_value()) {
+ mModeSelector.setScreenToHeadPose(maybeScreenToHead->timestamp,
+ maybeScreenToHead->pose);
+ } else {
+ mModeSelector.setScreenToHeadPose(timestamp, std::nullopt);
+ }
+
+ HeadTrackingMode prevMode = mModeSelector.getActualMode();
+ mModeSelector.calculate(timestamp);
+ if (mModeSelector.getActualMode() != prevMode) {
+ // Mode has changed, enable rate limiting.
+ mRateLimiter.enable();
+ }
+ mRateLimiter.setTarget(mModeSelector.getHeadToStagePose());
+ mHeadToStagePose = mRateLimiter.calculatePose(timestamp);
+ }
+
+ Pose3f getHeadToStagePose() const override { return mHeadToStagePose; }
+
+ HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
+
+ void recenter(bool recenterHead, bool recenterScreen) override {
+ if (recenterHead) {
+ mHeadPoseDriftCompensator.recenter();
+ }
+ if (recenterScreen) {
+ mScreenPoseDriftCompensator.recenter();
+ }
+
+ // If a sensor being recentered is included in the current mode, apply rate limiting to
+ // avoid discontinuities.
+ HeadTrackingMode mode = mModeSelector.getActualMode();
+ if ((recenterHead && (mode == HeadTrackingMode::WORLD_RELATIVE ||
+ mode == HeadTrackingMode::SCREEN_RELATIVE)) ||
+ (recenterScreen && mode == HeadTrackingMode::SCREEN_RELATIVE)) {
+ mRateLimiter.enable();
+ }
+ }
+
+ private:
+ const Options mOptions;
+ float mPhysicalToLogicalAngle = 0;
+ // We store the physical to logical angle as "pending" until the next world-to-screen sample it
+ // applies to arrives.
+ float mPendingPhysicalToLogicalAngle = 0;
+ std::optional<int64_t> mWorldToHeadTimestamp;
+ std::optional<int64_t> mWorldToScreenTimestamp;
+ Pose3f mHeadToStagePose;
+ PoseDriftCompensator mHeadPoseDriftCompensator;
+ PoseDriftCompensator mScreenPoseDriftCompensator;
+ ScreenHeadFusion mScreenHeadFusion;
+ ModeSelector mModeSelector;
+ PoseRateLimiter mRateLimiter;
+};
+
+} // namespace
+
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcessor(
+ const HeadTrackingProcessor::Options& options, HeadTrackingMode initialMode) {
+ return std::make_unique<HeadTrackingProcessorImpl>(options, initialMode);
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
new file mode 100644
index 0000000..6247d84
--- /dev/null
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+TEST(ModeSelector, Initial) {
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), Pose3f());
+}
+
+TEST(ModeSelector, InitialWorldRelative) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options, HeadTrackingMode::WORLD_RELATIVE);
+
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse());
+}
+
+TEST(ModeSelector, InitialScreenRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options, HeadTrackingMode::SCREEN_RELATIVE);
+
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse());
+}
+
+TEST(ModeSelector, WorldRelative) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, WorldRelativeStale) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options{.freshnessTimeout = 100};
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeStaleToWorldRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+ const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options{.freshnessTimeout = 100};
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.setWorldToHeadPose(50, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeInvalidToWorldRelative) {
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+ const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(50, std::nullopt);
+ selector.setWorldToHeadPose(50, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp
new file mode 100644
index 0000000..16e1712
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+namespace android {
+namespace media {
+
+ModeSelector::ModeSelector(const Options& options, HeadTrackingMode initialMode)
+ : mOptions(options), mDesiredMode(initialMode), mActualMode(initialMode) {}
+
+void ModeSelector::setDesiredMode(HeadTrackingMode mode) {
+ mDesiredMode = mode;
+}
+
+void ModeSelector::setScreenToStagePose(const Pose3f& screenToStage) {
+ mScreenToStage = screenToStage;
+}
+
+void ModeSelector::setScreenToHeadPose(int64_t timestamp,
+ const std::optional<Pose3f>& screenToHead) {
+ mScreenToHead = screenToHead;
+ mScreenToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+ mWorldToHead = worldToHead;
+ mWorldToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::calculateActualMode(int64_t timestamp) {
+ bool isValidScreenToHead = mScreenToHead.has_value() &&
+ timestamp - mScreenToHeadTimestamp < mOptions.freshnessTimeout;
+ bool isValidWorldToHead = mWorldToHead.has_value() &&
+ timestamp - mWorldToHeadTimestamp < mOptions.freshnessTimeout;
+
+ HeadTrackingMode mode = mDesiredMode;
+
+ // Optional downgrade from screen-relative to world-relative.
+ if (mode == HeadTrackingMode::SCREEN_RELATIVE) {
+ if (!isValidScreenToHead) {
+ mode = HeadTrackingMode::WORLD_RELATIVE;
+ }
+ }
+
+ // Optional downgrade from world-relative to static.
+ if (mode == HeadTrackingMode::WORLD_RELATIVE) {
+ if (!isValidWorldToHead) {
+ mode = HeadTrackingMode::STATIC;
+ }
+ }
+
+ mActualMode = mode;
+}
+
+void ModeSelector::calculate(int64_t timestamp) {
+ calculateActualMode(timestamp);
+
+ switch (mActualMode) {
+ case HeadTrackingMode::STATIC:
+ mHeadToStage = mScreenToStage;
+ break;
+
+ case HeadTrackingMode::WORLD_RELATIVE:
+ mHeadToStage = mWorldToHead.value().inverse() * mScreenToStage;
+ break;
+
+ case HeadTrackingMode::SCREEN_RELATIVE:
+ mHeadToStage = mScreenToHead.value().inverse() * mScreenToStage;
+ break;
+ }
+}
+
+Pose3f ModeSelector::getHeadToStagePose() const {
+ return mHeadToStage;
+}
+
+HeadTrackingMode ModeSelector::getActualMode() const {
+ return mActualMode;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector.h b/media/libheadtracking/ModeSelector.h
new file mode 100644
index 0000000..17a5142
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/HeadTrackingMode.h"
+#include "media/Pose.h"
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Head-tracking mode selector.
+ *
+ * This class is responsible for production of the determining pose for audio virtualization, based
+ * on a number of available sources and a selectable mode.
+ *
+ * Typical flow is:
+ * ModeSelector selector(...);
+ * while (...) {
+ * // Set inputs.
+ * selector.setFoo(...);
+ * selector.setBar(...);
+ *
+ * // Update outputs based on inputs.
+ * selector.calculate(...);
+ *
+ * // Get outputs.
+ * Pose3f pose = selector.getHeadToStagePose();
+ * }
+ *
+ * This class is not thread-safe, but thread-compatible.
+ *
+ * For details on the frames of reference involved, their composition and the definitions to the
+ * different modes, refer to:
+ * go/immersive-audio-frames
+ *
+ * The actual mode may deviate from the desired mode in the following cases:
+ * - When we cannot get a valid and fresh estimate of the screen-to-head pose, we will fall back
+ * from screen-relative to world-relative.
+ * - When we cannot get a fresh estimate of the world-to-head pose, we will fall back from
+ * world-relative to static.
+ *
+ * All the timestamps used here are of arbitrary units and origin. They just need to be consistent
+ * between all the calls and with the Options provided for determining freshness and rate limiting.
+ */
+class ModeSelector {
+ public:
+ struct Options {
+ int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+ };
+
+ ModeSelector(const Options& options, HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
+
+ /** Sets the desired head-tracking mode. */
+ void setDesiredMode(HeadTrackingMode mode);
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ void setScreenToStagePose(const Pose3f& screenToStage);
+
+ /**
+ * Set the screen-to-head pose, used in screen-relative mode.
+ * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+ * it applies to). nullopt can be used if it is determined that the listener is not in front of
+ * the screen.
+ */
+ void setScreenToHeadPose(int64_t timestamp, const std::optional<Pose3f>& screenToHead);
+
+ /**
+ * Set the world-to-head pose, used in world-relative mode.
+ * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+ * it applies to).
+ */
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+ /**
+ * Process all the previous inputs and update the outputs.
+ */
+ void calculate(int64_t timestamp);
+
+ /**
+ * Get the aggregate head-to-stage pose (primary output of this module).
+ */
+ Pose3f getHeadToStagePose() const;
+
+ /**
+ * Get the actual head-tracking mode (which may deviate from the desired one as mentioned in the
+ * class documentation above).
+ */
+ HeadTrackingMode getActualMode() const;
+
+ private:
+ const Options mOptions;
+
+ HeadTrackingMode mDesiredMode;
+ Pose3f mScreenToStage;
+ std::optional<Pose3f> mScreenToHead;
+ int64_t mScreenToHeadTimestamp;
+ std::optional<Pose3f> mWorldToHead;
+ int64_t mWorldToHeadTimestamp;
+
+ HeadTrackingMode mActualMode;
+ Pose3f mHeadToStage;
+
+ void calculateActualMode(int64_t timestamp);
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/OWNERS b/media/libheadtracking/OWNERS
new file mode 100644
index 0000000..e5d0370
--- /dev/null
+++ b/media/libheadtracking/OWNERS
@@ -0,0 +1,2 @@
+ytai@google.com
+elaurent@google.com
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
new file mode 100644
index 0000000..a9e18ce
--- /dev/null
+++ b/media/libheadtracking/Pose-test.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using android::media::Pose3f;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(Pose, CtorDefault) {
+ Pose3f pose;
+ EXPECT_EQ(pose.translation(), Vector3f::Zero());
+ EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorRotation) {
+ Quaternionf rot = Quaternionf::UnitRandom();
+ Pose3f pose(rot);
+ EXPECT_EQ(pose.translation(), Vector3f::Zero());
+ EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, CtorTranslation) {
+ Vector3f trans{1, 2, 3};
+ Pose3f pose(trans);
+ EXPECT_EQ(pose.translation(), trans);
+ EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorTranslationRotation) {
+ Quaternionf rot = Quaternionf::UnitRandom();
+ Vector3f trans{1, 2, 3};
+ Pose3f pose(trans, rot);
+ EXPECT_EQ(pose.translation(), trans);
+ EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, Inverse) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+ EXPECT_EQ(pose.inverse() * pose, Pose3f());
+ EXPECT_EQ(pose * pose.inverse(), Pose3f());
+}
+
+TEST(Pose, IsApprox) {
+ constexpr float eps = std::numeric_limits<float>::epsilon();
+
+ EXPECT_EQ(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1 + eps, 2 + eps, 3 + eps},
+ rotationVectorToQuaternion({4 + eps, 5 + eps, 6 + eps})));
+
+ EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1.01, 2, 3}, rotationVectorToQuaternion({4, 5, 6})));
+
+ EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1, 2, 3}, rotationVectorToQuaternion({4.01, 5, 6})));
+}
+
+TEST(Pose, Compose) {
+ Pose3f p1({1, 2, 3}, rotateZ(M_PI_2));
+ Pose3f p2({4, 5, 6}, rotateX(M_PI_2));
+ Pose3f p3({-4, 6, 9}, p1.rotation() * p2.rotation());
+ EXPECT_EQ(p1 * p2, p3);
+}
+
+TEST(Pose, MoveWithRateLimit_NoLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 10, 10);
+ EXPECT_EQ(std::get<0>(result), to);
+ EXPECT_FALSE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_TranslationLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 0.5f, 10);
+ Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+ EXPECT_EQ(std::get<0>(result), expected);
+ EXPECT_TRUE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_RotationLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 10, M_PI_4);
+ Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+ EXPECT_EQ(std::get<0>(result), expected);
+ EXPECT_TRUE(std::get<1>(result));
+}
+
+TEST(Pose, FloatVectorRoundTrip1) {
+ // Rotation vector magnitude must be less than Pi.
+ std::vector<float> vec = { 1, 2, 3, 0.4, 0.5, 0.6};
+ std::optional<Pose3f> pose = Pose3f::fromVector(vec);
+ ASSERT_TRUE(pose.has_value());
+ std::vector<float> reconstructed = pose->toVector();
+ EXPECT_EQ(vec, reconstructed);
+}
+
+TEST(Pose, FloatVectorRoundTrip2) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+ std::vector<float> vec = pose.toVector();
+ std::optional<Pose3f> reconstructed = Pose3f::fromVector(vec);
+ ASSERT_TRUE(reconstructed.has_value());
+ EXPECT_EQ(pose, reconstructed.value());
+}
+
+TEST(Pose, FloatVectorInvalid) {
+ EXPECT_FALSE(Pose3f::fromVector({}).has_value());
+ EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5}).has_value());
+ EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5, 6, 7}).has_value());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
new file mode 100644
index 0000000..47241ce
--- /dev/null
+++ b/media/libheadtracking/Pose.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+#include "media/Twist.h"
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+using Eigen::Vector3f;
+
+std::optional<Pose3f> Pose3f::fromVector(const std::vector<float>& vec) {
+ if (vec.size() != 6) {
+ return std::nullopt;
+ }
+ return Pose3f({vec[0], vec[1], vec[2]}, rotationVectorToQuaternion({vec[3], vec[4], vec[5]}));
+}
+
+std::vector<float> Pose3f::toVector() const {
+ Eigen::Vector3f rot = quaternionToRotationVector(mRotation);
+ return {mTranslation[0], mTranslation[1], mTranslation[2], rot[0], rot[1], rot[2]};
+}
+
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+ float maxTranslationalVelocity,
+ float maxRotationalVelocity) {
+ // Never rate limit if both limits are set to infinity.
+ if (isinf(maxTranslationalVelocity) && isinf(maxRotationalVelocity)) {
+ return {to, false};
+ }
+ // Always rate limit if t is 0 (required to avoid division by 0).
+ if (t == 0) {
+ return {from, true};
+ }
+
+ Pose3f fromToTo = from.inverse() * to;
+ Twist3f twist = differentiate(fromToTo, t);
+ float angularRotationalRatio = twist.scalarRotationalVelocity() / maxRotationalVelocity;
+ float translationalVelocityRatio =
+ twist.scalarTranslationalVelocity() / maxTranslationalVelocity;
+ float maxRatio = std::max(angularRotationalRatio, translationalVelocityRatio);
+ if (maxRatio <= 1) {
+ return {to, false};
+ }
+ return {from * integrate(twist, t / maxRatio), true};
+}
+
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose) {
+ os << "translation: " << pose.translation().transpose()
+ << " quaternion: " << pose.rotation().coeffs().transpose();
+ return os;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
new file mode 100644
index 0000000..df0a05f
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <cmath>
+
+#include "PoseDriftCompensator.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseDriftCompensator::Options;
+
+TEST(PoseDriftCompensator, Initial) {
+ PoseDriftCompensator comp(Options{});
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, NoDrift) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(Options{});
+
+ // First pose sets the baseline.
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(2000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+ // Recentering resets the baseline.
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(3000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(4000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, NoDriftZeroTime) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(Options{});
+
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, Asymptotic) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 1, .rotationalDriftTimeConstant = 1});
+
+ // Set the same pose for a long time.
+ for (int64_t t = 0; t < 1000; ++t) {
+ comp.setInput(t, pose);
+ }
+
+ // Output would have faded to approx. identity.
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, Fast) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 1e7, .rotationalDriftTimeConstant = 1e7});
+
+ comp.setInput(0, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(2, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(3, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, Drift) {
+ Pose3f pose1({1, 2, 3}, rotateZ(-M_PI * 3 / 4));
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 500, .rotationalDriftTimeConstant = 1000});
+
+ // Establish a baseline.
+ comp.setInput(1000, Pose3f());
+
+ // Initial pose is used as is.
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), pose1);
+
+ // After 1000 ticks, our rotation should be exp(-1) and translation exp(-2) from identity.
+ comp.setInput(2000, pose1);
+ EXPECT_EQ(comp.getOutput(),
+ Pose3f(Vector3f{1, 2, 3} * std::expf(-2), rotateZ(-M_PI * 3 / 4 * std::expf(-1))));
+
+ // As long as the input stays the same, we'll continue to advance towards identity.
+ comp.setInput(3000, pose1);
+ EXPECT_EQ(comp.getOutput(),
+ Pose3f(Vector3f{1, 2, 3} * std::expf(-4), rotateZ(-M_PI * 3 / 4 * std::expf(-2))));
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
new file mode 100644
index 0000000..0e90cad
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseDriftCompensator.h"
+
+#include <cmath>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+PoseDriftCompensator::PoseDriftCompensator(const Options& options) : mOptions(options) {}
+
+void PoseDriftCompensator::setInput(int64_t timestamp, const Pose3f& input) {
+ if (mTimestamp.has_value()) {
+ // Avoid computation upon first input (only sets the initial state).
+ Pose3f prevInputToInput = mPrevInput.inverse() * input;
+ mOutput = scale(mOutput, timestamp - mTimestamp.value()) * prevInputToInput;
+ }
+ mPrevInput = input;
+ mTimestamp = timestamp;
+}
+
+void PoseDriftCompensator::recenter() {
+ mTimestamp.reset();
+ mOutput = Pose3f();
+}
+
+Pose3f PoseDriftCompensator::getOutput() const {
+ return mOutput;
+}
+
+Pose3f PoseDriftCompensator::scale(const Pose3f& pose, int64_t dt) {
+ // Translation.
+ Vector3f translation = pose.translation();
+ translation *= std::expf(-static_cast<float>(dt) / mOptions.translationalDriftTimeConstant);
+
+ // Rotation.
+ Vector3f rotationVec = quaternionToRotationVector(pose.rotation());
+ rotationVec *= std::expf(-static_cast<float>(dt) / mOptions.rotationalDriftTimeConstant);
+
+ return Pose3f(translation, rotationVectorToQuaternion(rotationVec));
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.h b/media/libheadtracking/PoseDriftCompensator.h
new file mode 100644
index 0000000..a71483b
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Drift compensator for a stream of poses.
+ *
+ * This is effectively a high-pass filter for a pose stream, removing any DC-offset / bias. The
+ * provided input stream will be "pulled" toward identity with an exponential decay filter with a
+ * configurable time constant. Rotation and translation are handled separately.
+ *
+ * Typical usage:
+ * PoseDriftCompensator comp(...);
+ *
+ * while (...) {
+ * comp.setInput(...);
+ * Pose3f output = comp.getOutput();
+ * }
+ *
+ * There doesn't need to be a 1:1 correspondence between setInput() and getOutput() calls. The
+ * output timestamp is always that of the last setInput() call. Calling recenter() will reset the
+ * bias to the current output, causing the output to be identity.
+ *
+ * The initial bias point is identity.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseDriftCompensator {
+ public:
+ struct Options {
+ float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ };
+
+ explicit PoseDriftCompensator(const Options& options);
+
+ void setInput(int64_t timestamp, const Pose3f& input);
+
+ void recenter();
+
+ Pose3f getOutput() const;
+
+ private:
+ const Options mOptions;
+
+ Pose3f mPrevInput;
+ Pose3f mOutput;
+ std::optional<int64_t> mTimestamp;
+
+ Pose3f scale(const Pose3f& pose, int64_t dt);
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseProcessingGraph.png b/media/libheadtracking/PoseProcessingGraph.png
new file mode 100644
index 0000000..0363068
--- /dev/null
+++ b/media/libheadtracking/PoseProcessingGraph.png
Binary files differ
diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp
new file mode 100644
index 0000000..f306183
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter-test.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "PoseRateLimiter.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseRateLimiter::Options;
+
+TEST(PoseRateLimiter, Initial) {
+ Pose3f target({1, 2, 3}, Quaternionf::UnitRandom());
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 10, .maxRotationalVelocity = 10});
+ limiter.setTarget(target);
+ EXPECT_EQ(limiter.calculatePose(1000), target);
+}
+
+TEST(PoseRateLimiter, UnlimitedZeroTime) {
+ Pose3f target1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f target2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseRateLimiter limiter(Options{});
+ limiter.setTarget(target1);
+ EXPECT_EQ(limiter.calculatePose(0), target1);
+ limiter.setTarget(target2);
+ EXPECT_EQ(limiter.calculatePose(0), target2);
+ limiter.setTarget(target1);
+ EXPECT_EQ(limiter.calculatePose(0), target1);
+}
+
+TEST(PoseRateLimiter, Limited) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+ Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1000), pose2);
+
+ // Rate limiting is inactive. Should track despite the violation.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1001), pose1);
+
+ // Enable rate limiting and observe gradual motion from pose1 to pose2.
+ limiter.enable();
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1002), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1003), Pose3f({1, 2, 5}, rotateZ(M_PI * 2 / 8)));
+ // Skip a tick.
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1005), Pose3f({1, 2, 7}, rotateZ(M_PI * 4 / 8)));
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1006), pose2);
+
+ // We reached the target, so rate limiting should now be disabled.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1007), pose1);
+}
+
+TEST(PoseRateLimiter, Reset) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+ Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1000), pose1);
+
+ // Enable rate limiting and observe gradual motion from pose1 to pose2.
+ limiter.enable();
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1001), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+
+ // Reset the pose and disable rate limiting.
+ limiter.reset(pose2);
+ EXPECT_EQ(limiter.calculatePose(1002), pose2);
+
+ // Rate limiting should now be disabled.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1003), pose1);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.cpp b/media/libheadtracking/PoseRateLimiter.cpp
new file mode 100644
index 0000000..380e22b
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+PoseRateLimiter::PoseRateLimiter(const Options& options) : mOptions(options), mLimiting(false) {}
+
+void PoseRateLimiter::enable() {
+ mLimiting = true;
+}
+
+void PoseRateLimiter::reset(const Pose3f& target) {
+ mLimiting = false;
+ mTargetPose = target;
+}
+
+void PoseRateLimiter::setTarget(const Pose3f& target) {
+ mTargetPose = target;
+}
+
+Pose3f PoseRateLimiter::calculatePose(int64_t timestamp) {
+ assert(mTargetPose.has_value());
+ Pose3f pose;
+ if (mLimiting && mOutput.has_value()) {
+ std::tie(pose, mLimiting) = moveWithRateLimit(
+ mOutput->pose, mTargetPose.value(), timestamp - mOutput->timestamp,
+ mOptions.maxTranslationalVelocity, mOptions.maxRotationalVelocity);
+ } else {
+ pose = mTargetPose.value();
+ }
+ mOutput = Point{pose, timestamp};
+ return pose;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.h b/media/libheadtracking/PoseRateLimiter.h
new file mode 100644
index 0000000..aa2fe80
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Limits a stream of poses to a given maximum translational and rotational velocities.
+ *
+ * Normal operation:
+ *
+ * Pose3f output;
+ * PoseRateLimiter limiter(...);
+ *
+ * // Limiting is disabled. Output will be the same as last input.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // Enable limiting. Output will no longer be necessarily the same as last input.
+ * limiter.enable();
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // When eventually the output has been able to catch up with the last input, the limiter
+ * // will be automatically disabled again and the output will match the input again.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * As shown above, the limiter is turned on manually via enable(), but turns off automatically as
+ * soon as the output is able to catch up to the input. The intention is that rate limiting will be
+ * turned on at specific times to smooth out any artificial discontinuities introduced to the pose
+ * stream, but the rest of the time will be a simple passthrough.
+ *
+ * setTarget(...) and calculatePose(...) don't have to be ordered in any particular way. However,
+ * setTarget or reset() must be called at least once prior to the first calculatePose().
+ *
+ * Calling reset() instead of setTarget() forces the output to the given pose and disables rate
+ * limiting.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseRateLimiter {
+ public:
+ struct Options {
+ float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+ float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+ };
+
+ explicit PoseRateLimiter(const Options& options);
+
+ void enable();
+
+ void reset(const Pose3f& target);
+ void setTarget(const Pose3f& target);
+
+ Pose3f calculatePose(int64_t timestamp);
+
+ private:
+ struct Point {
+ Pose3f pose;
+ int64_t timestamp;
+ };
+
+ const Options mOptions;
+ bool mLimiting;
+ std::optional<Pose3f> mTargetPose;
+ std::optional<Point> mOutput;
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp
new file mode 100644
index 0000000..e79e54a
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil-test.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(QuaternionUtil, RotationVectorToQuaternion) {
+ // 90 degrees around Z.
+ Vector3f rot = {0, 0, M_PI_2};
+ Quaternionf quat = rotationVectorToQuaternion(rot);
+ ASSERT_EQ(quat * Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+ ASSERT_EQ(quat * Vector3f(0, 1, 0), Vector3f(-1, 0, 0));
+ ASSERT_EQ(quat * Vector3f(0, 0, 1), Vector3f(0, 0, 1));
+}
+
+TEST(QuaternionUtil, QuaternionToRotationVector) {
+ Quaternionf quat = Quaternionf::FromTwoVectors(Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+ Vector3f rot = quaternionToRotationVector(quat);
+ ASSERT_EQ(rot, Vector3f(0, 0, M_PI_2));
+}
+
+TEST(QuaternionUtil, RoundTripFromQuaternion) {
+ Quaternionf quaternion = Quaternionf::UnitRandom();
+ EXPECT_EQ(quaternion, rotationVectorToQuaternion(quaternionToRotationVector(quaternion)));
+}
+
+TEST(QuaternionUtil, RoundTripFromVector) {
+ Vector3f vec{0.1, 0.2, 0.3};
+ EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec)));
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp
new file mode 100644
index 0000000..5d090de
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "QuaternionUtil.h"
+
+#include <cassert>
+
+namespace android {
+namespace media {
+
+using Eigen::NumTraits;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace {
+
+Vector3f LogSU2(const Quaternionf& q) {
+ // Implementation of the logarithmic map of SU(2) using atan.
+ // This follows Hertzberg et al. "Integrating Generic Sensor Fusion Algorithms
+ // with Sound State Representations through Encapsulation of Manifolds", Eq.
+ // (31)
+ // We use asin and acos instead of atan to enable the use of Eigen Autodiff
+ // with SU2.
+ const float sign_of_w = q.w() < 0.f ? -1.f : 1.f;
+ const float abs_w = sign_of_w * q.w();
+ const Vector3f v = sign_of_w * q.vec();
+ const float squared_norm_of_v = v.squaredNorm();
+
+ assert(abs(1.f - abs_w * abs_w - squared_norm_of_v) < NumTraits<float>::dummy_precision());
+
+ if (squared_norm_of_v > NumTraits<float>::dummy_precision()) {
+ const float norm_of_v = sqrt(squared_norm_of_v);
+ if (abs_w > NumTraits<float>::dummy_precision()) {
+ // asin(x) = acos(x) at x = 1/sqrt(2).
+ if (norm_of_v <= float(M_SQRT1_2)) {
+ return (asin(norm_of_v) / norm_of_v) * v;
+ }
+ return (acos(abs_w) / norm_of_v) * v;
+ }
+ return (M_PI_2 / norm_of_v) * v;
+ }
+
+ // Taylor expansion at squared_norm_of_v == 0
+ return (1.f / abs_w - squared_norm_of_v / (3.f * pow(abs_w, 3))) * v;
+}
+
+Quaternionf ExpSU2(const Vector3f& delta) {
+ Quaternionf q_delta;
+ const float theta_squared = delta.squaredNorm();
+ if (theta_squared > NumTraits<float>::dummy_precision()) {
+ const float theta = sqrt(theta_squared);
+ q_delta.w() = cos(theta);
+ q_delta.vec() = (sin(theta) / theta) * delta;
+ } else {
+ // taylor expansions around theta == 0
+ q_delta.w() = 1.f - 0.5f * theta_squared;
+ q_delta.vec() = (1.f - 1.f / 6.f * theta_squared) * delta;
+ }
+ return q_delta;
+}
+
+} // namespace
+
+Quaternionf rotationVectorToQuaternion(const Vector3f& rotationVector) {
+ // SU(2) is a double cover of SO(3), thus we have to half the tangent vector
+ // delta
+ const Vector3f half_delta = 0.5f * rotationVector;
+ return ExpSU2(half_delta);
+}
+
+Vector3f quaternionToRotationVector(const Quaternionf& quaternion) {
+ // SU(2) is a double cover of SO(3), thus we have to multiply the tangent
+ // vector delta by two
+ return 2.f * LogSU2(quaternion);
+}
+
+Quaternionf rotateX(float angle) {
+ return rotationVectorToQuaternion(Vector3f(1, 0, 0) * angle);
+}
+
+Quaternionf rotateY(float angle) {
+ return rotationVectorToQuaternion(Vector3f(0, 1, 0) * angle);
+}
+
+Quaternionf rotateZ(float angle) {
+ return rotationVectorToQuaternion(Vector3f(0, 0, 1) * angle);
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h
new file mode 100644
index 0000000..f7a2ca9
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a rotation vector to an equivalent quaternion.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
+
+/**
+ * Converts a quaternion to an equivalent rotation vector.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
+
+/**
+ * Returns a quaternion representing a rotation around the X-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateX(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateY(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateZ(float angle);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/README.md b/media/libheadtracking/README.md
new file mode 100644
index 0000000..3d5b71a
--- /dev/null
+++ b/media/libheadtracking/README.md
@@ -0,0 +1,185 @@
+# Head-Tracking Library For Immersive Audio
+
+This library handles the processing of head-tracking information, necessary for
+Immersive Audio functionality. It goes from bare sensor reading into the final
+pose fed into a virtualizer.
+
+## Basic Usage
+
+The main entry point into this library is the `HeadTrackingProcessor` class.
+This class is provided with the following inputs:
+
+- Head pose, relative to some arbitrary world frame.
+- Screen pose, relative to some arbitrary world frame.
+- Display orientation, defined as the angle between the "physical" screen and
+ the "logical" screen.
+- Transform between the screen and the sound stage.
+- Desired operational mode:
+ - Static: only the sound stage pose is taken into account. This will result
+ in an experience where the sound stage moved with the listener's head.
+ - World-relative: both the head pose and stage pose are taken into account.
+ This will result in an experience where the sound stage is perceived to be
+ located at a fixed place in the world.
+ - Screen-relative: the head pose, screen pose and stage pose are all taken
+ into account. This will result in an experience where the sound stage is
+ perceived to be located at a fixed place relative to the screen.
+
+Once inputs are provided, the `calculate()` method will make the following
+output available:
+
+- Stage pose, relative to the head. This aggregates all the inputs mentioned
+ above and is ready to be fed into a virtualizer.
+- Actual operational mode. May deviate from the desired one in cases where the
+ desired mode cannot be calculated (for example, as result of dropped messages
+ from one of the sensors).
+
+A `recenter()` operation is also available, which indicates to the system that
+whatever pose the screen and head are currently at should be considered as the
+"center" pose, or frame of reference.
+
+## Pose-Related Conventions
+
+### Naming and Composition
+
+When referring to poses in code, it is always good practice to follow
+conventional naming, which highlights the reference and target frames clearly:
+
+Bad:
+
+```
+Pose3f headPose;
+```
+
+Good:
+
+```
+Pose3f worldToHead; // “world” is the reference frame,
+ // “head” is the target frame.
+```
+
+By following this convention, it is easy to follow correct composition of poses,
+by making sure adjacent frames are identical:
+
+```
+Pose3f aToD = aToB * bToC * cToD;
+```
+
+And similarly, inverting the transform simply flips the reference and target:
+
+```
+Pose3f aToB = bToA.inverse();
+```
+
+### Twist
+
+“Twist” is to pose what velocity is to distance: it is the time-derivative of a
+pose, representing the change in pose over a short period of time. Its naming
+convention always states one frame, e.g.:
+Twist3f headTwist;
+
+This means that this twist represents the head-at-time-T to head-at-time-T+dt
+transform. Twists are not composable in the same way as poses.
+
+### Frames of Interest
+
+The frames of interest in this library are defined as follows:
+
+#### Head
+
+This is the listener’s head. The origin is at the center point between the
+ear-drums, the X-axis goes from left ear to right ear, Y-axis goes from the back
+of the head towards the face and Z-axis goes from the bottom of the head to the
+top.
+
+#### Screen
+
+This is the primary screen that the user will be looking at, which is relevant
+for some Immersive Audio use-cases, such as watching a movie. We will follow a
+different convention for this frame than what the Sensor framework uses. The
+origin is at the center of the screen. X-axis goes from left to right, Z-axis
+goes from the screen bottom to the screen top, Y-axis goes “into” the screen (
+from the direction of the viewer). The up/down/left/right of the screen are
+defined as the logical directions used for display. So when flipping the display
+orientation between “landscape” and “portrait”, the frame of reference will
+change with respect to the physical screen.
+
+#### Stage
+
+This is the frame of reference used by the virtualizer for positioning sound
+objects. It is not associated with any physical frame. In a typical
+multi-channel scenario, the listener is at the origin, the X-axis goes from left
+to right, Y-axis from back to front and Z-axis from down to up. For example, a
+front-right speaker is located at positive X, Y and Z=0, a height speaker will
+have a positive Z.
+
+#### World
+
+It is sometimes convenient to use an intermediate frame when dealing with
+head-to-screen transforms. The “world” frame is an arbitrary frame of reference
+in the physical world, relative to which we can measure the head pose and screen
+pose. In (very common) cases when we can’t establish such an absolute frame, we
+can take each measurement relative to a separate, arbitrary frame and high-pass
+the result.
+
+## Processing Description
+
+
+
+The diagram above illustrates the processing that takes place from the inputs to
+the outputs.
+
+### Predictor
+
+The Predictor block gets pose + twist (pose derivative) and extrapolates to
+obtain a predicted head pose (w/ given latency).
+
+### Drift / Bias Compensator
+
+The Drift / Bias Compensator blocks serve two purposes:
+
+- Compensate for floating reference axes by applying a high-pass filter, which
+ slowly pulls the pose toward identity.
+- Establish the reference frame for the poses by having the ability to set the
+ current pose as the reference for future poses (recentering). Effectively,
+ this is resetting the filter state to identity.
+
+### Orientation Compensation
+
+The Orientation Compensation block applies the display orientation to the screen
+pose to obtain the pose of the “logical screen” frame, in which the Y-axis is
+pointing in the direction of the logical screen “up” rather than the physical
+one.
+
+### Screen-Relative Pose
+
+The Screen-Relative Pose block is provided with a head pose and a screen pose
+and estimates the pose of the head relative to the screen. Optionally, this
+module may indicate that the user is likely not in front of the screen via the
+“valid” output.
+
+### Mode Selector
+
+The Mode Selector block aggregates the various sources of pose information into
+a head-to-stage pose that is going to feed the virtualizer. It is controlled by
+the “desired mode” signal that indicates whether the preference is to be in
+either static, world-relative or screen-relative.
+
+The actual mode may diverge from the desired mode. It is determined as follows:
+
+- If the desired mode is static, the actual mode is static.
+- If the desired mode is world-relative:
+ - If head poses are fresh, the actual mode is world-relative.
+ - Otherwise the actual mode is static.
+- If the desired mode is screen-relative:
+ - If head and screen poses are fresh and the ‘valid’ signal is asserted, the
+ actual mode is screen-relative.
+ - Otherwise, apply the same rules as the desired mode being world-relative.
+
+### Rate Limiter
+
+A Rate Limiter block is applied to the final output to smooth out any abrupt
+transitions caused by any of the following events:
+
+- Mode switch.
+- Display orientation switch.
+- Recenter operation.
diff --git a/media/libheadtracking/ScreenHeadFusion-test.cpp b/media/libheadtracking/ScreenHeadFusion-test.cpp
new file mode 100644
index 0000000..ecf27f5
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion-test.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "ScreenHeadFusion.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(ScreenHeadFusion, Init) {
+ ScreenHeadFusion fusion;
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoHead) {
+ ScreenHeadFusion fusion;
+ fusion.setWorldToScreenPose(0, Pose3f());
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoScreen) {
+ ScreenHeadFusion fusion;
+ fusion.setWorldToHeadPose(0, Pose3f());
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate) {
+ Pose3f worldToScreen1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f worldToHead1({4, 5, 6}, Quaternionf::UnitRandom());
+ Pose3f worldToScreen2({11, 12, 13}, Quaternionf::UnitRandom());
+ Pose3f worldToHead2({14, 15, 16}, Quaternionf::UnitRandom());
+
+ ScreenHeadFusion fusion;
+ fusion.setWorldToHeadPose(123, worldToHead1);
+ fusion.setWorldToScreenPose(456, worldToScreen1);
+ auto result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(123, result->timestamp);
+ EXPECT_EQ(worldToScreen1.inverse() * worldToHead1, result->pose);
+
+ fusion.setWorldToHeadPose(567, worldToHead2);
+ result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(456, result->timestamp);
+ EXPECT_EQ(worldToScreen1.inverse() * worldToHead2, result->pose);
+
+ fusion.setWorldToScreenPose(678, worldToScreen2);
+ result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(567, result->timestamp);
+ EXPECT_EQ(worldToScreen2.inverse() * worldToHead2, result->pose);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.cpp b/media/libheadtracking/ScreenHeadFusion.cpp
new file mode 100644
index 0000000..f023570
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScreenHeadFusion.h"
+
+namespace android {
+namespace media {
+
+void ScreenHeadFusion::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+ mWorldToHead = TimestampedPose{.timestamp = timestamp, .pose = worldToHead};
+}
+
+void ScreenHeadFusion::setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) {
+ mWorldToScreen = TimestampedPose{.timestamp = timestamp, .pose = worldToScreen};
+}
+
+std::optional<ScreenHeadFusion::TimestampedPose> ScreenHeadFusion::calculate() {
+ // TODO: this is temporary, simplistic logic.
+ if (!mWorldToHead.has_value() || !mWorldToScreen.has_value()) {
+ return std::nullopt;
+ }
+ return TimestampedPose{
+ .timestamp = std::min(mWorldToHead->timestamp, mWorldToScreen->timestamp),
+ .pose = mWorldToScreen->pose.inverse() * mWorldToHead->pose};
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.h b/media/libheadtracking/ScreenHeadFusion.h
new file mode 100644
index 0000000..ee81100
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Combines world-to-head pose with world-to-screen pose to obtain screen-to-head.
+ *
+ * Input poses may arrive separately. The last pose of each kind is taken into account. The
+ * timestamp of the output is the earlier (older) timestamp of the two inputs.
+ *
+ * Output may be nullopt in the following cases:
+ * - Either one of the inputs has not yet been provided.
+ * - It is estimated that the user is no longer facing the screen.
+ *
+ * Typical usage:
+ *
+ * ScreenHeadFusion fusion(...);
+ * fusion.setWorldToHeadPose(...);
+ * fusion.setWorldToScreenPose(...);
+ * auto output = fusion.calculate();
+ *
+ * This class is not thread-safe, but thread-compatible.
+ */
+class ScreenHeadFusion {
+ public:
+ struct TimestampedPose {
+ int64_t timestamp;
+ Pose3f pose;
+ };
+
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+ void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen);
+
+ /**
+ * Returns the screen-to-head pose, or nullopt if invalid.
+ */
+ std::optional<TimestampedPose> calculate();
+
+ private:
+ std::optional<TimestampedPose> mWorldToHead;
+ std::optional<TimestampedPose> mWorldToScreen;
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/SensorPoseProvider-example.cpp b/media/libheadtracking/SensorPoseProvider-example.cpp
new file mode 100644
index 0000000..88e222e
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider-example.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <iostream>
+
+#include <android/sensor.h>
+#include <hardware/sensors.h>
+#include <utils/SystemClock.h>
+
+#include <media/SensorPoseProvider.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorManager.h>
+
+using android::elapsedRealtimeNano;
+using android::Sensor;
+using android::SensorManager;
+using android::String16;
+using android::media::Pose3f;
+using android::media::SensorPoseProvider;
+using android::media::Twist3f;
+
+using namespace std::chrono_literals;
+
+const char kPackageName[] = "SensorPoseProvider-example";
+
+class Listener : public SensorPoseProvider::Listener {
+ public:
+ void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+ const std::optional<Twist3f>& twist, bool isNewReference) override {
+ int64_t now = elapsedRealtimeNano();
+
+ std::cout << "onPose t=" << timestamp
+ << " lag=" << ((now - timestamp) / 1e6) << "[ms]"
+ << " sensor=" << handle
+ << " pose=" << pose
+ << " twist=";
+ if (twist.has_value()) {
+ std::cout << twist.value();
+ } else {
+ std::cout << "<none>";
+ }
+ std::cout << " isNewReference=" << isNewReference << std::endl;
+ }
+};
+
+int main() {
+ SensorManager& sensorManager = SensorManager::getInstanceForPackage(String16(kPackageName));
+
+ const Sensor* headSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_GAME_ROTATION_VECTOR);
+ const Sensor* screenSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_ROTATION_VECTOR);
+
+ Listener listener;
+
+ std::unique_ptr<SensorPoseProvider> provider =
+ SensorPoseProvider::create(kPackageName, &listener);
+ if (!provider->startSensor(headSensor->getHandle(), 500ms)) {
+ std::cout << "Failed to start head sensor" << std::endl;
+ }
+ sleep(2);
+ if (!provider->startSensor(screenSensor->getHandle(), 500ms)) {
+ std::cout << "Failed to start screenSensor sensor" << std::endl;
+ }
+ sleep(2);
+ provider->stopSensor(headSensor->getHandle());
+ sleep(2);
+ return 0;
+}
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
new file mode 100644
index 0000000..ec5e1ec
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/SensorPoseProvider.h>
+
+#define LOG_TAG "SensorPoseProvider"
+
+#include <inttypes.h>
+
+#include <future>
+#include <map>
+#include <thread>
+
+#include <android-base/thread_annotations.h>
+#include <log/log_main.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorEventQueue.h>
+#include <sensor/SensorManager.h>
+#include <utils/Looper.h>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+// Identifier to use for our event queue on the loop.
+// The number 19 is arbitrary, only useful if using multiple objects on the same looper.
+constexpr int kIdent = 19;
+
+static inline Looper* ALooper_to_Looper(ALooper* alooper) {
+ return reinterpret_cast<Looper*>(alooper);
+}
+
+static inline ALooper* Looper_to_ALooper(Looper* looper) {
+ return reinterpret_cast<ALooper*>(looper);
+}
+
+/**
+ * RAII-wrapper around SensorEventQueue, which unregisters it on destruction.
+ */
+class EventQueueGuard {
+ public:
+ EventQueueGuard(const sp<SensorEventQueue>& queue, Looper* looper) : mQueue(queue) {
+ mQueue->looper = Looper_to_ALooper(looper);
+ mQueue->requestAdditionalInfo = false;
+ looper->addFd(mQueue->getFd(), kIdent, ALOOPER_EVENT_INPUT, nullptr, nullptr);
+ }
+
+ ~EventQueueGuard() {
+ if (mQueue) {
+ ALooper_to_Looper(mQueue->looper)->removeFd(mQueue->getFd());
+ }
+ }
+
+ EventQueueGuard(const EventQueueGuard&) = delete;
+ EventQueueGuard& operator=(const EventQueueGuard&) = delete;
+
+ [[nodiscard]] SensorEventQueue* get() const { return mQueue.get(); }
+
+ private:
+ sp<SensorEventQueue> mQueue;
+};
+
+/**
+ * RAII-wrapper around an enabled sensor, which disables it upon destruction.
+ */
+class SensorEnableGuard {
+ public:
+ SensorEnableGuard(const sp<SensorEventQueue>& queue, int32_t sensor)
+ : mQueue(queue), mSensor(sensor) {}
+
+ ~SensorEnableGuard() {
+ if (mSensor != SensorPoseProvider::INVALID_HANDLE) {
+ int ret = mQueue->disableSensor(mSensor);
+ if (ret) {
+ ALOGE("Failed to disable sensor: %s", strerror(ret));
+ }
+ }
+ }
+
+ SensorEnableGuard(const SensorEnableGuard&) = delete;
+ SensorEnableGuard& operator=(const SensorEnableGuard&) = delete;
+
+ // Enable moving.
+ SensorEnableGuard(SensorEnableGuard&& other) : mQueue(other.mQueue), mSensor(other.mSensor) {
+ other.mSensor = SensorPoseProvider::INVALID_HANDLE;
+ }
+
+ private:
+ sp<SensorEventQueue> const mQueue;
+ int32_t mSensor;
+};
+
+/**
+ * Streams the required events to a PoseListener, based on events originating from the Sensor stack.
+ */
+class SensorPoseProviderImpl : public SensorPoseProvider {
+ public:
+ static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener) {
+ std::unique_ptr<SensorPoseProviderImpl> result(
+ new SensorPoseProviderImpl(packageName, listener));
+ return result->waitInitFinished() ? std::move(result) : nullptr;
+ }
+
+ ~SensorPoseProviderImpl() override {
+ // Disable all active sensors.
+ mEnabledSensors.clear();
+ mLooper->wake();
+ mThread.join();
+ }
+
+ bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) override {
+ // Figure out the sensor's data format.
+ DataFormat format = getSensorFormat(sensor);
+ if (format == DataFormat::kUnknown) {
+ ALOGE("Unknown format for sensor %" PRId32, sensor);
+ return false;
+ }
+
+ {
+ std::lock_guard lock(mMutex);
+ mEnabledSensorFormats.emplace(sensor, format);
+ }
+
+ // Enable the sensor.
+ if (mQueue->enableSensor(sensor, samplingPeriod.count(), 0, 0)) {
+ ALOGE("Failed to enable sensor");
+ std::lock_guard lock(mMutex);
+ mEnabledSensorFormats.erase(sensor);
+ return false;
+ }
+
+ mEnabledSensors.emplace(sensor, SensorEnableGuard(mQueue.get(), sensor));
+ return true;
+ }
+
+ void stopSensor(int handle) override {
+ mEnabledSensors.erase(handle);
+ std::lock_guard lock(mMutex);
+ mEnabledSensorFormats.erase(handle);
+ }
+
+ private:
+ enum DataFormat {
+ kUnknown,
+ kQuaternion,
+ kRotationVectorsAndFlags,
+ };
+
+ struct PoseEvent {
+ Pose3f pose;
+ std::optional<Twist3f> twist;
+ bool isNewReference;
+ };
+
+ sp<Looper> mLooper;
+ Listener* const mListener;
+ SensorManager* const mSensorManager;
+ std::thread mThread;
+ std::mutex mMutex;
+ std::map<int32_t, SensorEnableGuard> mEnabledSensors;
+ std::map<int32_t, DataFormat> mEnabledSensorFormats GUARDED_BY(mMutex);
+ sp<SensorEventQueue> mQueue;
+
+ // We must do some of the initialization operations on the worker thread, because the API relies
+ // on the thread-local looper. In addition, as a matter of convenience, we store some of the
+ // state on the stack.
+ // For that reason, we use a two-step initialization approach, where the ctor mostly just starts
+ // the worker thread and that thread would notify, via the promise below whenever initialization
+ // is finished, and whether it was successful.
+ std::promise<bool> mInitPromise;
+
+ SensorPoseProviderImpl(const char* packageName, Listener* listener)
+ : mListener(listener),
+ mSensorManager(&SensorManager::getInstanceForPackage(String16(packageName))),
+ mThread([this] { threadFunc(); }) {}
+
+ void initFinished(bool success) { mInitPromise.set_value(success); }
+
+ bool waitInitFinished() { return mInitPromise.get_future().get(); }
+
+ void threadFunc() {
+ // Obtain looper.
+ mLooper = Looper::prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+
+ // Create event queue.
+ mQueue = mSensorManager->createEventQueue();
+
+ if (mQueue == nullptr) {
+ ALOGE("Failed to create a sensor event queue");
+ initFinished(false);
+ return;
+ }
+
+ EventQueueGuard eventQueueGuard(mQueue, mLooper.get());
+
+ initFinished(true);
+
+ while (true) {
+ int ret = mLooper->pollOnce(-1 /* no timeout */, nullptr, nullptr, nullptr);
+
+ switch (ret) {
+ case ALOOPER_POLL_WAKE:
+ // Normal way to exit.
+ return;
+
+ case kIdent:
+ // Possible events on our queue.
+ break;
+
+ default:
+ ALOGE("Unexpected status out of Looper::pollOnce: %d", ret);
+ }
+
+ // Process an event.
+ ASensorEvent event;
+ ssize_t actual = mQueue->read(&event, 1);
+ if (actual > 0) {
+ mQueue->sendAck(&event, actual);
+ }
+ ssize_t size = mQueue->filterEvents(&event, actual);
+
+ if (size < 0 || size > 1) {
+ ALOGE("Unexpected return value from SensorEventQueue::filterEvents: %zd", size);
+ break;
+ }
+ if (size == 0) {
+ // No events.
+ continue;
+ }
+
+ handleEvent(event);
+ }
+ }
+
+ void handleEvent(const ASensorEvent& event) {
+ DataFormat format;
+ {
+ std::lock_guard lock(mMutex);
+ auto iter = mEnabledSensorFormats.find(event.sensor);
+ if (iter == mEnabledSensorFormats.end()) {
+ // This can happen if we have any pending events shortly after stopping.
+ return;
+ }
+ format = iter->second;
+ }
+ auto value = parseEvent(event, format);
+ mListener->onPose(event.timestamp, event.sensor, value.pose, value.twist,
+ value.isNewReference);
+ }
+
+ DataFormat getSensorFormat(int32_t handle) {
+ std::optional<const Sensor> sensor = getSensorByHandle(handle);
+ if (!sensor) {
+ ALOGE("Sensor not found: %d", handle);
+ return DataFormat::kUnknown;
+ }
+ if (sensor->getType() == ASENSOR_TYPE_ROTATION_VECTOR ||
+ sensor->getType() == ASENSOR_TYPE_GAME_ROTATION_VECTOR) {
+ return DataFormat::kQuaternion;
+ }
+
+ if (sensor->getStringType() == "com.google.hardware.sensor.hid_dynamic.headtracker") {
+ return DataFormat::kRotationVectorsAndFlags;
+ }
+
+ return DataFormat::kUnknown;
+ }
+
+ std::optional<const Sensor> getSensorByHandle(int32_t handle) {
+ const Sensor* const* list;
+ ssize_t size;
+
+ // Search static sensor list.
+ size = mSensorManager->getSensorList(&list);
+ if (size < 0) {
+ ALOGE("getSensorList failed with error code %zd", size);
+ return std::nullopt;
+ }
+ for (size_t i = 0; i < size; ++i) {
+ if (list[i]->getHandle() == handle) {
+ return *list[i];
+ }
+ }
+
+ // Search dynamic sensor list.
+ Vector<Sensor> dynList;
+ size = mSensorManager->getDynamicSensorList(dynList);
+ if (size < 0) {
+ ALOGE("getDynamicSensorList failed with error code %zd", size);
+ return std::nullopt;
+ }
+ for (size_t i = 0; i < size; ++i) {
+ if (dynList[i].getHandle() == handle) {
+ return dynList[i];
+ }
+ }
+
+ return std::nullopt;
+ }
+
+ static PoseEvent parseEvent(const ASensorEvent& event, DataFormat format) {
+ // TODO(ytai): Add more types.
+ switch (format) {
+ case DataFormat::kQuaternion: {
+ Eigen::Quaternionf quat(event.data[3], event.data[0], event.data[1], event.data[2]);
+ // Adapt to different frame convention.
+ quat *= rotateX(-M_PI_2);
+ return PoseEvent{Pose3f(quat), std::optional<Twist3f>(), false};
+ }
+
+ case DataFormat::kRotationVectorsAndFlags: {
+ // Custom sensor, assumed to contain:
+ // 3 floats representing orientation as a rotation vector (in rad).
+ // 3 floats representing angular velocity as a rotation vector (in rad/s).
+ // 1 uint32_t of flags, where:
+ // - LSb is '1' iff the given sample is the first one in a new frame of reference.
+ // - The rest of the bits are reserved for future use.
+ Eigen::Vector3f rotation = {event.data[0], event.data[1], event.data[2]};
+ Eigen::Vector3f twist = {event.data[3], event.data[4], event.data[5]};
+ Eigen::Quaternionf quat = rotationVectorToQuaternion(rotation);
+ uint32_t flags = *reinterpret_cast<const uint32_t*>(&event.data[6]);
+ return PoseEvent{Pose3f(quat), Twist3f(Eigen::Vector3f::Zero(), twist),
+ (flags & (1 << 0)) != 0};
+ }
+
+ default:
+ LOG_ALWAYS_FATAL("Unexpected sensor type: %d", static_cast<int>(format));
+ }
+ }
+};
+
+} // namespace
+
+std::unique_ptr<SensorPoseProvider> SensorPoseProvider::create(const char* packageName,
+ Listener* listener) {
+ return SensorPoseProviderImpl::create(packageName, listener);
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/TestUtil.h b/media/libheadtracking/TestUtil.h
new file mode 100644
index 0000000..4636d86
--- /dev/null
+++ b/media/libheadtracking/TestUtil.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include "media/Pose.h"
+#include "media/Twist.h"
+
+namespace {
+
+constexpr float kPoseComparisonPrecision = 1e-5;
+
+} // namespace
+
+// These specializations make {EXPECT,ASSERT}_{EQ,NE} work correctly for Pose3f, Twist3f, Vector3f
+// and Quaternionf.
+namespace testing {
+namespace internal {
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Pose3f, android::media::Pose3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+ const android::media::Pose3f& rhs) {
+ if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Pose3f, android::media::Pose3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+ const android::media::Pose3f& rhs) {
+ if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Twist3f, android::media::Twist3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+ const android::media::Twist3f& rhs) {
+ if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Twist3f, android::media::Twist3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+ const android::media::Twist3f& rhs) {
+ if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+ const char* rhs_expression,
+ const Eigen::Vector3f& lhs,
+ const Eigen::Vector3f& rhs) {
+ if (lhs.isApprox(rhs)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+ const char* rhs_expression,
+ const Eigen::Vector3f& lhs,
+ const Eigen::Vector3f& rhs) {
+ if (!lhs.isApprox(rhs)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Quaternionf, Eigen::Quaternionf>(
+ const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+ const Eigen::Quaternionf& rhs) {
+ // Negating the coefs results in an equivalent quaternion.
+ if (lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs()))) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Quaternionf, Eigen::Quaternionf>(
+ const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+ const Eigen::Quaternionf& rhs) {
+ // Negating the coefs results in an equivalent quaternion.
+ if (!(lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs())))) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+} // namespace internal
+} // namespace testing
diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp
new file mode 100644
index 0000000..7984e1e
--- /dev/null
+++ b/media/libheadtracking/Twist-test.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+// A default-constructed twist must represent zero motion.
+TEST(Twist, DefaultCtor) {
+    const Twist3f zeroTwist;
+    EXPECT_EQ(zeroTwist.translationalVelocity(), Vector3f::Zero());
+    EXPECT_EQ(zeroTwist.rotationalVelocity(), Vector3f::Zero());
+    EXPECT_FLOAT_EQ(zeroTwist.scalarTranslationalVelocity(), 0);
+    EXPECT_FLOAT_EQ(zeroTwist.scalarRotationalVelocity(), 0);
+}
+
+// The constructor must store both velocity components verbatim; the scalar
+// accessors return the Euclidean norms of those components.
+TEST(Twist, FullCtor) {
+    const Vector3f rotational(1, 2, 3);
+    const Vector3f translational(4, 5, 6);
+    const Twist3f twist(translational, rotational);
+    EXPECT_EQ(twist.translationalVelocity(), translational);
+    EXPECT_EQ(twist.rotationalVelocity(), rotational);
+    // |(1,2,3)| = sqrt(14), |(4,5,6)| = sqrt(77).
+    EXPECT_FLOAT_EQ(twist.scalarRotationalVelocity(), std::sqrt(14.f));
+    EXPECT_FLOAT_EQ(twist.scalarTranslationalVelocity(), std::sqrt(77.f));
+}
+
+// Integrating a constant twist for 2 seconds doubles both the displacement and
+// the rotation angle.
+TEST(Twist, Integrate) {
+    const Vector3f translationalVelocity(1, 2, 3);
+    // Rotating at 45 deg/sec around the Z axis.
+    const Vector3f rotationalVelocity(0, 0, M_PI_4);
+    const Twist3f twist(translationalVelocity, rotationalVelocity);
+
+    const Pose3f pose = integrate(twist, 2.f);
+
+    EXPECT_EQ(pose, Pose3f(Vector3f(2, 4, 6), rotateZ(M_PI_2)));
+}
+
+// Differentiation is the inverse of integration: dividing the motion by its
+// duration recovers the constant twist.
+TEST(Twist, Differentiate) {
+    const Pose3f pose(Vector3f(2, 4, 6), rotateZ(M_PI_2));
+
+    const Twist3f twist = differentiate(pose, 2.f);
+
+    EXPECT_EQ(twist, Twist3f(Vector3f(1, 2, 3), Vector3f(0, 0, M_PI_4)));
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp
new file mode 100644
index 0000000..664c4d5
--- /dev/null
+++ b/media/libheadtracking/Twist.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+// Integrates a constant twist over time step dt, producing the rigid transform
+// between the start and the end of the motion.
+Pose3f integrate(const Twist3f& twist, float dt) {
+    // Scale both velocity components by the time step; the scaled rotation vector
+    // is then converted to a quaternion to build the pose.
+    const Eigen::Vector3f displacement = twist.translationalVelocity() * dt;
+    const Eigen::Quaternionf rotation =
+            rotationVectorToQuaternion(twist.rotationalVelocity() * dt);
+    return Pose3f(displacement, rotation);
+}
+
+// Recovers the constant twist that would produce the given pose over duration dt.
+// Note: a dt of zero yields non-finite (inf/NaN) velocity components.
+Twist3f differentiate(const Pose3f& pose, float dt) {
+    return Twist3f(pose.translation() / dt,
+                   quaternionToRotationVector(pose.rotation()) / dt);
+}
+
+// Pretty-printer, mainly for test/debug output.
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist) {
+    return os << "translation: " << twist.translationalVelocity().transpose()
+              << " rotation vector: " << twist.rotationalVelocity().transpose();
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingMode.h b/media/libheadtracking/include/media/HeadTrackingMode.h
new file mode 100644
index 0000000..38496e8
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingMode.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+namespace android {
+namespace media {
+
+/**
+ * Mode of head-tracking.
+ * Used as both the "desired" and the "actual" mode in HeadTrackingProcessor; the
+ * actual mode may deviate from the desired one (see
+ * HeadTrackingProcessor::getActualMode()).
+ */
+enum class HeadTrackingMode {
+    /** No head-tracking - screen-to-head pose is assumed to be identity. */
+    STATIC,
+    /** Head tracking enabled - world-to-screen pose is assumed to be identity. */
+    WORLD_RELATIVE,
+    /** Full screen-to-head tracking enabled. */
+    SCREEN_RELATIVE,
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
new file mode 100644
index 0000000..9fea273
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <cstdint>
+#include <limits>
+#include <memory>
+
+#include "HeadTrackingMode.h"
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Main entry-point for this library.
+ * This interface encompasses all the processing required for determining the head-to-stage pose
+ * used for audio virtualization.
+ * The usage involves periodic setting of the inputs, calling calculate() and obtaining the outputs.
+ * This class is not thread-safe, but thread-compatible.
+ */
+class HeadTrackingProcessor {
+  public:
+    virtual ~HeadTrackingProcessor() = default;
+
+    /**
+     * Tuning parameters. The defaults effectively disable all limits: infinite
+     * velocities / time constants, a freshness timeout of "never", and no prediction.
+     * NOTE(review): the field docs below are inferred from the field names, and units
+     * are not specified in this header (velocities per second? timeout/duration on
+     * the same clock as the timestamps passed to the setters?) — confirm against the
+     * implementation.
+     */
+    struct Options {
+        /** Presumed limit on the translational velocity of the output pose. */
+        float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+        /** Presumed limit on the rotational velocity of the output pose. */
+        float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+        /** Presumed time constant for translational drift compensation. */
+        float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+        /** Presumed time constant for rotational drift compensation. */
+        float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+        /** Presumed validity window for input poses that receive no fresh update. */
+        int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+        /** Presumed look-ahead used to extrapolate the head pose. */
+        float predictionDuration = 0;
+    };
+
+    /** Sets the desired head-tracking mode. */
+    virtual void setDesiredMode(HeadTrackingMode mode) = 0;
+
+    /**
+     * Sets the world-to-head pose and head twist (velocity).
+     * headTwist is given in the head coordinate frame.
+     */
+    virtual void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+                                    const Twist3f& headTwist) = 0;
+
+    /**
+     * Sets the world-to-screen pose.
+     */
+    virtual void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) = 0;
+
+    /**
+     * Set the screen-to-stage pose, used in all modes.
+     */
+    virtual void setScreenToStagePose(const Pose3f& screenToStage) = 0;
+
+    /**
+     * Sets the display orientation.
+     * Orientation is expressed in the angle of rotation from the physical "up" side of the screen
+     * to the logical "up" side of the content displayed on the screen. Counterclockwise angles, as
+     * viewed while facing the screen are positive.
+     */
+    virtual void setDisplayOrientation(float physicalToLogicalAngle) = 0;
+
+    /**
+     * Process all the previous inputs and update the outputs.
+     */
+    virtual void calculate(int64_t timestamp) = 0;
+
+    /**
+     * Get the aggregate head-to-stage pose (primary output of this module).
+     */
+    virtual Pose3f getHeadToStagePose() const = 0;
+
+    /**
+     * Get the actual head-tracking mode (which may deviate from the desired one as mentioned in the
+     * class documentation above).
+     */
+    virtual HeadTrackingMode getActualMode() const = 0;
+
+    /**
+     * This causes the current poses for both the head and/or screen to be considered "center".
+     */
+    virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
+};
+
+/**
+ * Creates an instance featuring a default implementation of the HeadTrackingProcessor interface.
+ */
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcessor(
+ const HeadTrackingProcessor::Options& options,
+ HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/Pose.h b/media/libheadtracking/include/media/Pose.h
new file mode 100644
index 0000000..e660bb9
--- /dev/null
+++ b/media/libheadtracking/include/media/Pose.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+#include <vector>
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF pose.
+ * This class represents a proper rigid transformation (translation + rotation) between a reference
+ * frame and a target frame,
+ *
+ * See https://en.wikipedia.org/wiki/Six_degrees_of_freedom
+ */
+class Pose3f {
+  public:
+    /** Typical precision for isApprox comparisons. */
+    static constexpr float kDummyPrecision = 1e-5f;
+
+    Pose3f(const Eigen::Vector3f& translation, const Eigen::Quaternionf& rotation)
+        : mTranslation(translation), mRotation(rotation) {}
+
+    explicit Pose3f(const Eigen::Vector3f& translation)
+        : Pose3f(translation, Eigen::Quaternionf::Identity()) {}
+
+    explicit Pose3f(const Eigen::Quaternionf& rotation)
+        : Pose3f(Eigen::Vector3f::Zero(), rotation) {}
+
+    /** Identity pose: zero translation, identity rotation. */
+    Pose3f() : Pose3f(Eigen::Vector3f::Zero(), Eigen::Quaternionf::Identity()) {}
+
+    // Copies are member-wise; defaulted (rule of zero) instead of the previous
+    // hand-written copy constructor / assignment, which performed the same copy.
+    Pose3f(const Pose3f& other) = default;
+    Pose3f& operator=(const Pose3f& other) = default;
+
+    /**
+     * Create instance from a vector-of-floats representation.
+     * The vector is expected to have exactly 6 elements, where the first three are a translation
+     * vector and the last three are a rotation vector.
+     *
+     * Returns nullopt if the input vector is illegal.
+     */
+    static std::optional<Pose3f> fromVector(const std::vector<float>& vec);
+
+    /**
+     * Convert instance to a vector-of-floats representation.
+     * The vector will have exactly 6 elements, where the first three are a translation vector and
+     * the last three are a rotation vector.
+     */
+    std::vector<float> toVector() const;
+
+    Eigen::Vector3f translation() const { return mTranslation; }
+    Eigen::Quaternionf rotation() const { return mRotation; }
+
+    /**
+     * Reverses the reference and target frames.
+     */
+    Pose3f inverse() const {
+        Eigen::Quaternionf invRotation = mRotation.inverse();
+        return Pose3f(-(invRotation * translation()), invRotation);
+    }
+
+    /**
+     * Composes (chains) together two poses. By convention, this only makes sense if the target
+     * frame of the left-hand pose is the same as the reference frame of the right-hand pose.
+     * Note that this operator is not commutative.
+     */
+    Pose3f operator*(const Pose3f& other) const {
+        Pose3f result = *this;
+        result *= other;
+        return result;
+    }
+
+    Pose3f& operator*=(const Pose3f& other) {
+        mTranslation += mRotation * other.mTranslation;
+        mRotation *= other.mRotation;
+        return *this;
+    }
+
+    /**
+     * This is an imprecise "fuzzy" comparison, which is only to be used for validity-testing
+     * purposes.
+     */
+    bool isApprox(const Pose3f& other, float prec = kDummyPrecision) const {
+        return (mTranslation - other.mTranslation).norm() < prec &&
+               // Quaternions are equivalent under sign inversion.
+               ((mRotation.coeffs() - other.mRotation.coeffs()).norm() < prec ||
+                (mRotation.coeffs() + other.mRotation.coeffs()).norm() < prec);
+    }
+
+  private:
+    Eigen::Vector3f mTranslation;
+    Eigen::Quaternionf mRotation;
+};
+
+/**
+ * Pretty-printer for Pose3f.
+ */
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose);
+
+/**
+ * Move between the 'from' pose and the 'to' pose, while making sure velocity limits are enforced.
+ * If velocity limits are not violated, returns the 'to' pose and false.
+ * If velocity limits are violated, returns pose farthest along the path that can be reached within
+ * the limits, and true.
+ */
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+ float maxTranslationalVelocity,
+ float maxRotationalVelocity);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/SensorPoseProvider.h b/media/libheadtracking/include/media/SensorPoseProvider.h
new file mode 100644
index 0000000..d2a6b77
--- /dev/null
+++ b/media/libheadtracking/include/media/SensorPoseProvider.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <memory>
+#include <optional>
+
+#include <android/sensor.h>
+
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A utility providing streaming of pose data from motion sensors provided by the Sensor Framework.
+ *
+ * A live instance of this interface keeps around some resources required for accessing sensor
+ * readings (e.g. a thread and a queue). Those would be released when the instance is deleted.
+ *
+ * Once alive, individual sensors can be subscribed to using startSensor() and updates can be
+ * stopped via stopSensor(). Those two methods should not be called concurrently and correct usage
+ * is assumed.
+ */
+class SensorPoseProvider {
+  public:
+    /** Sentinel for "no valid sensor handle" (equals ASENSOR_INVALID). */
+    static constexpr int32_t INVALID_HANDLE = ASENSOR_INVALID;
+
+    /**
+     * Interface for consuming pose-related sensor events.
+     *
+     * The listener will be provided with a stream of events, each including:
+     * - A handle of the sensor responsible for the event.
+     * - Timestamp.
+     * - Pose.
+     * - Optional twist (time-derivative of pose).
+     *
+     * Sensors having only orientation data will have the translation part of the pose set to
+     * identity.
+     *
+     * Events are delivered in a serialized manner (i.e. callbacks do not need to be reentrant).
+     * Callbacks should not block.
+     */
+    class Listener {
+      public:
+        virtual ~Listener() = default;
+
+        /**
+         * Invoked once per sensor event.
+         * @param timestamp Event timestamp, as provided by the sensor framework.
+         * @param handle Handle of the sensor that produced the event.
+         * @param pose The pose reading (translation is identity for orientation-only sensors).
+         * @param twist Time-derivative of the pose, when the sensor provides one.
+         * @param isNewReference NOTE(review): not documented here — presumably indicates
+         *        that subsequent poses are relative to a new reference frame (e.g. after
+         *        a sensor reset); confirm with the implementation.
+         */
+        virtual void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+                            const std::optional<Twist3f>& twist, bool isNewReference) = 0;
+    };
+
+    /**
+     * Creates a new SensorPoseProvider instance.
+     * Events will be delivered to the listener as long as the returned instance is kept alive.
+     * @param packageName Client's package name.
+     * @param listener The listener that will get the events.
+     * @return The new instance, or nullptr in case of failure.
+     */
+    static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener);
+
+    virtual ~SensorPoseProvider() = default;
+
+    /**
+     * Start receiving pose updates from a given sensor.
+     * Attempting to start a sensor that has already been started results in undefined behavior.
+     * @param sensor The sensor to subscribe to.
+     * @param samplingPeriod Sampling interval, in microseconds. Actual rate might be slightly
+     *        different.
+     * @return true iff succeeded.
+     */
+    virtual bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) = 0;
+
+    /**
+     * Stop a sensor, previously started with startSensor(). It is not required to stop all sensors
+     * before deleting the SensorPoseProvider instance.
+     * @param handle The sensor handle, as provided to startSensor().
+     */
+    virtual void stopSensor(int32_t handle) = 0;
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/Twist.h b/media/libheadtracking/include/media/Twist.h
new file mode 100644
index 0000000..e2fc203
--- /dev/null
+++ b/media/libheadtracking/include/media/Twist.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <Eigen/Geometry>
+
+#include "Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF twist.
+ * This class represents the translational and rotational velocity of a rigid object, typically
+ * relative to its own coordinate-frame.
+ * It is created by two 3-vectors, one representing linear motion per time-unit and the other, a
+ * rotation-vector in radians per time-unit (right-handed).
+ */
+class Twist3f {
+  public:
+    Twist3f(const Eigen::Vector3f& translationalVelocity, const Eigen::Vector3f& rotationalVelocity)
+        : mTranslationalVelocity(translationalVelocity), mRotationalVelocity(rotationalVelocity) {}
+
+    /** Zero motion. */
+    Twist3f() : Twist3f(Eigen::Vector3f::Zero(), Eigen::Vector3f::Zero()) {}
+
+    // Copies are member-wise; defaulted (rule of zero) instead of the previous
+    // hand-written copy constructor / assignment, which performed the same copy.
+    Twist3f(const Twist3f& other) = default;
+    Twist3f& operator=(const Twist3f& other) = default;
+
+    /** Linear velocity component. */
+    Eigen::Vector3f translationalVelocity() const { return mTranslationalVelocity; }
+    /** Angular velocity component, as a rotation vector (radians per time-unit). */
+    Eigen::Vector3f rotationalVelocity() const { return mRotationalVelocity; }
+
+    /** Euclidean norm (magnitude) of the linear velocity. */
+    float scalarTranslationalVelocity() const { return mTranslationalVelocity.norm(); }
+    /** Euclidean norm (magnitude) of the angular velocity, in radians per time-unit. */
+    float scalarRotationalVelocity() const { return mRotationalVelocity.norm(); }
+
+    /** Fuzzy component-wise comparison, intended for test/validation purposes. */
+    bool isApprox(const Twist3f& other,
+                  float prec = Eigen::NumTraits<float>::dummy_precision()) const {
+        return mTranslationalVelocity.isApprox(other.mTranslationalVelocity, prec) &&
+               mRotationalVelocity.isApprox(other.mRotationalVelocity, prec);
+    }
+
+  private:
+    Eigen::Vector3f mTranslationalVelocity;
+    Eigen::Vector3f mRotationalVelocity;
+};
+
+/**
+ * Integrate a twist over time to obtain a pose.
+ * dt is the time over which to integrate.
+ * The resulting pose represents the transformation between the starting point and the ending point
+ * of the motion over the time period.
+ */
+Pose3f integrate(const Twist3f& twist, float dt);
+
+/**
+ * Differentiate pose to obtain a twist.
+ * dt is the time of the motion between the reference and the target frames of the pose.
+ */
+Twist3f differentiate(const Pose3f& pose, float dt);
+
+/**
+ * Pretty-printer for twist.
+ */
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 273d91c..fcac551 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -15,6 +15,7 @@
*/
//#define LOG_NDEBUG 0
+#include "include/HeifDecoderAPI.h"
#define LOG_TAG "HeifDecoderImpl"
#include "HeifDecoderImpl.h"
@@ -464,7 +465,7 @@
}
bool HeifDecoderImpl::setOutputColor(HeifColorFormat heifColor) {
- if (heifColor == mOutputColor) {
+ if (heifColor == (HeifColorFormat)mOutputColor) {
return true;
}
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index e98d7d8..4a2523f 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -347,6 +347,7 @@
shared_libs: [
"android.hidl.token@1.0-utils",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"av-types-aidl-cpp",
"liblog",
@@ -378,12 +379,12 @@
],
static_libs: [
- "resourcemanager_aidl_interface-ndk_platform",
+ "resourcemanager_aidl_interface-ndk",
"framework-permission-aidl-cpp",
],
export_static_lib_headers: [
- "resourcemanager_aidl_interface-ndk_platform",
+ "resourcemanager_aidl_interface-ndk",
"framework-permission-aidl-cpp",
],
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index c89c023..c9f361e 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -17,7 +17,6 @@
#include <arpa/inet.h>
#include <stdint.h>
-#include <sys/types.h>
#include <android/IDataSource.h>
#include <binder/IPCThreadState.h>
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 1c9b9e4..5215c1b 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -949,6 +949,9 @@
mVideoWidth = ext1;
mVideoHeight = ext2;
break;
+ case MEDIA_STARTED:
+ ALOGV("Received media started message");
+ break;
case MEDIA_NOTIFY_TIME:
ALOGV("Received notify time message");
break;
diff --git a/media/libmedia/tests/codeclist/Android.bp b/media/libmedia/tests/codeclist/Android.bp
index 7dd0caa..2ed3126 100644
--- a/media/libmedia/tests/codeclist/Android.bp
+++ b/media/libmedia/tests/codeclist/Android.bp
@@ -25,9 +25,25 @@
cc_test {
name: "CodecListTest",
- test_suites: ["device-tests"],
+ test_suites: ["device-tests", "mts"],
gtest: true,
+ // Support multilib variants (using different suffix per sub-architecture), which is needed on
+ // build targets with secondary architectures, as the MTS test suite packaging logic flattens
+ // all test artifacts into a single `testcases` directory.
+ compile_multilib: "both",
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+
+ // used within mainline MTS, but only to R, not to Q.
+ min_sdk_version: "30",
+
srcs: [
"CodecListTest.cpp",
],
@@ -35,7 +51,7 @@
shared_libs: [
"libbinder",
"liblog",
- "libmedia_codeclist",
+ "libmedia_codeclist", // available >= R
"libstagefright",
"libstagefright_foundation",
"libstagefright_xmlparser",
diff --git a/media/libmedia/tests/codeclist/AndroidTest.xml b/media/libmedia/tests/codeclist/AndroidTest.xml
new file mode 100644
index 0000000..eeaab8e
--- /dev/null
+++ b/media/libmedia/tests/codeclist/AndroidTest.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Test module config for CodecList unit tests">
+ <option name="test-suite-tag" value="CodecListTest" />
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.Sdk30ModuleController" />
+
+ <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
+ <option name="cleanup" value="true" />
+ <option name="append-bitness" value="true" />
+ <option name="push" value="CodecListTest->/data/local/tmp/CodecListTest" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="CodecListTest" />
+ </test>
+
+
+</configuration>
diff --git a/media/libmedia/xsd/vts/OWNERS b/media/libmedia/xsd/vts/OWNERS
new file mode 100644
index 0000000..9af2eba
--- /dev/null
+++ b/media/libmedia/xsd/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 151862
+sundongahn@google.com
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index d3a517f..e29364c 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -30,6 +30,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_SCREEN),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_REDIRECT),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_COMMUNICATION_REDIRECT),
TERMINATOR
};
@@ -50,6 +52,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_MUTE_HAPTIC),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_SYSTEM_CAPTURE),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CAPTURE_PRIVATE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CONTENT_SPATIALIZED),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NEVER_SPATIALIZE),
TERMINATOR
};
diff --git a/media/libmediametrics/IMediaMetricsService.cpp b/media/libmediametrics/IMediaMetricsService.cpp
deleted file mode 100644
index b5675e6..0000000
--- a/media/libmediametrics/IMediaMetricsService.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaMetrics"
-
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <binder/IPCThreadState.h>
-
-#include <utils/Errors.h> // for status_t
-#include <utils/List.h>
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <media/MediaMetricsItem.h>
-#include <media/IMediaMetricsService.h>
-
-namespace android {
-
-// TODO: Currently ONE_WAY transactions, make both ONE_WAY and synchronous options.
-
-enum {
- SUBMIT_ITEM = IBinder::FIRST_CALL_TRANSACTION,
- SUBMIT_BUFFER,
-};
-
-class BpMediaMetricsService: public BpInterface<IMediaMetricsService>
-{
-public:
- explicit BpMediaMetricsService(const sp<IBinder>& impl)
- : BpInterface<IMediaMetricsService>(impl)
- {
- }
-
- status_t submit(mediametrics::Item *item) override
- {
- if (item == nullptr) {
- return BAD_VALUE;
- }
- ALOGV("%s: (ONEWAY) item=%s", __func__, item->toString().c_str());
-
- Parcel data;
- data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
- status_t status = item->writeToParcel(&data);
- if (status != NO_ERROR) { // assume failure logged in item
- return status;
- }
-
- status = remote()->transact(
- SUBMIT_ITEM, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
- ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
- __func__, status);
- return status;
- }
-
- status_t submitBuffer(const char *buffer, size_t length) override
- {
- if (buffer == nullptr || length > INT32_MAX) {
- return BAD_VALUE;
- }
- ALOGV("%s: (ONEWAY) length:%zu", __func__, length);
-
- Parcel data;
- data.writeInterfaceToken(IMediaMetricsService::getInterfaceDescriptor());
-
- status_t status = data.writeInt32(length)
- ?: data.write((uint8_t*)buffer, length);
- if (status != NO_ERROR) {
- return status;
- }
-
- status = remote()->transact(
- SUBMIT_BUFFER, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
- ALOGW_IF(status != NO_ERROR, "%s: bad response from service for submit, status=%d",
- __func__, status);
- return status;
- }
-};
-
-IMPLEMENT_META_INTERFACE(MediaMetricsService, "android.media.IMediaMetricsService");
-
-// ----------------------------------------------------------------------
-
-status_t BnMediaMetricsService::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch (code) {
- case SUBMIT_ITEM: {
- CHECK_INTERFACE(IMediaMetricsService, data, reply);
-
- mediametrics::Item * const item = mediametrics::Item::create();
- status_t status = item->readFromParcel(data);
- if (status != NO_ERROR) { // assume failure logged in item
- return status;
- }
- status = submitInternal(item, true /* release */);
- // assume failure logged by submitInternal
- return NO_ERROR;
- }
- case SUBMIT_BUFFER: {
- CHECK_INTERFACE(IMediaMetricsService, data, reply);
- int32_t length;
- status_t status = data.readInt32(&length);
- if (status != NO_ERROR || length <= 0) {
- return BAD_VALUE;
- }
- const void *ptr = data.readInplace(length);
- if (ptr == nullptr) {
- return BAD_VALUE;
- }
- status = submitBuffer(static_cast<const char *>(ptr), length);
- // assume failure logged by submitBuffer
- return NO_ERROR;
- }
-
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index d597a4d..a7ec975 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -23,6 +23,7 @@
#include <mutex>
#include <set>
+#include <unordered_map>
#include <binder/Parcel.h>
#include <cutils/properties.h>
@@ -51,6 +52,32 @@
// the service is off.
#define SVC_TRIES 2
+// Maps AMEDIAMETRICS_PROP_ERROR_VALUE_* strings to their framework status_t codes.
+// The table is built lazily on first call; C++11 magic statics make the
+// initialization of the function-local static thread-safe.
+static const std::unordered_map<std::string, int32_t>& getErrorStringMap() {
+    // DO NOT MODIFY VALUES (OK to add new ones).
+    // This may be found in frameworks/av/media/libmediametrics/include/MediaMetricsConstants.h
+    // Declared const: the table is read-only after construction, matching the
+    // const-reference return type.
+    static const std::unordered_map<std::string, int32_t> map{
+            {"", NO_ERROR},
+            {AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT, BAD_VALUE},
+            {AMEDIAMETRICS_PROP_ERROR_VALUE_IO, DEAD_OBJECT},
+            {AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY, NO_MEMORY},
+            {AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY, PERMISSION_DENIED},
+            {AMEDIAMETRICS_PROP_ERROR_VALUE_STATE, INVALID_OPERATION},
+            {AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT, WOULD_BLOCK},
+            {AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN, UNKNOWN_ERROR},
+    };
+    return map;
+}
+
+status_t errorStringToStatus(const char *error) {
+ const auto& map = getErrorStringMap();
+ if (error == nullptr || error[0] == '\0') return NO_ERROR;
+ auto it = map.find(error);
+ if (it != map.end()) {
+ return it->second;
+ }
+ return UNKNOWN_ERROR;
+}
+
mediametrics::Item* mediametrics::Item::convert(mediametrics_handle_t handle) {
mediametrics::Item *item = (android::mediametrics::Item *) handle;
return item;
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index a09a673..5d0eca0 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -115,6 +115,19 @@
#define AMEDIAMETRICS_PROP_DIRECTION "direction" // string AAudio input or output
#define AMEDIAMETRICS_PROP_DURATIONNS "durationNs" // int64 duration time span
#define AMEDIAMETRICS_PROP_ENCODING "encoding" // string value of format
+
+// Error statistics
+#define AMEDIAMETRICS_PROP_ERROR "error#" // string, empty or one of
+ // AMEDIAMETRICS_PROP_ERROR_VALUE_*
+ // Used for error categorization.
+#define AMEDIAMETRICS_PROP_ERRORSUBCODE "errorSubCode#" // int32, specific code for error
+ // used in conjunction with error#.
+#define AMEDIAMETRICS_PROP_ERRORMESSAGE "errorMessage#" // string, supplemental to error.
+ // Arbitrary information treated as
+ // informational, may be logcat msg,
+ // or an exception with stack trace.
+ // Treated as "debug" information.
+
#define AMEDIAMETRICS_PROP_EVENT "event#" // string value (often func name)
#define AMEDIAMETRICS_PROP_EXECUTIONTIMENS "executionTimeNs" // time to execute the event
@@ -215,4 +228,62 @@
#define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_TONEGENERATOR "tonegenerator" // dial tones
#define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN "unknown" // callerName not set
+// MediaMetrics errors are expected to cover the following sources:
+// https://docs.oracle.com/javase/7/docs/api/java/lang/RuntimeException.html
+// https://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/native/libs/binder/include/binder/Status.h;drc=88e25c0861499ee3ab885814dddc097ab234cb7b;l=57
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/base/media/java/android/media/AudioSystem.java;drc=3ac246c43294d7f7012bdcb0ccb7bae1aa695bd4;l=785
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/av/media/libaaudio/include/aaudio/AAudio.h;drc=cfd3a6fa3aaaf712a890dc02452b38ef401083b8;l=120
+
+// Error category:
+// An empty error string indicates no error.
+
+// Error category: argument
+// IllegalArgumentException
+// NullPointerException
+// BAD_VALUE
+// Out of range, out of bounds.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT "argument"
+
+// Error category: io
+// IOException
+// android.os.DeadObjectException, android.os.RemoteException
+// DEAD_OBJECT
+// FAILED_TRANSACTION
+// IO_ERROR
+// file or ioctl failure
+// Service, rpc, binder, or socket failure.
+// Hardware or device failure.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_IO "io"
+
+// Error category: memory
+// OutOfMemoryError
+// NO_MEMORY
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY "memory"
+
+// Error category: security
+// SecurityException
+// PERMISSION_DENIED
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY "security"
+
+// Error category: state
+// IllegalStateException
+// UnsupportedOperationException
+// INVALID_OPERATION
+// NO_INIT
+// Functionality not implemented (argument may or may not be correct).
+// Call unexpected or out of order.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_STATE "state"
+
+// Error category: timeout
+// TimeoutException
+// WOULD_BLOCK
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT "timeout"
+
+// Error category: unknown
+// Exception (Java specified not listed above, or custom app/service)
+// UNKNOWN_ERROR
+// Catch-all bucket for errors not listed above.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN "unknown"
+
#endif // ANDROID_MEDIA_MEDIAMETRICSCONSTANTS_H
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index 428992c..87f608f 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -27,6 +27,7 @@
#include <variant>
#include <binder/Parcel.h>
+#include <log/log.h>
#include <utils/Errors.h>
#include <utils/Timers.h> // nsecs_t
@@ -105,6 +106,36 @@
};
/*
+ * Helper for error conversions
+ */
+
+static inline constexpr const char* statusToErrorString(status_t status) {
+ switch (status) {
+ case NO_ERROR:
+ return "";
+ case BAD_VALUE:
+ return AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT;
+ case DEAD_OBJECT:
+ case FAILED_TRANSACTION:
+ return AMEDIAMETRICS_PROP_ERROR_VALUE_IO;
+ case NO_MEMORY:
+ return AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY;
+ case PERMISSION_DENIED:
+ return AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY;
+ case NO_INIT:
+ case INVALID_OPERATION:
+ return AMEDIAMETRICS_PROP_ERROR_VALUE_STATE;
+ case WOULD_BLOCK:
+ return AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT;
+ case UNKNOWN_ERROR:
+ default:
+ return AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN;
+ }
+}
+
+status_t errorStringToStatus(const char *error);
+
+/*
* Time printing
*
* kPrintFormatLong time string is 19 characters (including null termination).
@@ -469,16 +500,16 @@
template <> // static
status_t extract(std::string *val, const char **bufferpptr, const char *bufferptrmax) {
const char *ptr = *bufferpptr;
- while (*ptr != 0) {
+ do {
if (ptr >= bufferptrmax) {
ALOGE("%s: buffer exceeded", __func__);
+ android_errorWriteLog(0x534e4554, "204445255");
return BAD_VALUE;
}
- ++ptr;
- }
- const size_t size = (ptr - *bufferpptr) + 1;
+ } while (*ptr++ != 0);
+ // ptr is terminator+1, == bufferptrmax if we finished entire buffer
*val = *bufferpptr;
- *bufferpptr += size;
+ *bufferpptr = ptr;
return NO_ERROR;
}
template <> // static
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index f55678d..e70e3b3 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -65,10 +65,12 @@
"libstagefright_foundation",
"libstagefright_httplive",
"libutils",
+ "packagemanager_aidl-cpp",
],
header_libs: [
"media_plugin_headers",
+ "libmediautils_headers",
],
static_libs: [
@@ -84,6 +86,10 @@
"framework-permission-aidl-cpp",
],
+ export_header_lib_headers: [
+ "libmediautils_headers",
+ ],
+
include_dirs: [
"frameworks/av/media/libstagefright/rtsp",
"frameworks/av/media/libstagefright/webm",
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index d278a01..c7a7a3a 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -21,6 +21,7 @@
#define LOG_TAG "MediaPlayerService"
#include <utils/Log.h>
+#include <chrono>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
@@ -1830,7 +1831,6 @@
{
close();
free(mAttributes);
- delete mCallbackData;
}
//static
@@ -2051,8 +2051,7 @@
mRecycledTrack.clear();
close_l();
- delete mCallbackData;
- mCallbackData = NULL;
+ mCallbackData.clear();
}
}
@@ -2173,7 +2172,7 @@
}
sp<AudioTrack> t;
- CallbackData *newcbd = NULL;
+ sp<CallbackData> newcbd;
// We don't attempt to create a new track if we are recycling an
// offloaded track. But, if we are recycling a non-offloaded or we
@@ -2183,8 +2182,8 @@
if (!(reuse && bothOffloaded)) {
ALOGV("creating new AudioTrack");
- if (mCallback != NULL) {
- newcbd = new CallbackData(this);
+ if (mCallback != nullptr) {
+ newcbd = sp<CallbackData>::make(wp<AudioOutput>::fromExisting(this));
t = new AudioTrack(
mStreamType,
sampleRate,
@@ -2192,7 +2191,6 @@
channelMask,
frameCount,
flags,
- CallbackWrapper,
newcbd,
0, // notification frames
mSessionId,
@@ -2219,8 +2217,7 @@
channelMask,
frameCount,
flags,
- NULL, // callback
- NULL, // user data
+ nullptr, // callback
0, // notification frames
mSessionId,
AudioTrack::TRANSFER_DEFAULT,
@@ -2236,8 +2233,7 @@
t->setCallerName("media");
if ((t == 0) || (t->initCheck() != NO_ERROR)) {
ALOGE("Unable to create audio track");
- delete newcbd;
- // t goes out of scope, so reference count drops to zero
+ // t, newcbd goes out of scope, so reference count drops to zero
return NO_INIT;
} else {
// successful AudioTrack initialization implies a legacy stream type was generated
@@ -2271,7 +2267,6 @@
if (mCallbackData != NULL) {
mCallbackData->setOutput(this);
}
- delete newcbd;
return updateTrack();
}
}
@@ -2377,7 +2372,7 @@
if (mCallbackData != NULL) {
// two alternative approaches
#if 1
- CallbackData *callbackData = mCallbackData;
+ sp<CallbackData> callbackData = mCallbackData;
mLock.unlock();
// proper acquisition sequence
callbackData->lock();
@@ -2414,9 +2409,8 @@
// for example, the next player could be prepared and seeked.
//
// Presuming it isn't advisable to force the track over.
- if (mNextOutput->mTrack == NULL) {
+ if (mNextOutput->mTrack == nullptr) {
ALOGD("Recycling track for gapless playback");
- delete mNextOutput->mCallbackData;
mNextOutput->mCallbackData = mCallbackData;
mNextOutput->mRecycledTrack = mTrack;
mNextOutput->mSampleRateHz = mSampleRateHz;
@@ -2424,11 +2418,11 @@
mNextOutput->mFlags = mFlags;
mNextOutput->mFrameSize = mFrameSize;
close_l();
- mCallbackData = NULL; // destruction handled by mNextOutput
+ mCallbackData.clear();
} else {
ALOGW("Ignoring gapless playback because next player has already started");
// remove track in case resource needed for future players.
- if (mCallbackData != NULL) {
+ if (mCallbackData != nullptr) {
mCallbackData->endTrackSwitch(); // release lock for callbacks before close.
}
close_l();
@@ -2467,8 +2461,13 @@
void MediaPlayerService::AudioOutput::pause()
{
ALOGV("pause");
+ // We use pauseAndWait() instead of pause() to ensure tracks ramp to silence before
+ // any flush. We choose 40 ms timeout to allow 1 deep buffer mixer period
+ // to occur. Often waiting is 0 - 20 ms.
+ using namespace std::chrono_literals;
+ constexpr auto TIMEOUT_MS = 40ms;
Mutex::Autolock lock(mLock);
- if (mTrack != 0) mTrack->pause();
+ if (mTrack != 0) mTrack->pauseAndWait(TIMEOUT_MS);
}
void MediaPlayerService::AudioOutput::close()
@@ -2650,76 +2649,71 @@
}
}
-// static
-void MediaPlayerService::AudioOutput::CallbackWrapper(
- int event, void *cookie, void *info) {
- //ALOGV("callbackwrapper");
- CallbackData *data = (CallbackData*)cookie;
- // lock to ensure we aren't caught in the middle of a track switch.
- data->lock();
- AudioOutput *me = data->getOutput();
- AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
- if (me == NULL) {
- // no output set, likely because the track was scheduled to be reused
- // by another player, but the format turned out to be incompatible.
- data->unlock();
- if (buffer != NULL) {
- buffer->size = 0;
- }
+size_t MediaPlayerService::AudioOutput::CallbackData::onMoreData(const AudioTrack::Buffer& buffer) {
+    ALOGD("data callback");
+    lock();
+    sp<AudioOutput> me = getOutput();
+    if (me == nullptr) {
+        unlock();
+        return 0;
+    }
+    size_t actualSize = (*me->mCallback)(
+            me.get(), buffer.raw, buffer.size, me->mCallbackCookie,
+            CB_EVENT_FILL_BUFFER);
+
+    // Log when no data is returned from the callback.
+    // (1) We may have no data (especially with network streaming sources).
+    // (2) We may have reached the EOS and the audio track is not stopped yet.
+    // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
+    // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
+    //
+    // This is a benign busy-wait, with the next data request generated 10 ms or more later;
+    // nevertheless for power reasons, we don't want to see too many of these.
+
+    ALOGV_IF(actualSize == 0 && buffer.size > 0, "callbackwrapper: empty buffer returned");
+    unlock();
+    return actualSize;
+}
+
+void MediaPlayerService::AudioOutput::CallbackData::onStreamEnd() {
+ lock();
+ sp<AudioOutput> me = getOutput();
+ if (me == nullptr) {
+ unlock();
return;
}
+ ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
+ (*me->mCallback)(me.get(), NULL /* buffer */, 0 /* size */,
+ me->mCallbackCookie, CB_EVENT_STREAM_END);
+ unlock();
+}
- switch(event) {
- case AudioTrack::EVENT_MORE_DATA: {
- size_t actualSize = (*me->mCallback)(
- me, buffer->raw, buffer->size, me->mCallbackCookie,
- CB_EVENT_FILL_BUFFER);
- // Log when no data is returned from the callback.
- // (1) We may have no data (especially with network streaming sources).
- // (2) We may have reached the EOS and the audio track is not stopped yet.
- // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
- // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
- //
- // This is a benign busy-wait, with the next data request generated 10 ms or more later;
- // nevertheless for power reasons, we don't want to see too many of these.
-
- ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
-
- buffer->size = actualSize;
- } break;
-
- case AudioTrack::EVENT_STREAM_END:
- // currently only occurs for offloaded callbacks
- ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
- (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
- me->mCallbackCookie, CB_EVENT_STREAM_END);
- break;
-
- case AudioTrack::EVENT_NEW_IAUDIOTRACK :
- ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
- (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
- me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
- break;
-
- case AudioTrack::EVENT_UNDERRUN:
- // This occurs when there is no data available, typically
- // when there is a failure to supply data to the AudioTrack. It can also
- // occur in non-offloaded mode when the audio device comes out of standby.
- //
- // If an AudioTrack underruns it outputs silence. Since this happens suddenly
- // it may sound like an audible pop or glitch.
- //
- // The underrun event is sent once per track underrun; the condition is reset
- // when more data is sent to the AudioTrack.
- ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
- break;
-
- default:
- ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
+void MediaPlayerService::AudioOutput::CallbackData::onNewIAudioTrack() {
+ lock();
+ sp<AudioOutput> me = getOutput();
+ if (me == nullptr) {
+ unlock();
+ return;
}
+ ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
+ (*me->mCallback)(me.get(), NULL /* buffer */, 0 /* size */,
+ me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
+ unlock();
+}
- data->unlock();
+void MediaPlayerService::AudioOutput::CallbackData::onUnderrun() {
+ // This occurs when there is no data available, typically
+ // when there is a failure to supply data to the AudioTrack. It can also
+ // occur in non-offloaded mode when the audio device comes out of standby.
+ //
+ // If an AudioTrack underruns it outputs silence. Since this happens suddenly
+ // it may sound like an audible pop or glitch.
+ //
+ // The underrun event is sent once per track underrun; the condition is reset
+ // when more data is sent to the AudioTrack.
+ ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
+
}
audio_session_t MediaPlayerService::AudioOutput::getSessionId() const
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 98091be..86be3fe 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -30,9 +30,11 @@
#include <media/AidlConversion.h>
#include <media/AudioResamplerPublic.h>
#include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
#include <media/MediaPlayerInterface.h>
#include <media/Metadata.h>
#include <media/stagefright/foundation/ABase.h>
+#include <mediautils/Synchronization.h>
#include <android/content/AttributionSourceState.h>
#include <system/audio.h>
@@ -41,7 +43,6 @@
using content::AttributionSourceState;
-class AudioTrack;
struct AVSyncSettings;
class DeathNotifier;
class IDataSource;
@@ -161,7 +162,7 @@
sp<AudioOutput> mNextOutput;
AudioCallback mCallback;
void * mCallbackCookie;
- CallbackData * mCallbackData;
+ sp<CallbackData> mCallbackData;
audio_stream_type_t mStreamType;
audio_attributes_t * mAttributes;
float mLeftVolume;
@@ -189,15 +190,15 @@
// CallbackData is what is passed to the AudioTrack as the "user" data.
// We need to be able to target this to a different Output on the fly,
// so we can't use the Output itself for this.
- class CallbackData {
+ class CallbackData : public AudioTrack::IAudioTrackCallback {
friend AudioOutput;
public:
- explicit CallbackData(AudioOutput *cookie) {
+ explicit CallbackData(const wp<AudioOutput>& cookie) {
mData = cookie;
mSwitching = false;
}
- AudioOutput * getOutput() const { return mData; }
- void setOutput(AudioOutput* newcookie) { mData = newcookie; }
+ sp<AudioOutput> getOutput() const { return mData.load().promote(); }
+ void setOutput(const wp<AudioOutput>& newcookie) { mData.store(newcookie); }
// lock/unlock are used by the callback before accessing the payload of this object
void lock() const { mLock.lock(); }
void unlock() const { mLock.unlock(); }
@@ -220,8 +221,13 @@
}
mSwitching = false;
}
+ protected:
+ size_t onMoreData(const AudioTrack::Buffer& buffer) override;
+ void onUnderrun() override;
+ void onStreamEnd() override;
+ void onNewIAudioTrack() override;
private:
- AudioOutput * mData;
+ mediautils::atomic_wp<AudioOutput> mData;
mutable Mutex mLock; // a recursive mutex might make this unnecessary.
bool mSwitching;
DISALLOW_EVIL_CONSTRUCTORS(CallbackData);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index a914006..4aa80be 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -126,8 +126,13 @@
}
if ((as == AUDIO_SOURCE_FM_TUNER
- && !(captureAudioOutputAllowed(mAttributionSource)
+ && !(captureAudioOutputAllowed(mAttributionSource)
|| captureTunerAudioInputAllowed(mAttributionSource)))
+ || (as == AUDIO_SOURCE_REMOTE_SUBMIX
+ && !(captureAudioOutputAllowed(mAttributionSource)
+ || modifyAudioRoutingAllowed(mAttributionSource)))
+ || (as == AUDIO_SOURCE_ECHO_REFERENCE
+ && !captureAudioOutputAllowed(mAttributionSource))
|| !recordingAllowed(mAttributionSource, (audio_source_t)as)) {
return PERMISSION_DENIED;
}
diff --git a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index 2aabd53..8c86e16 100644
--- a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -225,10 +225,26 @@
"media.stagefright.thumbnail.prefer_hw_codecs", false);
uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
Vector<AString> matchingCodecs;
+ sp<AMessage> format = new AMessage;
+ status_t err = convertMetaDataToMessage(trackMeta, &format);
+ if (err != OK) {
+ format = NULL;
+ }
+
+ // If decoding thumbnail check decoder supports thumbnail dimensions instead
+ int32_t thumbHeight, thumbWidth;
+ if (thumbnail && format != NULL
+ && trackMeta->findInt32(kKeyThumbnailHeight, &thumbHeight)
+ && trackMeta->findInt32(kKeyThumbnailWidth, &thumbWidth)) {
+ format->setInt32("height", thumbHeight);
+ format->setInt32("width", thumbWidth);
+ }
+
MediaCodecList::findMatchingCodecs(
mime,
false, /* encoder */
flags,
+ format,
&matchingCodecs);
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
@@ -348,11 +364,18 @@
bool preferhw = property_get_bool(
"media.stagefright.thumbnail.prefer_hw_codecs", false);
uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
+ sp<AMessage> format = new AMessage;
+ status_t err = convertMetaDataToMessage(trackMeta, &format);
+ if (err != OK) {
+ format = NULL;
+ }
+
Vector<AString> matchingCodecs;
MediaCodecList::findMatchingCodecs(
mime,
false, /* encoder */
flags,
+ format,
&matchingCodecs);
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index d6de47f..d7785da 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -26,6 +26,7 @@
#include <system/audio.h>
#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/AString.h>
#include <android/content/AttributionSourceState.h>
namespace android {
diff --git a/media/libmediaplayerservice/fuzzer/Android.bp b/media/libmediaplayerservice/fuzzer/Android.bp
new file mode 100644
index 0000000..d83d3c9
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/Android.bp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_media_libmediaplayerservice_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: [
+ "frameworks_av_media_libmediaplayerservice_license",
+ ],
+}
+
+cc_defaults {
+ name: "libmediaplayerserviceFuzzer_defaults",
+ include_dirs: [
+ "frameworks/av/media/libmediaplayerservice",
+ ],
+ static_libs: [
+ "libmediaplayerservice",
+ "liblog",
+ ],
+ shared_libs: [
+ "framework-permission-aidl-cpp",
+ "libbinder",
+ "libcutils",
+ "libmedia",
+ "libstagefright",
+ "libutils",
+ "libstagefright_foundation",
+ ],
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
+
+cc_fuzz {
+ name: "mediarecorder_fuzzer",
+ srcs: [
+ "mediarecorder_fuzzer.cpp",
+ ],
+ defaults: [
+ "libmediaplayerserviceFuzzer_defaults",
+ ],
+ static_libs: [
+ "libstagefright_rtsp",
+ "libbase",
+ ],
+ shared_libs: [
+ "av-types-aidl-cpp",
+ "media_permission-aidl-cpp",
+ "libaudioclient_aidl_conversion",
+ "libandroid_net",
+ "libcamera_client",
+ "libgui",
+ "libmediametrics",
+ ],
+}
+
+cc_fuzz {
+ name: "metadataretriever_fuzzer",
+ srcs: [
+ "metadataretriever_fuzzer.cpp",
+ ],
+ defaults: [
+ "libmediaplayerserviceFuzzer_defaults",
+ ],
+ static_libs: [
+ "libplayerservice_datasource",
+ ],
+ shared_libs: [
+ "libdatasource",
+ "libdrmframework",
+ ],
+}
+
+cc_fuzz {
+ name: "mediaplayer_fuzzer",
+ srcs: [
+ "mediaplayer_fuzzer.cpp",
+ ],
+ defaults: [
+ "libmediaplayerserviceFuzzer_defaults",
+ ],
+ static_libs: [
+ "libplayerservice_datasource",
+ "libstagefright_nuplayer",
+ "libstagefright_rtsp",
+ "libstagefright_timedtext",
+ ],
+ shared_libs: [
+ "android.hardware.media.c2@1.0",
+ "android.hardware.media.omx@1.0",
+ "av-types-aidl-cpp",
+ "libaudioclient_aidl_conversion",
+ "libbase",
+ "libactivitymanager_aidl",
+ "libandroid_net",
+ "libaudioclient",
+ "libcamera_client",
+ "libcodec2_client",
+ "libcrypto",
+ "libdatasource",
+ "libdrmframework",
+ "libgui",
+ "libhidlbase",
+ "liblog",
+ "libmedia_codeclist",
+ "libmedia_omx",
+ "libmediadrm",
+ "libmediametrics",
+ "libmediautils",
+ "libmemunreachable",
+ "libnetd_client",
+ "libpowermanager",
+ "libstagefright_httplive",
+ ],
+}
diff --git a/media/libmediaplayerservice/fuzzer/README.md b/media/libmediaplayerservice/fuzzer/README.md
new file mode 100644
index 0000000..a93c809
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/README.md
@@ -0,0 +1,83 @@
+# Fuzzer for libmediaplayerservice
+## Table of contents
++ [StagefrightMediaRecorder](#StagefrightMediaRecorder)
++ [StagefrightMetadataRetriever](#StagefrightMetadataRetriever)
++ [MediaPlayer](#MediaPlayer)
+
+# <a name="StagefrightMediaRecorder"></a> Fuzzer for StagefrightMediaRecorder
+
+StagefrightMediaRecorder supports the following parameters:
+1. Output Formats (parameter name: `setOutputFormat`)
+2. Audio Encoders (parameter name: `setAudioEncoder`)
+3. Video Encoders (parameter name: `setVideoEncoder`)
+4. Audio Sources (parameter name: `setAudioSource`)
+5. Video Sources (parameter name: `setVideoSource`)
+6. Microphone Direction (parameter name: `setMicrophoneDirection`)
+
+You can find the possible values in the fuzzer's source code.
+
+#### Steps to run
+1. Build the fuzzer
+```
+ $ mm -j$(nproc) mediarecorder_fuzzer
+```
+2. Run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/mediarecorder_fuzzer/mediarecorder_fuzzer
+```
+
+# <a name="StagefrightMetadataRetriever"></a> Fuzzer for StagefrightMetadataRetriever
+
+StagefrightMetadataRetriever supports the following data sources:
+1. Url (parameter name: `url`)
+2. File descriptor (parameter name: `fd`)
+3. DataSource (parameter name: `source`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `url` | Url of data source | Value obtained from FuzzedDataProvider |
+| `fd` | File descriptor value of input file | Value obtained from FuzzedDataProvider |
+| `source` | DataSource object | Data obtained from FuzzedDataProvider |
+
+#### Steps to run
+1. Build the fuzzer
+```
+ $ mm -j$(nproc) metadataretriever_fuzzer
+```
+2. To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/metadataretriever_fuzzer/metadataretriever_fuzzer
+```
+
+# <a name="MediaPlayer"></a> Fuzzer for MediaPlayer
+
+MediaPlayerService supports the following data sources:
+1. Url (parameter name: `url`)
+2. File descriptor (parameter name: `fd`)
+3. IStreamSource (parameter name: `source`)
+4. IDataSource (parameter name: `source`)
+5. RTP Parameters (parameter name: `rtpParams`)
+
+MediaPlayerService supports the following parameters:
+1. Audio sessions (parameter name: `audioSessionId`)
+2. Audio stretch modes (parameter name: `mStretchMode`)
+3. Audio fallback modes (parameter name: `mFallbackMode`)
+4. Media parameter keys (parameter name: `key`)
+5. Audio Stream Types (parameter name: `streamType`)
+6. Media Event Types (parameter name: `msg`)
+7. Media Info Types (parameter name: `ext1`)
+
+You can find the possible values in the fuzzer's source code.
+
+#### Steps to run
+1. Build the fuzzer
+```
+ $ mm -j$(nproc) mediaplayer_fuzzer
+```
+2. To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/mediaplayer_fuzzer/mediaplayer_fuzzer
+```
diff --git a/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
new file mode 100644
index 0000000..7799f44
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <MediaPlayerService.h>
+#include <camera/Camera.h>
+#include <datasource/FileSource.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/IMediaCodecList.h>
+#include <media/IMediaHTTPService.h>
+#include <media/IMediaPlayer.h>
+#include <media/IMediaRecorder.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
+#include <media/stagefright/RemoteDataSource.h>
+#include <media/stagefright/foundation/base64.h>
+#include <thread>
+#include "fuzzer/FuzzedDataProvider.h"
+
+// Fixed sizes and bounds used to constrain values drawn from FuzzedDataProvider.
+constexpr int32_t kUuidSize = 16;
+constexpr int32_t kMaxSleepTimeInMs = 100;
+constexpr int32_t kMinSleepTimeInMs = 0;
+constexpr int32_t kPlayCountMin = 1;
+constexpr int32_t kPlayCountMax = 10;
+constexpr int32_t kMaxDimension = 8192;
+constexpr int32_t kMinDimension = 0;
+
+using namespace std;
+using namespace android;
+
+// Audio session handles passed to IMediaPlayerService::create().
+constexpr audio_session_t kSupportedAudioSessions[] = {
+    AUDIO_SESSION_DEVICE, AUDIO_SESSION_OUTPUT_STAGE, AUDIO_SESSION_OUTPUT_MIX};
+
+// Values for AudioPlaybackRate::mStretchMode.
+constexpr audio_timestretch_stretch_mode_t kAudioStretchModes[] = {
+    AUDIO_TIMESTRETCH_STRETCH_DEFAULT, AUDIO_TIMESTRETCH_STRETCH_VOICE};
+
+// Values for AudioPlaybackRate::mFallbackMode.
+constexpr audio_timestretch_fallback_mode_t kAudioFallbackModes[] = {
+    AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT, AUDIO_TIMESTRETCH_FALLBACK_DEFAULT,
+    AUDIO_TIMESTRETCH_FALLBACK_MUTE, AUDIO_TIMESTRETCH_FALLBACK_FAIL};
+
+// Keys fed to IMediaPlayer::setParameter()/getParameter().
+constexpr media_parameter_keys kMediaParamKeys[] = {
+    KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS, KEY_PARAMETER_AUDIO_CHANNEL_COUNT,
+    KEY_PARAMETER_PLAYBACK_RATE_PERMILLE, KEY_PARAMETER_AUDIO_ATTRIBUTES,
+    KEY_PARAMETER_RTP_ATTRIBUTES};
+
+// Stream types fed to IMediaPlayer::setAudioStreamType().
+constexpr audio_stream_type_t kAudioStreamTypes[] = {
+    AUDIO_STREAM_DEFAULT, AUDIO_STREAM_VOICE_CALL, AUDIO_STREAM_SYSTEM,
+    AUDIO_STREAM_RING, AUDIO_STREAM_MUSIC, AUDIO_STREAM_ALARM,
+    AUDIO_STREAM_NOTIFICATION, AUDIO_STREAM_BLUETOOTH_SCO, AUDIO_STREAM_ENFORCED_AUDIBLE,
+    AUDIO_STREAM_DTMF, AUDIO_STREAM_TTS, AUDIO_STREAM_ASSISTANT};
+
+// Event codes passed as 'msg' to MediaPlayer::notify().
+constexpr media_event_type kMediaEventTypes[] = {MEDIA_NOP,
+                                                 MEDIA_PREPARED,
+                                                 MEDIA_PLAYBACK_COMPLETE,
+                                                 MEDIA_BUFFERING_UPDATE,
+                                                 MEDIA_SEEK_COMPLETE,
+                                                 MEDIA_SET_VIDEO_SIZE,
+                                                 MEDIA_STARTED,
+                                                 MEDIA_PAUSED,
+                                                 MEDIA_STOPPED,
+                                                 MEDIA_SKIPPED,
+                                                 MEDIA_NOTIFY_TIME,
+                                                 MEDIA_TIMED_TEXT,
+                                                 MEDIA_ERROR,
+                                                 MEDIA_INFO,
+                                                 MEDIA_SUBTITLE_DATA,
+                                                 MEDIA_META_DATA,
+                                                 MEDIA_DRM_INFO,
+                                                 MEDIA_TIME_DISCONTINUITY,
+                                                 MEDIA_IMS_RX_NOTICE,
+                                                 MEDIA_AUDIO_ROUTING_CHANGED};
+
+// Info codes passed as 'ext1' to MediaPlayer::notify().
+constexpr media_info_type kMediaInfoTypes[] = {
+    MEDIA_INFO_UNKNOWN, MEDIA_INFO_STARTED_AS_NEXT,
+    MEDIA_INFO_RENDERING_START, MEDIA_INFO_VIDEO_TRACK_LAGGING,
+    MEDIA_INFO_BUFFERING_START, MEDIA_INFO_BUFFERING_END,
+    MEDIA_INFO_NETWORK_BANDWIDTH, MEDIA_INFO_BAD_INTERLEAVING,
+    MEDIA_INFO_NOT_SEEKABLE, MEDIA_INFO_METADATA_UPDATE,
+    MEDIA_INFO_PLAY_AUDIO_ERROR, MEDIA_INFO_PLAY_VIDEO_ERROR,
+    MEDIA_INFO_TIMED_TEXT_ERROR};
+
+// URI scheme prefixes tried in the 'http' data-source case of setDataSource().
+const char *kUrlPrefix[] = {"data:", "http://", "https://", "rtsp://", "content://", "test://"};
+
+// No-op IStreamSource so that IMediaPlayer::setDataSource(IStreamSource) can
+// be exercised. onAsBinder() returns nullptr — presumably this object is only
+// used in-process and never marshalled; confirm against the service setup.
+struct TestStreamSource : public IStreamSource {
+    void setListener(const sp<IStreamListener> & /*listener*/) override{};
+    void setBuffers(const Vector<sp<IMemory>> & /*buffers*/) override{};
+    void onBufferAvailable(size_t /*index*/) override{};
+    IBinder *onAsBinder() { return nullptr; };
+};
+
+// Death recipient that aborts the process if a watched binder dies.
+// NOTE(review): not referenced anywhere in this file's visible code.
+class BinderDeathNotifier : public IBinder::DeathRecipient {
+  public:
+    void binderDied(const wp<IBinder> &) { abort(); }
+};
+
+// Drives one fuzz iteration against MediaPlayerService: picks a data source,
+// then exercises the IMediaPlayer API surface with fuzzed arguments.
+class MediaPlayerServiceFuzzer {
+  public:
+    MediaPlayerServiceFuzzer(const uint8_t *data, size_t size)
+        : mFdp(data, size), mDataSourceFd(memfd_create("InputFile", MFD_ALLOW_SEALING)){};
+    ~MediaPlayerServiceFuzzer() { close(mDataSourceFd); };
+    void process(const uint8_t *data, size_t size);
+
+  private:
+    bool setDataSource(const uint8_t *data, size_t size);
+    void invokeMediaPlayer();
+    FuzzedDataProvider mFdp;  // source of all fuzzed values
+    sp<IMediaPlayer> mMediaPlayer = nullptr;  // created in process()
+    sp<IMediaPlayerClient> mMediaPlayerClient = nullptr;  // intentionally left null
+    const int32_t mDataSourceFd;  // in-memory fd holding the raw fuzz input
+};
+
+// Feeds mMediaPlayer one of five fuzz-chosen data-source flavors (data/http
+// URI, plain fd, IStreamSource, IDataSource, RTP parameters). The raw fuzz
+// input is written to mDataSourceFd for the fd-backed flavors. Returns true
+// only when setDataSource() reported status 0.
+bool MediaPlayerServiceFuzzer::setDataSource(const uint8_t *data, size_t size) {
+    status_t status = -1;
+    enum DataSourceType {http, fd, stream, file, socket, kMaxValue = socket};
+    switch (mFdp.ConsumeEnum<DataSourceType>()) {
+        case http: {
+            KeyedVector<String8, String8> headers;
+            headers.add(String8(mFdp.ConsumeRandomLengthString().c_str()),
+                        String8(mFdp.ConsumeRandomLengthString().c_str()));
+
+            // Compose "<prefix>;base64,<encoded slice of the fuzz input>".
+            uint32_t dataBlobSize = mFdp.ConsumeIntegralInRange<uint16_t>(0, size);
+            vector<uint8_t> uriSuffix = mFdp.ConsumeBytes<uint8_t>(dataBlobSize);
+
+            string uri(mFdp.PickValueInArray(kUrlPrefix));
+            uri += ";base64,";
+            AString out;
+            encodeBase64(uriSuffix.data(), uriSuffix.size(), &out);
+            uri += out.c_str();
+            status = mMediaPlayer->setDataSource(nullptr /*httpService*/, uri.c_str(), &headers);
+            break;
+        }
+        case fd: {
+            write(mDataSourceFd, data, size);
+
+            status = mMediaPlayer->setDataSource(mDataSourceFd, 0, size);
+            break;
+        }
+        case stream: {
+            sp<IStreamSource> streamSource = sp<TestStreamSource>::make();
+            status = mMediaPlayer->setDataSource(streamSource);
+            break;
+        }
+        case file: {
+            write(mDataSourceFd, data, size);
+
+            // dup() so FileSource owns its own copy of the fd.
+            sp<DataSource> dataSource = new FileSource(dup(mDataSourceFd), 0, size);
+            sp<IDataSource> iDataSource = RemoteDataSource::wrap(dataSource);
+            if (!iDataSource) {
+                return false;
+            }
+            status = mMediaPlayer->setDataSource(iDataSource);
+            break;
+        }
+        case socket: {
+            // NOTE(review): endpoint.sin_addr is never initialized, so the
+            // retransmit endpoint carries indeterminate address bytes —
+            // confirm this is intended for the fuzz target.
+            String8 rtpParams = String8(mFdp.ConsumeRandomLengthString().c_str());
+            struct sockaddr_in endpoint;
+            endpoint.sin_family = mFdp.ConsumeIntegral<unsigned short>();
+            endpoint.sin_port = mFdp.ConsumeIntegral<uint16_t>();
+            mMediaPlayer->setRetransmitEndpoint(&endpoint);
+            status = mMediaPlayer->setDataSource(rtpParams);
+            break;
+        }
+    }
+
+    if (status != 0) {
+        return false;
+    }
+    return true;
+}
+
+// Exercises the bulk of the IMediaPlayer interface on mMediaPlayer with
+// fuzzed arguments. Return codes are intentionally ignored: the goal is
+// coverage, not validating individual calls.
+void MediaPlayerServiceFuzzer::invokeMediaPlayer() {
+    // Give the player a (possibly invalid) video surface.
+    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+    String8 name = String8(mFdp.ConsumeRandomLengthString().c_str());
+    uint32_t width = mFdp.ConsumeIntegralInRange<uint32_t>(kMinDimension, kMaxDimension);
+    uint32_t height = mFdp.ConsumeIntegralInRange<uint32_t>(kMinDimension, kMaxDimension);
+    uint32_t pixelFormat = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t flags = mFdp.ConsumeIntegral<int32_t>();
+    sp<SurfaceControl> surfaceControl =
+            composerClient->createSurface(name, width, height, pixelFormat, flags);
+    if (surfaceControl) {
+        sp<Surface> surface = surfaceControl->getSurface();
+        mMediaPlayer->setVideoSurfaceTexture(surface->getIGraphicBufferProducer());
+    }
+
+    BufferingSettings buffering;
+    buffering.mInitialMarkMs = mFdp.ConsumeIntegral<int32_t>();
+    buffering.mResumePlaybackMarkMs = mFdp.ConsumeIntegral<int32_t>();
+    mMediaPlayer->setBufferingSettings(buffering);
+    mMediaPlayer->getBufferingSettings(&buffering);
+
+    // Cycle start/pause/stop a few times with fuzz-chosen dwell times.
+    mMediaPlayer->prepareAsync();
+    size_t playCount = mFdp.ConsumeIntegralInRange<size_t>(kPlayCountMin, kPlayCountMax);
+    for (size_t Idx = 0; Idx < playCount; ++Idx) {
+        mMediaPlayer->start();
+        this_thread::sleep_for(chrono::milliseconds(
+                mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mMediaPlayer->pause();
+        this_thread::sleep_for(chrono::milliseconds(
+                mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mMediaPlayer->stop();
+    }
+    bool state;
+    mMediaPlayer->isPlaying(&state);
+
+    // Playback-rate round-trip with fuzzed speed/pitch and valid modes.
+    AudioPlaybackRate rate;
+    rate.mSpeed = mFdp.ConsumeFloatingPoint<float>();
+    rate.mPitch = mFdp.ConsumeFloatingPoint<float>();
+    rate.mStretchMode = mFdp.PickValueInArray(kAudioStretchModes);
+    rate.mFallbackMode = mFdp.PickValueInArray(kAudioFallbackModes);
+    mMediaPlayer->setPlaybackSettings(rate);
+    mMediaPlayer->getPlaybackSettings(&rate);
+
+    // NOTE(review): heap allocation is unnecessary here — a stack-allocated
+    // AVSyncSettings would do and could not leak.
+    AVSyncSettings *avSyncSettings = new AVSyncSettings();
+    float videoFpsHint = mFdp.ConsumeFloatingPoint<float>();
+    mMediaPlayer->setSyncSettings(*avSyncSettings, videoFpsHint);
+    mMediaPlayer->getSyncSettings(avSyncSettings, &videoFpsHint);
+    delete avSyncSettings;
+
+    mMediaPlayer->seekTo(mFdp.ConsumeIntegral<int32_t>());
+
+    int32_t msec;
+    mMediaPlayer->getCurrentPosition(&msec);
+    mMediaPlayer->getDuration(&msec);
+    mMediaPlayer->reset();
+
+    mMediaPlayer->notifyAt(mFdp.ConsumeIntegral<int64_t>());
+
+    mMediaPlayer->setAudioStreamType(mFdp.PickValueInArray(kAudioStreamTypes));
+    mMediaPlayer->setLooping(mFdp.ConsumeIntegral<int32_t>());
+    float left = mFdp.ConsumeFloatingPoint<float>();
+    float right = mFdp.ConsumeFloatingPoint<float>();
+    mMediaPlayer->setVolume(left, right);
+
+    // invoke()/metadata round-trips using single-int32 parcel payloads.
+    Parcel request, reply;
+    request.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    request.setDataPosition(0);
+    mMediaPlayer->invoke(request, &reply);
+
+    Parcel filter;
+    filter.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    filter.setDataPosition(0);
+    mMediaPlayer->setMetadataFilter(filter);
+
+    bool updateOnly = mFdp.ConsumeBool();
+    bool applyFilter = mFdp.ConsumeBool();
+    mMediaPlayer->getMetadata(updateOnly, applyFilter, &reply);
+    mMediaPlayer->setAuxEffectSendLevel(mFdp.ConsumeFloatingPoint<float>());
+    mMediaPlayer->attachAuxEffect(mFdp.ConsumeIntegral<int32_t>());
+
+    int32_t key = mFdp.PickValueInArray(kMediaParamKeys);
+    request.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    request.setDataPosition(0);
+    mMediaPlayer->setParameter(key, request);
+    key = mFdp.PickValueInArray(kMediaParamKeys);
+    mMediaPlayer->getParameter(key, &reply);
+
+    struct sockaddr_in endpoint;
+    mMediaPlayer->getRetransmitEndpoint(&endpoint);
+
+    // Chain a second player, created through the service, as the next player.
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    const sp<IMediaPlayerService> mpService(IMediaDeathNotifier::getMediaPlayerService());
+    sp<IMediaPlayer> mNextMediaPlayer = mpService->create(
+            mMediaPlayerClient, mFdp.PickValueInArray(kSupportedAudioSessions), attributionSource);
+    mMediaPlayer->setNextPlayer(mNextMediaPlayer);
+
+    const sp<media::VolumeShaper::Configuration> configuration =
+            sp<media::VolumeShaper::Configuration>::make();
+    const sp<media::VolumeShaper::Operation> operation = sp<media::VolumeShaper::Operation>::make();
+    mMediaPlayer->applyVolumeShaper(configuration, operation);
+
+    mMediaPlayer->getVolumeShaperState(mFdp.ConsumeIntegral<int32_t>());
+    // DRM prepare/release with a fully fuzzed UUID and a 1-byte session id.
+    uint8_t uuid[kUuidSize];
+    for (int32_t index = 0; index < kUuidSize; ++index) {
+        uuid[index] = mFdp.ConsumeIntegral<uint8_t>();
+    }
+    Vector<uint8_t> drmSessionId;
+    drmSessionId.push_back(mFdp.ConsumeIntegral<uint8_t>());
+    mMediaPlayer->prepareDrm(uuid, drmSessionId);
+    mMediaPlayer->releaseDrm();
+
+    audio_port_handle_t deviceId = mFdp.ConsumeIntegral<int32_t>();
+    mMediaPlayer->setOutputDevice(deviceId);
+    mMediaPlayer->getRoutedDeviceId(&deviceId);
+
+    mMediaPlayer->enableAudioDeviceCallback(mFdp.ConsumeBool());
+
+    // NOTE(review): C-style downcast assumes the concrete type behind
+    // IMediaPlayer — only safe because the service runs in-process here; confirm.
+    sp<MediaPlayer> mediaPlayer = (MediaPlayer *)mMediaPlayer.get();
+
+    int32_t msg = mFdp.PickValueInArray(kMediaEventTypes);
+    int32_t ext1 = mFdp.PickValueInArray(kMediaInfoTypes);
+    int32_t ext2 = mFdp.ConsumeIntegral<int32_t>();
+    Parcel obj;
+    obj.writeInt32(mFdp.ConsumeIntegral<int32_t>());
+    obj.setDataPosition(0);
+    mediaPlayer->notify(msg, ext1, ext2, &obj);
+
+    // Dump player state to a throwaway in-memory fd.
+    int32_t mediaPlayerDumpFd = memfd_create("OutputDumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mediaPlayer->dump(mediaPlayerDumpFd, args);
+    close(mediaPlayerDumpFd);
+
+    mMediaPlayer->disconnect();
+}
+
+// One fuzz iteration: bring up MediaPlayerService in-process, poke the
+// service-level APIs, obtain a player, dump service state, then drive the
+// player if a data source was accepted.
+void MediaPlayerServiceFuzzer::process(const uint8_t *data, size_t size) {
+    MediaPlayerService::instantiate();
+
+    const sp<IMediaPlayerService> mpService(IMediaDeathNotifier::getMediaPlayerService());
+    if (!mpService) {
+        return;
+    }
+
+    sp<IMediaCodecList> mediaCodecList = mpService->getCodecList();
+
+    // Remote-display listen with a null client and fuzzed package/iface names.
+    sp<IRemoteDisplayClient> remoteDisplayClient;
+    sp<IRemoteDisplay> remoteDisplay = mpService->listenForRemoteDisplay(
+            String16(mFdp.ConsumeRandomLengthString().c_str()) /*opPackageName*/, remoteDisplayClient,
+            String8(mFdp.ConsumeRandomLengthString().c_str()) /*iface*/);
+
+    mpService->addBatteryData(mFdp.ConsumeIntegral<uint32_t>());
+    Parcel reply;
+    mpService->pullBatteryData(&reply);
+
+    // In-process downcast to the concrete service to reach create()/dump().
+    sp<MediaPlayerService> mediaPlayerService = (MediaPlayerService *)mpService.get();
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    mMediaPlayer = mediaPlayerService->create(
+            mMediaPlayerClient, mFdp.PickValueInArray(kSupportedAudioSessions), attributionSource);
+
+    int32_t mediaPlayerServiceDumpFd = memfd_create("OutputDumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mediaPlayerService->dump(mediaPlayerServiceDumpFd, args);
+    close(mediaPlayerServiceDumpFd);
+
+    if (!mMediaPlayer) {
+        return;
+    }
+
+    if (setDataSource(data, size)) {
+        invokeMediaPlayer();
+    }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ MediaPlayerServiceFuzzer mpsFuzzer(data, size);
+ ProcessState::self()->startThreadPool();
+ mpsFuzzer.process(data, size);
+ return 0;
+};
diff --git a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
new file mode 100644
index 0000000..b0040fe
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <media/stagefright/foundation/AString.h>
+#include "fuzzer/FuzzedDataProvider.h"
+
+#include <StagefrightRecorder.h>
+#include <camera/Camera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/stagefright/PersistentSurface.h>
+#include <thread>
+
+using namespace std;
+using namespace android;
+using namespace android::hardware;
+
+// Video sources accepted by setVideoSource().
+constexpr video_source kSupportedVideoSources[] = {VIDEO_SOURCE_DEFAULT, VIDEO_SOURCE_CAMERA,
+                                                   VIDEO_SOURCE_SURFACE};
+
+// Audio sources accepted by setAudioSource().
+constexpr audio_source_t kSupportedAudioSources[] = {
+    AUDIO_SOURCE_DEFAULT, AUDIO_SOURCE_MIC,
+    AUDIO_SOURCE_VOICE_UPLINK, AUDIO_SOURCE_VOICE_DOWNLINK,
+    AUDIO_SOURCE_VOICE_CALL, AUDIO_SOURCE_CAMCORDER,
+    AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_SOURCE_VOICE_COMMUNICATION,
+    AUDIO_SOURCE_REMOTE_SUBMIX, AUDIO_SOURCE_UNPROCESSED,
+    AUDIO_SOURCE_VOICE_PERFORMANCE, AUDIO_SOURCE_ECHO_REFERENCE,
+    AUDIO_SOURCE_FM_TUNER, AUDIO_SOURCE_HOTWORD};
+
+// Microphone directions for setPreferredMicrophoneDirection().
+constexpr audio_microphone_direction_t kSupportedMicrophoneDirections[] = {
+    MIC_DIRECTION_UNSPECIFIED, MIC_DIRECTION_FRONT, MIC_DIRECTION_BACK, MIC_DIRECTION_EXTERNAL};
+
+// Bundles a container format with audio/video encoders used together.
+struct RecordingConfig {
+    output_format outputFormat;
+    audio_encoder audioEncoder;
+    video_encoder videoEncoder;
+};
+
+// (container, audio codec, video codec) combinations the fuzzer picks from.
+const struct RecordingConfig kRecordingConfigList[] = {
+    {OUTPUT_FORMAT_AMR_NB, AUDIO_ENCODER_AMR_NB, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AMR_WB, AUDIO_ENCODER_AMR_WB, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_HE_AAC, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC_ELD, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_OGG, AUDIO_ENCODER_OPUS, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_RTP_AVP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_DEFAULT},
+    {OUTPUT_FORMAT_MPEG2TS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
+    {OUTPUT_FORMAT_WEBM, AUDIO_ENCODER_VORBIS, VIDEO_ENCODER_VP8},
+    {OUTPUT_FORMAT_THREE_GPP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
+    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_HEVC}};
+
+// Keys understood by setParameters("key=value").
+const string kParametersList[] = {"max-duration",
+                                  "max-filesize",
+                                  "interleave-duration-us",
+                                  "param-movie-time-scale",
+                                  "param-geotag-longitude",
+                                  "param-geotag-latitude",
+                                  "param-track-time-status",
+                                  "audio-param-sampling-rate",
+                                  "audio-param-encoding-bitrate",
+                                  "audio-param-number-of-channels",
+                                  "audio-param-time-scale",
+                                  "video-param-rotation-angle-degrees",
+                                  "video-param-encoding-bitrate",
+                                  "video-param-bitrate-mode",
+                                  "video-param-i-frames-interval",
+                                  "video-param-encoder-profile",
+                                  "video-param-encoder-level",
+                                  "video-param-camera-id",
+                                  "video-param-time-scale",
+                                  "param-use-64bit-offset",
+                                  "time-lapse-enable",
+                                  "time-lapse-fps",
+                                  "rtp-param-local-ip",
+                                  "rtp-param-local-port",
+                                  "rtp-param-remote-port",
+                                  "rtp-param-self-id",
+                                  "rtp-param-opponent-id",
+                                  "rtp-param-payload-type",
+                                  "rtp-param-ext-cvo-extmap",
+                                  "rtp-param-ext-cvo-degrees",
+                                  "video-param-request-i-frame",
+                                  "rtp-param-set-socket-dscp",
+                                  "rtp-param-set-socket-network"};
+
+// Bounds used to constrain fuzzed values.
+constexpr int32_t kMaxSleepTimeInMs = 100;
+constexpr int32_t kMinSleepTimeInMs = 0;
+constexpr int32_t kMinVideoSize = 2;
+constexpr int32_t kMaxVideoSize = 8192;
+constexpr int32_t kNumRecordMin = 1;
+constexpr int32_t kNumRecordMax = 10;
+
+// No-op audio-device callback handed to setAudioDeviceCallback().
+class TestAudioDeviceCallback : public AudioSystem::AudioDeviceCallback {
+  public:
+    virtual ~TestAudioDeviceCallback() = default;
+
+    void onAudioDeviceUpdate(audio_io_handle_t /*audioIo*/,
+                             audio_port_handle_t /*deviceId*/) override{};
+};
+
+// Stub ICamera: every operation succeeds without touching any real camera,
+// so StagefrightRecorder::setCamera() can be exercised hermetically.
+class TestCamera : public ICamera {
+  public:
+    virtual ~TestCamera() = default;
+
+    binder::Status disconnect() override { return binder::Status::ok(); };
+    status_t connect(const sp<ICameraClient> & /*client*/) override { return 0; };
+    status_t lock() override { return 0; };
+    status_t unlock() override { return 0; };
+    status_t setPreviewTarget(const sp<IGraphicBufferProducer> & /*bufferProducer*/) override {
+        return 0;
+    };
+    void setPreviewCallbackFlag(int /*flag*/) override{};
+    status_t setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer> & /*callbackProducer*/) override {
+        return 0;
+    };
+    status_t startPreview() override { return 0; };
+    void stopPreview() override{};
+    bool previewEnabled() override { return true; };
+    status_t startRecording() override { return 0; };
+    void stopRecording() override{};
+    bool recordingEnabled() override { return true; };
+    void releaseRecordingFrame(const sp<IMemory> & /*mem*/) override{};
+    void releaseRecordingFrameHandle(native_handle_t * /*handle*/) override{};
+    void releaseRecordingFrameHandleBatch(const vector<native_handle_t *> & /*handles*/) override{};
+    status_t autoFocus() override { return 0; };
+    status_t cancelAutoFocus() override { return 0; };
+    status_t takePicture(int /*msgType*/) override { return 0; };
+    status_t setParameters(const String8 & /*params*/) override { return 0; };
+    String8 getParameters() const override { return String8(); };
+    status_t sendCommand(int32_t /*cmd*/, int32_t /*arg1*/, int32_t /*arg2*/) override {
+        return 0;
+    };
+    status_t setVideoBufferMode(int32_t /*videoBufferMode*/) override { return 0; };
+    status_t setVideoTarget(const sp<IGraphicBufferProducer> & /*bufferProducer*/) override {
+        return 0;
+    };
+    status_t setAudioRestriction(int32_t /*mode*/) override { return 0; };
+    int32_t getGlobalAudioRestriction() override { return 0; };
+    // NOTE(review): returns 'this' cast to IBinder — only usable in-process.
+    IBinder *onAsBinder() override { return reinterpret_cast<IBinder *>(this); };
+};
+
+// Recorder listener that ignores all notifications.
+class TestMediaRecorderClient : public IMediaRecorderClient {
+  public:
+    virtual ~TestMediaRecorderClient() = default;
+
+    void notify(int /*msg*/, int /*ext1*/, int /*ext2*/) override{};
+    // NOTE(review): returns 'this' cast to IBinder — only usable in-process.
+    IBinder *onAsBinder() override { return reinterpret_cast<IBinder *>(this); };
+};
+
+// Owns a StagefrightRecorder plus the surfaces and fds it records into, and
+// walks it through configure -> record -> query -> teardown.
+class MediaRecorderClientFuzzer {
+  public:
+    MediaRecorderClientFuzzer(const uint8_t *data, size_t size);
+    ~MediaRecorderClientFuzzer() { close(mMediaRecorderOutputFd); }
+    void process();
+
+  private:
+    void setConfig();
+    void getConfig();
+    void dumpInfo();
+
+    FuzzedDataProvider mFdp;  // source of all fuzzed values
+    unique_ptr<MediaRecorderBase> mStfRecorder = nullptr;  // recorder under test
+    SurfaceComposerClient mComposerClient;
+    sp<SurfaceControl> mSurfaceControl = nullptr;
+    sp<Surface> mSurface = nullptr;  // preview surface, set when createSurface succeeds
+    const int32_t mMediaRecorderOutputFd;  // in-memory fd receiving recorder output
+};
+
+// Calls the recorder's query/getter APIs; all returned values are discarded —
+// the point is to execute the code paths, not to validate results.
+void MediaRecorderClientFuzzer::getConfig() {
+    int32_t max;
+    mStfRecorder->getMaxAmplitude(&max);
+
+    int32_t deviceId = mFdp.ConsumeIntegral<int32_t>();
+    mStfRecorder->setInputDevice(deviceId);
+    mStfRecorder->getRoutedDeviceId(&deviceId);
+
+    vector<android::media::MicrophoneInfo> activeMicrophones{};
+    mStfRecorder->getActiveMicrophones(&activeMicrophones);
+
+    int32_t portId;
+    mStfRecorder->getPortId(&portId);
+
+    uint64_t bytes;
+    mStfRecorder->getRtpDataUsage(&bytes);
+
+    Parcel parcel;
+    mStfRecorder->getMetrics(&parcel);
+
+    sp<IGraphicBufferProducer> buffer = mStfRecorder->querySurfaceMediaSource();
+}
+
+// Dumps recorder state to a throwaway in-memory fd with one fuzzed argument.
+void MediaRecorderClientFuzzer::dumpInfo() {
+    int32_t dumpFd = memfd_create("DumpFile", MFD_ALLOW_SEALING);
+    Vector<String16> args;
+    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
+    mStfRecorder->dump(dumpFd, args);
+    close(dumpFd);
+}
+
+// Applies a fuzz-chosen configuration: sources, sizes, one entry from the
+// format/encoder combination list, and a random subset of the string
+// parameters with fuzzed integer values.
+void MediaRecorderClientFuzzer::setConfig() {
+    mStfRecorder->setOutputFile(mMediaRecorderOutputFd);
+    mStfRecorder->setAudioSource(mFdp.PickValueInArray(kSupportedAudioSources));
+    mStfRecorder->setVideoSource(mFdp.PickValueInArray(kSupportedVideoSources));
+    mStfRecorder->setPreferredMicrophoneDirection(
+            mFdp.PickValueInArray(kSupportedMicrophoneDirections));
+    mStfRecorder->setPrivacySensitive(mFdp.ConsumeBool());
+    bool isPrivacySensitive;
+    mStfRecorder->isPrivacySensitive(&isPrivacySensitive);
+    mStfRecorder->setVideoSize(mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize),
+                               mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize));
+    mStfRecorder->setVideoFrameRate(mFdp.ConsumeIntegral<int32_t>());
+    mStfRecorder->enableAudioDeviceCallback(mFdp.ConsumeBool());
+    mStfRecorder->setPreferredMicrophoneFieldDimension(mFdp.ConsumeFloatingPoint<float>());
+    mStfRecorder->setClientName(String16(mFdp.ConsumeRandomLengthString().c_str()));
+
+    // Pick one consistent (container, audio codec, video codec) triple.
+    int32_t Idx = mFdp.ConsumeIntegralInRange<int32_t>(0, size(kRecordingConfigList) - 1);
+    mStfRecorder->setOutputFormat(kRecordingConfigList[Idx].outputFormat);
+    mStfRecorder->setAudioEncoder(kRecordingConfigList[Idx].audioEncoder);
+    mStfRecorder->setVideoEncoder(kRecordingConfigList[Idx].videoEncoder);
+
+    int32_t nextOutputFd = memfd_create("NextOutputFile", MFD_ALLOW_SEALING);
+    mStfRecorder->setNextOutputFile(nextOutputFd);
+    close(nextOutputFd);
+
+    // NOTE(review): int32_t Idx vs size_t size() is a sign-compare mismatch;
+    // harmless at this list size but worth cleaning up.
+    for (Idx = 0; Idx < size(kParametersList); ++Idx) {
+        if (mFdp.ConsumeBool()) {
+            int32_t value = mFdp.ConsumeIntegral<int32_t>();
+            mStfRecorder->setParameters(
+                    String8((kParametersList[Idx] + "=" + to_string(value)).c_str()));
+        }
+    }
+}
+
+// Builds the recorder and attaches its collaborators: a preview surface (when
+// surface creation succeeds), a no-op listener, a stub camera, a persistent
+// input surface and an audio-device callback.
+MediaRecorderClientFuzzer::MediaRecorderClientFuzzer(const uint8_t *data, size_t size)
+    : mFdp(data, size), mMediaRecorderOutputFd(memfd_create("OutputFile", MFD_ALLOW_SEALING)) {
+    AttributionSourceState attributionSource;
+    attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
+    attributionSource.token = sp<BBinder>::make();
+    mStfRecorder = make_unique<StagefrightRecorder>(attributionSource);
+
+    // Fully fuzzed surface parameters; creation may legitimately fail.
+    mSurfaceControl = mComposerClient.createSurface(
+            String8(mFdp.ConsumeRandomLengthString().c_str()), mFdp.ConsumeIntegral<uint32_t>(),
+            mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<int32_t>(),
+            mFdp.ConsumeIntegral<int32_t>());
+    if (mSurfaceControl) {
+        mSurface = mSurfaceControl->getSurface();
+        mStfRecorder->setPreviewSurface(mSurface->getIGraphicBufferProducer());
+    }
+
+    sp<TestMediaRecorderClient> listener = sp<TestMediaRecorderClient>::make();
+    mStfRecorder->setListener(listener);
+
+    sp<TestCamera> testCamera = sp<TestCamera>::make();
+    sp<Camera> camera = Camera::create(testCamera);
+    mStfRecorder->setCamera(camera->remote(), camera->getRecordingProxy());
+
+    sp<PersistentSurface> persistentSurface = sp<PersistentSurface>::make();
+    mStfRecorder->setInputSurface(persistentSurface);
+
+    sp<TestAudioDeviceCallback> callback = sp<TestAudioDeviceCallback>::make();
+    mStfRecorder->setAudioDeviceCallback(callback);
+}
+
+// Full recorder lifecycle: configure, init/prepare, several fuzz-timed
+// start/pause/resume/stop cycles, then dump, query, close and reset.
+void MediaRecorderClientFuzzer::process() {
+    setConfig();
+
+    mStfRecorder->init();
+    mStfRecorder->prepare();
+    size_t numRecord = mFdp.ConsumeIntegralInRange<size_t>(kNumRecordMin, kNumRecordMax);
+    for (size_t Idx = 0; Idx < numRecord; ++Idx) {
+        mStfRecorder->start();
+        this_thread::sleep_for(chrono::milliseconds(
+                mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->pause();
+        this_thread::sleep_for(chrono::milliseconds(
+                mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->resume();
+        this_thread::sleep_for(chrono::milliseconds(
+                mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
+        mStfRecorder->stop();
+    }
+    dumpInfo();
+    getConfig();
+
+    mStfRecorder->close();
+    mStfRecorder->reset();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ MediaRecorderClientFuzzer mrcFuzzer(data, size);
+ mrcFuzzer.process();
+ return 0;
+}
diff --git a/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp
new file mode 100644
index 0000000..a7cb689
--- /dev/null
+++ b/media/libmediaplayerservice/fuzzer/metadataretriever_fuzzer.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <StagefrightMetadataRetriever.h>
+#include <binder/ProcessState.h>
+#include <datasource/FileSource.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/foundation/base64.h>
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+using namespace std;
+using namespace android;
+
+// MIME strings passed to setDataSource(DataSource, mime) in the FromFileSource case.
+const char *kMimeTypes[] = {MEDIA_MIMETYPE_IMAGE_JPEG, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
+                            MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
+                            MEDIA_MIMETYPE_VIDEO_AV1, MEDIA_MIMETYPE_VIDEO_AVC,
+                            MEDIA_MIMETYPE_VIDEO_HEVC, MEDIA_MIMETYPE_VIDEO_MPEG4,
+                            MEDIA_MIMETYPE_VIDEO_H263, MEDIA_MIMETYPE_VIDEO_MPEG2,
+                            MEDIA_MIMETYPE_VIDEO_RAW, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+                            MEDIA_MIMETYPE_VIDEO_SCRAMBLED, MEDIA_MIMETYPE_VIDEO_DIVX,
+                            MEDIA_MIMETYPE_VIDEO_DIVX3, MEDIA_MIMETYPE_VIDEO_XVID,
+                            MEDIA_MIMETYPE_VIDEO_MJPEG, MEDIA_MIMETYPE_AUDIO_AMR_NB,
+                            MEDIA_MIMETYPE_AUDIO_AMR_WB, MEDIA_MIMETYPE_AUDIO_MPEG,
+                            MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+                            MEDIA_MIMETYPE_AUDIO_MIDI, MEDIA_MIMETYPE_AUDIO_AAC,
+                            MEDIA_MIMETYPE_AUDIO_QCELP, MEDIA_MIMETYPE_AUDIO_VORBIS,
+                            MEDIA_MIMETYPE_AUDIO_OPUS, MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+                            MEDIA_MIMETYPE_AUDIO_G711_MLAW, MEDIA_MIMETYPE_AUDIO_RAW,
+                            MEDIA_MIMETYPE_AUDIO_FLAC, MEDIA_MIMETYPE_AUDIO_AAC_ADTS,
+                            MEDIA_MIMETYPE_AUDIO_MSGSM, MEDIA_MIMETYPE_AUDIO_AC3,
+                            MEDIA_MIMETYPE_AUDIO_EAC3, MEDIA_MIMETYPE_AUDIO_EAC3_JOC,
+                            MEDIA_MIMETYPE_AUDIO_AC4, MEDIA_MIMETYPE_AUDIO_SCRAMBLED,
+                            MEDIA_MIMETYPE_AUDIO_ALAC, MEDIA_MIMETYPE_AUDIO_WMA,
+                            MEDIA_MIMETYPE_AUDIO_MS_ADPCM, MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG4, MEDIA_MIMETYPE_CONTAINER_WAV,
+                            MEDIA_MIMETYPE_CONTAINER_OGG, MEDIA_MIMETYPE_CONTAINER_MATROSKA,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG2TS, MEDIA_MIMETYPE_CONTAINER_AVI,
+                            MEDIA_MIMETYPE_CONTAINER_MPEG2PS, MEDIA_MIMETYPE_CONTAINER_HEIF,
+                            MEDIA_MIMETYPE_TEXT_3GPP, MEDIA_MIMETYPE_TEXT_SUBRIP,
+                            MEDIA_MIMETYPE_TEXT_VTT, MEDIA_MIMETYPE_TEXT_CEA_608,
+                            MEDIA_MIMETYPE_TEXT_CEA_708, MEDIA_MIMETYPE_DATA_TIMED_ID3};
+
+// Drives StagefrightMetadataRetriever: points it at a fuzz-chosen data source,
+// then pulls frames, images, album art and metadata from it.
+class MetadataRetrieverFuzzer {
+  public:
+    MetadataRetrieverFuzzer(const uint8_t *data, size_t size)
+        : mFdp(data, size),
+          mMdRetriever(new StagefrightMetadataRetriever()),
+          mDataSourceFd(memfd_create("InputFile", MFD_ALLOW_SEALING)) {}
+    ~MetadataRetrieverFuzzer() { close(mDataSourceFd); }
+    bool setDataSource(const uint8_t *data, size_t size);
+    void getData();
+
+  private:
+    FuzzedDataProvider mFdp;  // source of all fuzzed values
+    sp<StagefrightMetadataRetriever> mMdRetriever = nullptr;  // retriever under test
+    const int32_t mDataSourceFd;  // in-memory fd holding the raw fuzz input
+};
+
+// Exercises the retrieval APIs with fuzzed arguments; returned frames, album
+// art and metadata are intentionally discarded.
+void MetadataRetrieverFuzzer::getData() {
+    int64_t timeUs = mFdp.ConsumeIntegral<int64_t>();
+    int32_t option = mFdp.ConsumeIntegral<int32_t>();
+    int32_t colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    bool metaOnly = mFdp.ConsumeBool();
+    mMdRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
+
+    int32_t index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    metaOnly = mFdp.ConsumeBool();
+    bool thumbnail = mFdp.ConsumeBool();
+    mMdRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+
+    // Crop rectangle is fully fuzzed (may be empty or inverted).
+    index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    int32_t left = mFdp.ConsumeIntegral<int32_t>();
+    int32_t top = mFdp.ConsumeIntegral<int32_t>();
+    int32_t right = mFdp.ConsumeIntegral<int32_t>();
+    int32_t bottom = mFdp.ConsumeIntegral<int32_t>();
+    mMdRetriever->getImageRectAtIndex(index, colorFormat, left, top, right, bottom);
+
+    index = mFdp.ConsumeIntegral<int32_t>();
+    colorFormat = mFdp.ConsumeIntegral<int32_t>();
+    metaOnly = mFdp.ConsumeBool();
+    mMdRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
+
+    mMdRetriever->extractAlbumArt();
+
+    int32_t keyCode = mFdp.ConsumeIntegral<int32_t>();
+    mMdRetriever->extractMetadata(keyCode);
+}
+
+// Points the retriever at one of three fuzz-chosen sources (data URI, plain
+// fd, FileSource + fuzz-picked MIME type). The raw fuzz input is written to
+// mDataSourceFd for the fd-backed flavors. Returns true only when
+// setDataSource() reported status 0.
+bool MetadataRetrieverFuzzer::setDataSource(const uint8_t *data, size_t size) {
+    status_t status = -1;
+
+    enum DataSourceChoice {FromHttp, FromFd, FromFileSource, kMaxValue = FromFileSource};
+    switch (mFdp.ConsumeEnum<DataSourceChoice>()) {
+        case FromHttp: {
+            KeyedVector<String8, String8> mHeaders;
+            mHeaders.add(String8(mFdp.ConsumeRandomLengthString().c_str()),
+                         String8(mFdp.ConsumeRandomLengthString().c_str()));
+
+            // Compose "data:;base64,<encoded slice of the fuzz input>".
+            uint32_t dataBlobSize = mFdp.ConsumeIntegralInRange<uint16_t>(0, size);
+            vector<uint8_t> uriSuffix = mFdp.ConsumeBytes<uint8_t>(dataBlobSize);
+
+            string uri("data:");
+            uri += ";base64,";
+            AString out;
+            encodeBase64(uriSuffix.data(), uriSuffix.size(), &out);
+            uri += out.c_str();
+            status = mMdRetriever->setDataSource(nullptr /*httpService*/, uri.c_str(), &mHeaders);
+            break;
+        }
+        case FromFd: {
+            write(mDataSourceFd, data, size);
+
+            status = mMdRetriever->setDataSource(mDataSourceFd, 0, size);
+            break;
+        }
+        case FromFileSource: {
+            write(mDataSourceFd, data, size);
+
+            // dup() so FileSource owns its own copy of the fd.
+            sp<DataSource> dataSource = new FileSource(dup(mDataSourceFd), 0, size);
+            status = mMdRetriever->setDataSource(dataSource, mFdp.PickValueInArray(kMimeTypes));
+            break;
+        }
+    }
+
+    if (status != 0) {
+        return false;
+    }
+    return true;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ MetadataRetrieverFuzzer mrtFuzzer(data, size);
+ ProcessState::self()->startThreadPool();
+ if (mrtFuzzer.setDataSource(data, size)) {
+ mrtFuzzer.getData();
+ }
+ return 0;
+}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 4a65f71..2828d44 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1673,24 +1673,18 @@
mDrainAudioQueuePending = false;
- if (offloadingAudio()) {
- mAudioSink->pause();
- mAudioSink->flush();
- if (!mPaused) {
- mAudioSink->start();
- }
- } else {
- mAudioSink->pause();
- mAudioSink->flush();
+ mAudioSink->pause();
+ mAudioSink->flush();
+ if (!offloadingAudio()) {
// Call stop() to signal to the AudioSink to completely fill the
// internal buffer before resuming playback.
// FIXME: this is ignored after flush().
mAudioSink->stop();
- if (!mPaused) {
- mAudioSink->start();
- }
mNumFramesWritten = 0;
}
+ if (!mPaused) {
+ mAudioSink->start();
+ }
mNextAudioClockUpdateTimeUs = -1;
} else {
flushQueue(&mVideoQueue);
diff --git a/media/libmediaplayerservice/tests/Android.bp b/media/libmediaplayerservice/tests/Android.bp
index 98626fd..99202b8 100644
--- a/media/libmediaplayerservice/tests/Android.bp
+++ b/media/libmediaplayerservice/tests/Android.bp
@@ -30,7 +30,7 @@
],
static_libs: [
- "resourcemanager_aidl_interface-ndk_platform",
+ "resourcemanager_aidl_interface-ndk",
],
include_dirs: [
diff --git a/media/libmediatranscoding/Android.bp b/media/libmediatranscoding/Android.bp
index 042850c..937650f 100644
--- a/media/libmediatranscoding/Android.bp
+++ b/media/libmediatranscoding/Android.bp
@@ -106,8 +106,8 @@
export_include_dirs: ["include"],
static_libs: [
- "mediatranscoding_aidl_interface-ndk_platform",
- "resourceobserver_aidl_interface-V1-ndk_platform",
+ "mediatranscoding_aidl_interface-ndk",
+ "resourceobserver_aidl_interface-V1-ndk",
"libstatslog_media",
],
diff --git a/media/libmediatranscoding/tests/Android.bp b/media/libmediatranscoding/tests/Android.bp
index 603611a..7a6980f 100644
--- a/media/libmediatranscoding/tests/Android.bp
+++ b/media/libmediatranscoding/tests/Android.bp
@@ -31,7 +31,7 @@
],
static_libs: [
- "mediatranscoding_aidl_interface-ndk_platform",
+ "mediatranscoding_aidl_interface-ndk",
"libmediatranscoding",
],
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index e20f7ab..411b6ef 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -55,8 +55,8 @@
AMediaFormat_getString(srcFormat, AMEDIAFORMAT_KEY_MIME, &srcMime);
if (!AMediaFormat_getString(options, AMEDIAFORMAT_KEY_MIME, &dstMime) ||
strcmp(srcMime, dstMime) == 0) {
- srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, String));
- srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, String));
+ srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, Int32));
+ srcParamsToCopy.push_back(ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, Int32));
}
// ------- Define parameters to copy from the caller's options -------
diff --git a/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h b/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
index 348b4f8..635f67f 100644
--- a/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
+++ b/media/libnbaio/include/media/nbaio/AudioStreamOutSink.h
@@ -42,10 +42,6 @@
//virtual size_t framesUnderrun() const;
//virtual size_t underruns() const;
- // This is an over-estimate, and could dupe the caller into making a blocking write()
- // FIXME Use an audio HAL API to query the buffer emptying status when it's available.
- virtual ssize_t availableToWrite() { return mStreamBufferSizeBytes / mFrameSize; }
-
virtual ssize_t write(const void *buffer, size_t count);
 virtual status_t getTimestamp(ExtendedTimestamp &timestamp);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 1aa1848..a4fbbbc 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -3304,10 +3304,12 @@
if (err != OK) {
ALOGE("native_window_set_sideband_stream(%p) failed! (err %d).",
sidebandHandle, err);
- return err;
}
- return OK;
+ native_handle_close(sidebandHandle);
+ native_handle_delete(sidebandHandle);
+
+ return err;
}
status_t ACodec::setVideoPortFormatType(
@@ -5395,21 +5397,21 @@
err = mOMXNode->getParameter(
(OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
&presentation, sizeof(presentation));
- if (err != OK) {
- return err;
+ if (err == OK) {
+ notify->setInt32("aac-encoded-target-level",
+ presentation.nEncodedTargetLevel);
+ notify->setInt32("aac-drc-cut-level", presentation.nDrcCut);
+ notify->setInt32("aac-drc-boost-level", presentation.nDrcBoost);
+ notify->setInt32("aac-drc-heavy-compression",
+ presentation.nHeavyCompression);
+ notify->setInt32("aac-target-ref-level",
+ presentation.nTargetReferenceLevel);
+ notify->setInt32("aac-drc-effect-type",
+ presentation.nDrcEffectType);
+ notify->setInt32("aac-drc-album-mode", presentation.nDrcAlbumMode);
+ notify->setInt32("aac-drc-output-loudness",
+ presentation.nDrcOutputLoudness);
}
- notify->setInt32("aac-encoded-target-level",
- presentation.nEncodedTargetLevel);
- notify->setInt32("aac-drc-cut-level", presentation.nDrcCut);
- notify->setInt32("aac-drc-boost-level", presentation.nDrcBoost);
- notify->setInt32("aac-drc-heavy-compression",
- presentation.nHeavyCompression);
- notify->setInt32("aac-target-ref-level",
- presentation.nTargetReferenceLevel);
- notify->setInt32("aac-drc-effect-type", presentation.nDrcEffectType);
- notify->setInt32("aac-drc-album-mode", presentation.nDrcAlbumMode);
- notify->setInt32("aac-drc-output-loudness",
- presentation.nDrcOutputLoudness);
}
}
break;
@@ -5431,6 +5433,7 @@
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSampleRate);
notify->setInt32("bitrate", params.nBitRate);
+ notify->setInt32("aac-profile", params.eAACProfile);
break;
}
@@ -9205,4 +9208,19 @@
return OK;
}
+status_t ACodec::querySupportedParameters(std::vector<std::string> *names) {
+ if (!names) {
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+status_t ACodec::subscribeToParameters([[maybe_unused]] const std::vector<std::string> &names) {
+ return OK;
+}
+
+status_t ACodec::unsubscribeFromParameters([[maybe_unused]] const std::vector<std::string> &names) {
+ return OK;
+}
+
} // namespace android
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index a052a70..e47e7ff 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -109,6 +109,7 @@
srcs: [
"CodecBase.cpp",
+ "DataConverter.cpp",
"FrameRenderTracker.cpp",
"MediaCodecListWriter.cpp",
"SkipCutBuffer.cpp",
@@ -125,6 +126,7 @@
],
shared_libs: [
+ "libaudioutils",
"libgui",
"libhidlallocatorutils",
"liblog",
@@ -266,7 +268,6 @@
"CallbackMediaSource.cpp",
"CameraSource.cpp",
"CameraSourceTimeLapse.cpp",
- "DataConverter.cpp",
"FrameDecoder.cpp",
"HevcUtils.cpp",
"InterfaceUtils.cpp",
@@ -340,6 +341,7 @@
"android.hardware.media.omx@1.0",
"framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
+ "packagemanager_aidl-cpp",
],
static_libs: [
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index efd4070..01cb9b3 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -262,13 +262,10 @@
}
bool isHDR(const sp<AMessage> &format) {
- uint32_t standard, range, transfer;
+ uint32_t standard, transfer;
if (!format->findInt32("color-standard", (int32_t*)&standard)) {
standard = 0;
}
- if (!format->findInt32("color-range", (int32_t*)&range)) {
- range = 0;
- }
if (!format->findInt32("color-transfer", (int32_t*)&transfer)) {
transfer = 0;
}
@@ -796,8 +793,16 @@
if (overrideMeta == NULL) {
// check if we're dealing with a tiled heif
int32_t tileWidth, tileHeight, gridRows, gridCols;
+ int32_t widthColsProduct = 0;
+ int32_t heightRowsProduct = 0;
if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
- if (mWidth <= tileWidth * gridCols && mHeight <= tileHeight * gridRows) {
+ if (__builtin_mul_overflow(tileWidth, gridCols, &widthColsProduct) ||
+ __builtin_mul_overflow(tileHeight, gridRows, &heightRowsProduct)) {
+ ALOGE("Multiplication overflowed Grid size: %dx%d, Picture size: %dx%d",
+ gridCols, gridRows, tileWidth, tileHeight);
+ return nullptr;
+ }
+ if (mWidth <= widthColsProduct && mHeight <= heightRowsProduct) {
ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
@@ -883,9 +888,18 @@
}
int32_t width, height, stride;
- CHECK(outputFormat->findInt32("width", &width));
- CHECK(outputFormat->findInt32("height", &height));
- CHECK(outputFormat->findInt32("stride", &stride));
+ if (outputFormat->findInt32("width", &width) == false) {
+ ALOGE("MediaImageDecoder::onOutputReceived:width is missing in outputFormat");
+ return ERROR_MALFORMED;
+ }
+ if (outputFormat->findInt32("height", &height) == false) {
+ ALOGE("MediaImageDecoder::onOutputReceived:height is missing in outputFormat");
+ return ERROR_MALFORMED;
+ }
+ if (outputFormat->findInt32("stride", &stride) == false) {
+ ALOGE("MediaImageDecoder::onOutputReceived:stride is missing in outputFormat");
+ return ERROR_MALFORMED;
+ }
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
diff --git a/media/libstagefright/MediaAppender.cpp b/media/libstagefright/MediaAppender.cpp
index 5d80b30..21dcfa1 100644
--- a/media/libstagefright/MediaAppender.cpp
+++ b/media/libstagefright/MediaAppender.cpp
@@ -75,10 +75,21 @@
return status;
}
- if (strcmp("MPEG4Extractor", mExtractor->getName()) == 0) {
+ sp<AMessage> fileFormat;
+ status = mExtractor->getFileFormat(&fileFormat);
+ if (status != OK) {
+ ALOGE("extractor_getFileFormat failed, status :%d", status);
+ return status;
+ }
+
+ AString fileMime;
+ fileFormat->findString("mime", &fileMime);
+ // only compare the end of the file MIME type to allow for vendor customized mime type
+ if (fileMime.endsWith("mp4")){
mFormat = MediaMuxer::OUTPUT_FORMAT_MPEG_4;
} else {
- ALOGE("Unsupported format, extractor name:%s", mExtractor->getName());
+ ALOGE("Unsupported file format, extractor name:%s, fileformat %s",
+ mExtractor->getName(), fileMime.c_str());
return ERROR_UNSUPPORTED;
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c03236a..1ea3f99 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1511,6 +1511,9 @@
uint32_t flags) {
sp<AMessage> msg = new AMessage(kWhatConfigure, this);
+ // TODO: validity check log-session-id: it should be a 32-hex-digit.
+ format->findString("log-session-id", &mLogSessionId);
+
if (mMetricsHandle != 0) {
int32_t profile = 0;
if (format->findInt32("profile", &profile)) {
@@ -1522,11 +1525,11 @@
}
mediametrics_setInt32(mMetricsHandle, kCodecEncoder,
(flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
+
+ mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
}
if (mIsVideo) {
- // TODO: validity check log-session-id: it should be a 32-hex-digit.
- format->findString("log-session-id", &mLogSessionId);
format->findInt32("width", &mVideoWidth);
format->findInt32("height", &mVideoHeight);
if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
@@ -1534,7 +1537,6 @@
}
if (mMetricsHandle != 0) {
- mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
mediametrics_setInt32(mMetricsHandle, kCodecWidth, mVideoWidth);
mediametrics_setInt32(mMetricsHandle, kCodecHeight, mVideoHeight);
mediametrics_setInt32(mMetricsHandle, kCodecRotation, mRotationDegrees);
@@ -3185,8 +3187,11 @@
mediametrics_setInt32(mMetricsHandle, kCodecSecure, 0);
}
- if (mIsVideo) {
- // audio codec is currently ignored.
+ MediaCodecInfo::Attributes attr = mCodecInfo
+ ? mCodecInfo->getAttributes()
+ : MediaCodecInfo::Attributes(0);
+ if (!(attr & MediaCodecInfo::kFlagIsSoftwareOnly)) {
+ // software codec is currently ignored.
mResourceManagerProxy->addResource(
MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
}
@@ -3502,6 +3507,20 @@
case kWhatDrainThisBuffer:
{
+ if ((mFlags & kFlagUseBlockModel) == 0 && mTunneled) {
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+ if (mFlags & kFlagIsAsync) {
+ // In asynchronous mode, output format change is processed immediately.
+ handleOutputFormatChangeIfNeeded(buffer);
+ } else {
+ postActivityNotificationIfPossible();
+ }
+ mBufferChannel->discardBuffer(buffer);
+ break;
+ }
+
/* size_t index = */updateBuffers(kPortIndexOutput, msg);
if (mState == FLUSHING
@@ -4775,8 +4794,8 @@
}
const CryptoPlugin::SubSample *subSamples;
size_t numSubSamples;
- const uint8_t *key;
- const uint8_t *iv;
+ const uint8_t *key = NULL;
+ const uint8_t *iv = NULL;
CryptoPlugin::Mode mode = CryptoPlugin::kMode_Unencrypted;
// We allow the simpler queueInputBuffer API to be used even in
@@ -4791,8 +4810,6 @@
subSamples = &ss;
numSubSamples = 1;
- key = NULL;
- iv = NULL;
pattern.mEncryptBlocks = 0;
pattern.mSkipBlocks = 0;
}
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 6243828..2ffe728 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -509,6 +509,29 @@
}
}
}
+
+ int32_t profile = -1;
+ if (format->findInt32("profile", &profile)) {
+ int32_t level = -1;
+ format->findInt32("level", &level);
+ Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+ capabilities->getSupportedProfileLevels(&profileLevels);
+ auto it = profileLevels.begin();
+ for (; it != profileLevels.end(); ++it) {
+ if (profile != it->mProfile) {
+ continue;
+ }
+ if (level > -1 && level > it->mLevel) {
+ continue;
+ }
+ break;
+ }
+
+ if (it == profileLevels.end()) {
+ ALOGV("Codec does not support profile %d with level %d", profile, level);
+ return false;
+ }
+ }
}
// haven't found a reason to discard this one
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0107c32..b07f8f7 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -943,10 +943,17 @@
sp<MediaCodecBuffer> outbuf;
status_t err = mEncoder->getOutputBuffer(index, &outbuf);
- if (err != OK || outbuf == NULL || outbuf->data() == NULL
- || outbuf->size() == 0) {
+ if (err != OK || outbuf == NULL || outbuf->data() == NULL) {
signalEOS();
break;
+ } else if (outbuf->size() == 0) {
+ // Zero length CSD buffers are not treated as an error
+ if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
+ mEncoder->releaseOutputBuffer(index);
+ } else {
+ signalEOS();
+ }
+ break;
}
MediaBufferBase *mbuf = new MediaBuffer(outbuf->size());
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index 0bc5976..0f5e95e 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -67,7 +67,11 @@
mFd = -1;
}
- free(mOs);
+ if (mOs != nullptr) {
+ ogg_stream_clear(mOs);
+ free(mOs);
+ mOs = nullptr;
+ }
}
status_t OggWriter::initCheck() const {
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index ee9016d..de91533 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -145,7 +145,19 @@
if (available < num) {
int32_t newcapacity = mCapacity + (num - available);
char * newbuffer = new char[newcapacity];
- memcpy(newbuffer, mCutBuffer, mCapacity);
+ if (mWriteHead < mReadHead) {
+ // data isn't continuous, need to memcpy twice
+ // to move previous data to new buffer.
+ size_t copyLeft = mCapacity - mReadHead;
+ memcpy(newbuffer, mCutBuffer + mReadHead, copyLeft);
+ memcpy(newbuffer + copyLeft, mCutBuffer, mWriteHead);
+ mReadHead = 0;
+ mWriteHead += copyLeft;
+ } else {
+ memcpy(newbuffer, mCutBuffer + mReadHead, mWriteHead - mReadHead);
+ mWriteHead -= mReadHead;
+ mReadHead = 0;
+ }
delete [] mCutBuffer;
mCapacity = newcapacity;
mCutBuffer = newbuffer;
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 7ce2968..0987a5b 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -20,13 +20,24 @@
},
{
"exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
+ "name": "CtsMediaAudioTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
},
// TODO: b/149314419
{
- "exclude-filter": "android.media.cts.AudioPlaybackCaptureTest"
+ "exclude-filter": "android.media.audio.cts.AudioPlaybackCaptureTest"
},
{
- "exclude-filter": "android.media.cts.AudioRecordTest"
+ "exclude-filter": "android.media.audio.cts.AudioRecordTest"
}
]
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4c18f87..a6df5bb 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1759,24 +1759,39 @@
if (mime.startsWith("video/") || mime.startsWith("image/")) {
int32_t width;
int32_t height;
- if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
- meta->setInt32(kKeyWidth, width);
- meta->setInt32(kKeyHeight, height);
- } else {
+ if (!msg->findInt32("width", &width) || !msg->findInt32("height", &height)) {
ALOGV("did not find width and/or height");
return BAD_VALUE;
}
+ if (width <= 0 || height <= 0) {
+ ALOGE("Invalid value of width: %d and/or height: %d", width, height);
+ return BAD_VALUE;
+ }
+ meta->setInt32(kKeyWidth, width);
+ meta->setInt32(kKeyHeight, height);
- int32_t sarWidth, sarHeight;
- if (msg->findInt32("sar-width", &sarWidth)
- && msg->findInt32("sar-height", &sarHeight)) {
+ int32_t sarWidth = -1, sarHeight = -1;
+ bool foundWidth, foundHeight;
+ foundWidth = msg->findInt32("sar-width", &sarWidth);
+ foundHeight = msg->findInt32("sar-height", &sarHeight);
+ if (foundWidth || foundHeight) {
+ if (sarWidth <= 0 || sarHeight <= 0) {
+ ALOGE("Invalid value of sarWidth: %d and/or sarHeight: %d", sarWidth, sarHeight);
+ return BAD_VALUE;
+ }
meta->setInt32(kKeySARWidth, sarWidth);
meta->setInt32(kKeySARHeight, sarHeight);
}
- int32_t displayWidth, displayHeight;
- if (msg->findInt32("display-width", &displayWidth)
- && msg->findInt32("display-height", &displayHeight)) {
+ int32_t displayWidth = -1, displayHeight = -1;
+ foundWidth = msg->findInt32("display-width", &displayWidth);
+ foundHeight = msg->findInt32("display-height", &displayHeight);
+ if (foundWidth || foundHeight) {
+ if (displayWidth <= 0 || displayHeight <= 0) {
+ ALOGE("Invalid value of displayWidth: %d and/or displayHeight: %d",
+ displayWidth, displayHeight);
+ return BAD_VALUE;
+ }
meta->setInt32(kKeyDisplayWidth, displayWidth);
meta->setInt32(kKeyDisplayHeight, displayHeight);
}
@@ -1786,17 +1801,29 @@
if (msg->findInt32("is-default", &isPrimary) && isPrimary) {
meta->setInt32(kKeyTrackIsDefault, 1);
}
- int32_t tileWidth, tileHeight, gridRows, gridCols;
- if (msg->findInt32("tile-width", &tileWidth)) {
+ int32_t tileWidth = -1, tileHeight = -1;
+ foundWidth = msg->findInt32("tile-width", &tileWidth);
+ foundHeight = msg->findInt32("tile-height", &tileHeight);
+ if (foundWidth || foundHeight) {
+ if (tileWidth <= 0 || tileHeight <= 0) {
+ ALOGE("Invalid value of tileWidth: %d and/or tileHeight: %d",
+ tileWidth, tileHeight);
+ return BAD_VALUE;
+ }
meta->setInt32(kKeyTileWidth, tileWidth);
- }
- if (msg->findInt32("tile-height", &tileHeight)) {
meta->setInt32(kKeyTileHeight, tileHeight);
}
- if (msg->findInt32("grid-rows", &gridRows)) {
+ int32_t gridRows = -1, gridCols = -1;
+ bool foundRows, foundCols;
+ foundRows = msg->findInt32("grid-rows", &gridRows);
+ foundCols = msg->findInt32("grid-cols", &gridCols);
+ if (foundRows || foundCols) {
+ if (gridRows <= 0 || gridCols <= 0) {
+ ALOGE("Invalid value of gridRows: %d and/or gridCols: %d",
+ gridRows, gridCols);
+ return BAD_VALUE;
+ }
meta->setInt32(kKeyGridRows, gridRows);
- }
- if (msg->findInt32("grid-cols", &gridCols)) {
meta->setInt32(kKeyGridCols, gridCols);
}
}
@@ -1812,6 +1839,14 @@
&cropTop,
&cropRight,
&cropBottom)) {
+ if (cropLeft < 0 || cropLeft > cropRight || cropRight >= width) {
+ ALOGE("Invalid value of cropLeft: %d and/or cropRight: %d", cropLeft, cropRight);
+ return BAD_VALUE;
+ }
+ if (cropTop < 0 || cropTop > cropBottom || cropBottom >= height) {
+ ALOGE("Invalid value of cropTop: %d and/or cropBottom: %d", cropTop, cropBottom);
+ return BAD_VALUE;
+ }
meta->setRect(kKeyCropRect, cropLeft, cropTop, cropRight, cropBottom);
}
@@ -1855,9 +1890,16 @@
ALOGV("did not find channel-count and/or sample-rate");
return BAD_VALUE;
}
+ // channel count can be zero in some cases like mpeg h
+ if (sampleRate <= 0 || numChannels < 0) {
+ ALOGE("Invalid value of channel-count: %d and/or sample-rate: %d",
+ numChannels, sampleRate);
+ return BAD_VALUE;
+ }
meta->setInt32(kKeyChannelCount, numChannels);
meta->setInt32(kKeySampleRate, sampleRate);
int32_t bitsPerSample;
+ // TODO:(b/204430952) add appropriate bound check for bitsPerSample
if (msg->findInt32("bits-per-sample", &bitsPerSample)) {
meta->setInt32(kKeyBitsPerSample, bitsPerSample);
}
diff --git a/media/libstagefright/colorconversion/Android.bp b/media/libstagefright/colorconversion/Android.bp
index 06cebd3..7ff9b10 100644
--- a/media/libstagefright/colorconversion/Android.bp
+++ b/media/libstagefright/colorconversion/Android.bp
@@ -25,10 +25,6 @@
"SoftwareRenderer.cpp",
],
- include_dirs: [
- "frameworks/native/include/media/openmax",
- ],
-
shared_libs: [
"libui",
"libnativewindow",
@@ -37,6 +33,7 @@
header_libs: [
"libstagefright_headers",
"libstagefright_foundation_headers",
+ "media_plugin_headers",
],
static_libs: ["libyuv_static"],
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index acc9e87..b46a271 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -22,10 +22,6 @@
"ZeroFilter.cpp",
],
- include_dirs: [
- "frameworks/native/include/media/openmax",
- ],
-
cflags: [
"-Wno-multichar",
"-Werror",
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index dd2c66f..5f86c22 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -33,18 +33,13 @@
},
host_supported: true,
double_loadable: true,
- include_dirs: [
- "frameworks/av/include",
- "frameworks/native/include",
- "frameworks/native/libs/arect/include",
- "frameworks/native/libs/nativebase/include",
- ],
local_include_dirs: [
"include/media/stagefright/foundation",
],
header_libs: [
+ "av-headers",
// this is only needed for the vendor variant that removes libbinder, but vendor
// target below does not allow adding header_libs.
"libbinder_headers",
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index ada5d81..5c4ec17 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -60,12 +60,66 @@
const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1 = "audio/mha1";
const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1 = "audio/mhm1";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3 = "audio/mhm1.03";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4 = "audio/mhm1.04";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3 = "audio/mhm1.0d";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4 = "audio/mhm1.0e";
const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM = "audio/x-adpcm-ms";
const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM = "audio/x-adpcm-dvi-ima";
-
+const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/vnd.dts";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_HD = "audio/vnd.dts.hd";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD = "audio/vnd.dts.uhd";
+const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCB = "audio/evrcb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCWB = "audio/evrcwb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCNW = "audio/evrcnw";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb+";
+const char *MEDIA_MIMETYPE_AUDIO_APTX = "audio/aptx";
+const char *MEDIA_MIMETYPE_AUDIO_DRA = "audio/vnd.dra";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT = "audio/vnd.dolby.mat";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0 = "audio/vnd.dolby.mat.1.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0 = "audio/vnd.dolby.mat.2.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1 = "audio/vnd.dolby.mat.2.1";
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD = "audio/vnd.dolby.mlp";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4 = "audio/mp4a.40";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN = "audio/mp4a.40.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LC = "audio/mp4a.40.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR = "audio/mp4a.40.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP = "audio/mp4a.40.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1 = "audio/mp4a.40.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE = "audio/mp4a.40.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC = "audio/mp4a.40.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LD = "audio/mp4a.40.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2 = "audio/mp4a.40.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD = "audio/mp4a.40.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE = "audio/mp4a.40.42";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF = "audio/aac-adif";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN = "audio/aac-adts.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC = "audio/aac-adts.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR = "audio/aac-adts.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP = "audio/aac-adts.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1 = "audio/aac-adts.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE = "audio/aac-adts.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC = "audio/aac-adts.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD = "audio/aac-adts.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2 = "audio/aac-adts.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD = "audio/aac-adts.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE = "audio/aac-adts.42";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC = "audio/mp4a-latm.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1 = "audio/mp4a-latm.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2 = "audio/mp4a-latm.29";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC61937 = "audio/x-iec61937";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC60958 = "audio/x-iec60958";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index f5cecef..fb8c299 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -62,12 +62,59 @@
extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4;
extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
extern const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM;
extern const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM;
-
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCWB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCNW;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_APTX;
+extern const char *MEDIA_MIMETYPE_AUDIO_DRA;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC61937;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC60958;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/media/libstagefright/foundation/tests/AVCUtils/Android.bp b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
index 594da56..ee7db21 100644
--- a/media/libstagefright/foundation/tests/AVCUtils/Android.bp
+++ b/media/libstagefright/foundation/tests/AVCUtils/Android.bp
@@ -43,10 +43,6 @@
"libstagefright_foundation",
],
- include_dirs: [
- "frameworks/av/media/libstagefright/foundation",
- ],
-
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libstagefright/foundation/tests/Android.bp b/media/libstagefright/foundation/tests/Android.bp
index e50742e..e72ce43 100644
--- a/media/libstagefright/foundation/tests/Android.bp
+++ b/media/libstagefright/foundation/tests/Android.bp
@@ -18,10 +18,6 @@
"-Wall",
],
- include_dirs: [
- "frameworks/av/include",
- ],
-
shared_libs: [
"liblog",
"libstagefright_foundation",
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 0b0acbf..7acf735 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -29,7 +29,6 @@
],
include_dirs: [
- "frameworks/av/media/libstagefright",
"frameworks/native/include/media/openmax",
],
@@ -65,6 +64,8 @@
header_libs: [
"libbase_headers",
+ "libstagefright_headers",
+ "libstagefright_httplive_headers",
],
static_libs: [
@@ -74,3 +75,8 @@
],
}
+
+cc_library_headers {
+ name: "libstagefright_httplive_headers",
+ export_include_dirs: ["."],
+}
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 3bad015..0d7cadd 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -23,7 +23,7 @@
#include "M3UParser.h"
#include "PlaylistFetcher.h"
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <AnotherPacketSource.h>
#include <cutils/properties.h>
#include <media/MediaHTTPService.h>
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 7a6d487..ceea41d 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -24,7 +24,7 @@
#include <utils/String8.h>
-#include "mpeg2ts/ATSParser.h"
+#include <ATSParser.h>
namespace android {
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index b23aa8a..907b326 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -24,9 +24,9 @@
#include "HTTPDownloader.h"
#include "LiveSession.h"
#include "M3UParser.h"
-#include "include/ID3.h"
-#include "mpeg2ts/AnotherPacketSource.h"
-#include "mpeg2ts/HlsSampleDecryptor.h"
+#include <ID3.h>
+#include <AnotherPacketSource.h>
+#include <HlsSampleDecryptor.h>
#include <datasource/DataURISource.h>
#include <media/stagefright/foundation/ABitReader.h>
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 5d3f9c1..2e28164 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -21,7 +21,7 @@
#include <media/stagefright/foundation/AHandler.h>
#include <openssl/aes.h>
-#include "mpeg2ts/ATSParser.h"
+#include <ATSParser.h>
#include "LiveSession.h"
namespace android {
diff --git a/media/libstagefright/httplive/fuzzer/Android.bp b/media/libstagefright/httplive/fuzzer/Android.bp
new file mode 100644
index 0000000..14097b0
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/Android.bp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_media_libstagefright_httplive_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: [
+ "frameworks_av_media_libstagefright_httplive_license",
+ ],
+}
+
+cc_fuzz {
+ name: "httplive_fuzzer",
+ srcs: [
+ "httplive_fuzzer.cpp",
+ ],
+ static_libs: [
+ "libstagefright_httplive",
+ "libstagefright_id3",
+ "libstagefright_metadatautils",
+ "libstagefright_mpeg2support",
+ "liblog",
+ "libcutils",
+ "libdatasource",
+ "libmedia",
+ "libstagefright",
+ "libutils",
+ ],
+ header_libs: [
+ "libbase_headers",
+ "libstagefright_foundation_headers",
+ "libstagefright_headers",
+ "libstagefright_httplive_headers",
+ ],
+ shared_libs: [
+ "libcrypto",
+ "libstagefright_foundation",
+ "libhidlbase",
+ "libhidlmemory",
+ "android.hidl.allocator@1.0",
+ ],
+ corpus: ["corpus/*"],
+ dictionary: "httplive_fuzzer.dict",
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
diff --git a/media/libstagefright/httplive/fuzzer/README.md b/media/libstagefright/httplive/fuzzer/README.md
new file mode 100644
index 0000000..3a64ea4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/README.md
@@ -0,0 +1,56 @@
+# Fuzzer for libstagefright_httplive
+
+## Plugin Design Considerations
+The fuzzer plugin for libstagefright_httplive is designed based on the understanding of the library and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. Also, several .m3u8 files are hand-crafted and added to the corpus directory to increase the code coverage. This ensures more code paths are reached by the fuzzer.
+
+libstagefright_httplive supports the following parameters:
+1. Final Result (parameter name: `finalResult`)
+2. Flags (parameter name: `flags`)
+3. Time Us (parameter name: `timeUs`)
+4. Track Index (parameter name: `trackIndex`)
+5. Index (parameter name: `index`)
+6. Select (parameter name: `select`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `finalResult` | `-34` to `-1` | Value obtained from FuzzedDataProvider|
+| `flags` | `0` to `1` | Value obtained from FuzzedDataProvider|
+| `timeUs` | `0` to `1000` | Value obtained from FuzzedDataProvider|
+| `trackIndex` | `UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `index` | `UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `select` | `true` or `false` | Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugin is always deterministic for any given input.
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the httplive module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes steps to build httplive_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) httplive_fuzzer
+```
+#### Steps to run
+To run on device
+```
+ $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/lib /data/fuzz/${TARGET_ARCH}/lib
+ $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer
+ $ adb shell /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/corpus
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libstagefright/httplive/fuzzer/corpus/crypt.key b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
new file mode 100644
index 0000000..f9d5d7f
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
@@ -0,0 +1,2 @@
+Û
+ÏüÐ5Ð_xïHÎ3
diff --git a/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
new file mode 100644
index 0000000..32b0eac
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-ALLOW-CACHE:YES
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-VERSION:3
+#EXT-X-MEDIA-SEQUENCE:1
+#EXT-X-KEY:METHOD=AES-128,URI="../../fuzz/arm64/httplive_fuzzer/corpus/crypt.key"
+#EXTINF:10.000,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5.092,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
new file mode 100644
index 0000000..9338e04
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
@@ -0,0 +1,8 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-MEDIA-SEQUENCE:0
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
new file mode 100644
index 0000000..e1eff58
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
new file mode 100644
index 0000000..37a0189
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
@@ -0,0 +1,6 @@
+#EXTM3U
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=165340,RESOLUTION=256x144,CODECS="mp4a.40.5,avc1.42c00b"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index.m3u8
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=344388,RESOLUTION=426x240,CODECS="mp4a.40.5,avc1.4d4015"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
new file mode 100644
index 0000000..1b7f489
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
new file mode 100644
index 0000000..89ba37c
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
@@ -0,0 +1,15 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
new file mode 100644
index 0000000..2120de4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:11
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-MEDIA-SEQUENCE:0
+#EXT-X-VERSION:4
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:10@0
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:20@10
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:80
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
new file mode 100644
index 0000000..588368a
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0x30303030303030303030303030303030
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
new file mode 100644
index 0000000..b09948e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
@@ -0,0 +1,46 @@
+#EXTM3U
+#EXT-X-VERSION:4
+## Created with Unified Streaming Platform (version=1.11.3-24438)
+#EXT-X-SESSION-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key"
+
+# AUDIO groups
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-64",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-128",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+
+# SUBTITLES groups
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_eng=1000.m3u8"
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="ru",NAME="Russian",AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_rus=1000.m3u8"
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=494000,CODECS="mp4a.40.2,avc1.42C00D",RESOLUTION=224x100,FRAME-RATE=24,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng=401000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=933000,CODECS="mp4a.40.2,avc1.42C016",RESOLUTION=448x200,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=751000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1198000,CODECS="mp4a.40.2,avc1.4D401F",RESOLUTION=784x350,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1001000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1501000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2469000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=2200000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=1025000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng_1=902000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1368000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=2576x1150,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1161000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1815000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=3360x1500,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1583000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=69000,CODECS="mp4a.40.2",AUDIO="audio-aacl-64",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=64008.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=137000,CODECS="mp4a.40.2",AUDIO="audio-aacl-128",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=128002.m3u8
+
+# keyframes
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=54000,CODECS="avc1.42C00D",RESOLUTION=224x100,URI="keyframes/tears-of-steel-aes-video_eng=401000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=100000,CODECS="avc1.42C016",RESOLUTION=448x200,URI="keyframes/tears-of-steel-aes-video_eng=751000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=133000,CODECS="avc1.4D401F",RESOLUTION=784x350,URI="keyframes/tears-of-steel-aes-video_eng=1001000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=120000,CODECS="hvc1.1.6.L150.90",RESOLUTION=1680x750,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=902000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=154000,CODECS="hvc1.1.6.L150.90",RESOLUTION=2576x1150,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1161000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=210000,CODECS="hvc1.1.6.L150.90",RESOLUTION=3360x1500,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1583000.m3u8"
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
new file mode 100644
index 0000000..353d589
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:5
+
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="English stereo",LANGUAGE="en",AUTOSELECT=YES,URI="../../fuzz/arm64/httplive_fuzzer/index1.m3u8"
+
+#EXT-X-STREAM-INF:BANDWIDTH=628000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=320x180,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=928000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=480x270,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index2.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=640x360,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index3.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2528000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=960x540,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
new file mode 100644
index 0000000..eb88422
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
@@ -0,0 +1,17 @@
+#EXTM3U
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="eng",NAME="English",AUTOSELECT=YES,DEFAULT=YES,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="fre",NAME="Français",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="sp",NAME="Espanol",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
new file mode 100644
index 0000000..aa777b3
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <LiveDataSource.h>
+#include <LiveSession.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+#include <media/mediaplayer_common.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/ALooperRoster.h>
+#include <string>
+#include <utils/Log.h>
+
+using namespace std;
+using namespace android;
+
+constexpr char kFileNamePrefix[] = "/data/local/tmp/httplive-";
+constexpr char kFileNameSuffix[] = ".m3u8";
+constexpr char kFileUrlPrefix[] = "file://";
+constexpr int64_t kOffSet = 0;
+constexpr int32_t kReadyMarkMs = 5000;
+constexpr int32_t kPrepareMarkMs = 1500;
+constexpr int32_t kErrorNoMax = -1;
+constexpr int32_t kErrorNoMin = -34;
+constexpr int32_t kMaxTimeUs = 1000;
+constexpr int32_t kRandomStringLength = 64;
+constexpr int32_t kRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+
+constexpr LiveSession::StreamType kValidStreamType[] = {
+ LiveSession::STREAMTYPE_AUDIO, LiveSession::STREAMTYPE_VIDEO,
+ LiveSession::STREAMTYPE_SUBTITLES, LiveSession::STREAMTYPE_METADATA};
+
+constexpr MediaSource::ReadOptions::SeekMode kValidSeekMode[] = {
+ MediaSource::ReadOptions::SeekMode::SEEK_PREVIOUS_SYNC,
+ MediaSource::ReadOptions::SeekMode::SEEK_NEXT_SYNC,
+ MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST_SYNC,
+ MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST,
+ MediaSource::ReadOptions::SeekMode::SEEK_FRAME_INDEX};
+
+constexpr media_track_type kValidMediaTrackType[] = {
+ MEDIA_TRACK_TYPE_UNKNOWN, MEDIA_TRACK_TYPE_VIDEO,
+ MEDIA_TRACK_TYPE_AUDIO, MEDIA_TRACK_TYPE_TIMEDTEXT,
+ MEDIA_TRACK_TYPE_SUBTITLE, MEDIA_TRACK_TYPE_METADATA};
+
+struct TestAHandler : public AHandler {
+public:
+ TestAHandler(std::function<void()> signalEosFunction)
+ : mSignalEosFunction(signalEosFunction) {}
+ virtual ~TestAHandler() {}
+
+protected:
+ void onMessageReceived(const sp<AMessage> &msg) override {
+ int32_t what = -1;
+ msg->findInt32("what", &what);
+ switch (what) {
+ case LiveSession::kWhatError:
+ case LiveSession::kWhatPrepared:
+ case LiveSession::kWhatPreparationFailed: {
+ mSignalEosFunction();
+ break;
+ }
+ }
+ return;
+ }
+
+private:
+ std::function<void()> mSignalEosFunction;
+};
+
+struct TestMediaHTTPConnection : public MediaHTTPConnection {
+public:
+ TestMediaHTTPConnection() {}
+ virtual ~TestMediaHTTPConnection() {}
+
+ virtual bool connect(const char * /*uri*/,
+ const KeyedVector<String8, String8> * /*headers*/) {
+ return true;
+ }
+
+ virtual void disconnect() { return; }
+
+ virtual ssize_t readAt(off64_t /*offset*/, void * /*data*/, size_t size) {
+ return size;
+ }
+
+ virtual off64_t getSize() { return 0; }
+ virtual status_t getMIMEType(String8 * /*mimeType*/) { return NO_ERROR; }
+ virtual status_t getUri(String8 * /*uri*/) { return NO_ERROR; }
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPConnection);
+};
+
+struct TestMediaHTTPService : public MediaHTTPService {
+public:
+ TestMediaHTTPService() {}
+ ~TestMediaHTTPService(){};
+
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() {
+ mediaHTTPConnection = sp<TestMediaHTTPConnection>::make();
+ return mediaHTTPConnection;
+ }
+
+private:
+ sp<TestMediaHTTPConnection> mediaHTTPConnection = nullptr;
+ DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPService);
+};
+
+class HttpLiveFuzzer {
+public:
+ void process(const uint8_t *data, size_t size);
+ void deInitLiveSession();
+ ~HttpLiveFuzzer() { deInitLiveSession(); }
+
+private:
+ void invokeLiveDataSource();
+ void createM3U8File(const uint8_t *data, size_t size);
+ void initLiveDataSource();
+ void invokeLiveSession();
+ void initLiveSession();
+ void invokeDequeueAccessUnit();
+ void invokeConnectAsync();
+ void invokeSeekTo();
+ void invokeGetConfig();
+ void signalEos();
+ string generateFileName();
+ sp<LiveDataSource> mLiveDataSource = nullptr;
+ sp<LiveSession> mLiveSession = nullptr;
+ sp<ALooper> mLiveLooper = nullptr;
+ sp<TestMediaHTTPService> httpService = nullptr;
+ sp<TestAHandler> mHandler = nullptr;
+ FuzzedDataProvider *mFDP = nullptr;
+ bool mEosReached = false;
+ std::mutex mDownloadCompleteMutex;
+ std::condition_variable mConditionalVariable;
+};
+
+string HttpLiveFuzzer::generateFileName() {
+ return kFileNamePrefix + to_string(getpid()) + kFileNameSuffix;
+}
+
+void HttpLiveFuzzer::createM3U8File(const uint8_t *data, size_t size) {
+ ofstream m3u8File;
+ string currentFileName = generateFileName();
+ m3u8File.open(currentFileName, ios::out | ios::binary);
+ m3u8File.write((char *)data, size);
+ m3u8File.close();
+}
+
+void HttpLiveFuzzer::initLiveDataSource() {
+ mLiveDataSource = sp<LiveDataSource>::make();
+}
+
+void HttpLiveFuzzer::invokeLiveDataSource() {
+ initLiveDataSource();
+ size_t size = mFDP->ConsumeIntegralInRange<size_t>(kRangeMin, kRangeMax);
+ sp<ABuffer> buffer = new ABuffer(size);
+ mLiveDataSource->queueBuffer(buffer);
+ uint8_t *data = new uint8_t[size];
+ mLiveDataSource->readAtNonBlocking(kOffSet, data, size);
+ int32_t finalResult = mFDP->ConsumeIntegralInRange(kErrorNoMin, kErrorNoMax);
+ mLiveDataSource->queueEOS(finalResult);
+ mLiveDataSource->reset();
+ mLiveDataSource->countQueuedBuffers();
+ mLiveDataSource->initCheck();
+ delete[] data;
+}
+
+void HttpLiveFuzzer::initLiveSession() {
+ ALooperRoster looperRoster;
+ mHandler =
+ sp<TestAHandler>::make(std::bind(&HttpLiveFuzzer::signalEos, this));
+ mLiveLooper = sp<ALooper>::make();
+ mLiveLooper->setName("http live");
+ mLiveLooper->start();
+ sp<AMessage> notify = sp<AMessage>::make(0, mHandler);
+ httpService = new TestMediaHTTPService();
+ uint32_t flags = mFDP->ConsumeIntegral<uint32_t>();
+ mLiveSession = sp<LiveSession>::make(notify, flags, httpService);
+ mLiveLooper->registerHandler(mLiveSession);
+ looperRoster.registerHandler(mLiveLooper, mHandler);
+}
+
+void HttpLiveFuzzer::invokeDequeueAccessUnit() {
+ LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+ sp<ABuffer> buffer;
+ mLiveSession->dequeueAccessUnit(stream, &buffer);
+}
+
+void HttpLiveFuzzer::invokeSeekTo() {
+ int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(0, kMaxTimeUs);
+ MediaSource::ReadOptions::SeekMode mode =
+ mFDP->PickValueInArray(kValidSeekMode);
+ mLiveSession->seekTo(timeUs, mode);
+}
+
+void HttpLiveFuzzer::invokeGetConfig() {
+ mLiveSession->getTrackCount();
+ size_t trackIndex = mFDP->ConsumeIntegral<size_t>();
+ mLiveSession->getTrackInfo(trackIndex);
+ media_track_type type = mFDP->PickValueInArray(kValidMediaTrackType);
+ mLiveSession->getSelectedTrack(type);
+ sp<MetaData> meta;
+ LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+ mLiveSession->getStreamFormatMeta(stream, &meta);
+ mLiveSession->getKeyForStream(stream);
+ if (stream != LiveSession::STREAMTYPE_SUBTITLES) {
+ mLiveSession->getSourceTypeForStream(stream);
+ }
+}
+
+void HttpLiveFuzzer::invokeConnectAsync() {
+ string currentFileName = generateFileName();
+ string url = kFileUrlPrefix + currentFileName;
+ string str_1 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+ string str_2 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+
+ KeyedVector<String8, String8> headers;
+ headers.add(String8(str_1.c_str()), String8(str_2.c_str()));
+ mLiveSession->connectAsync(url.c_str(), &headers);
+}
+
+void HttpLiveFuzzer::invokeLiveSession() {
+ initLiveSession();
+ BufferingSettings bufferingSettings;
+ bufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+ bufferingSettings.mResumePlaybackMarkMs = kReadyMarkMs;
+ mLiveSession->setBufferingSettings(bufferingSettings);
+ invokeConnectAsync();
+ std::unique_lock waitForDownloadComplete(mDownloadCompleteMutex);
+ mConditionalVariable.wait(waitForDownloadComplete,
+ [this] { return mEosReached; });
+ if (mLiveSession->isSeekable()) {
+ invokeSeekTo();
+ }
+ invokeDequeueAccessUnit();
+ size_t index = mFDP->ConsumeIntegral<size_t>();
+ bool select = mFDP->ConsumeBool();
+ mLiveSession->selectTrack(index, select);
+ mLiveSession->hasDynamicDuration();
+ int64_t firstTimeUs =
+ mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+ int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+ int32_t discontinuitySeq = mFDP->ConsumeIntegral<int32_t>();
+ mLiveSession->calculateMediaTimeUs(firstTimeUs, timeUs, discontinuitySeq);
+ invokeGetConfig();
+}
+
+void HttpLiveFuzzer::process(const uint8_t *data, size_t size) {
+ mFDP = new FuzzedDataProvider(data, size);
+ createM3U8File(data, size);
+ invokeLiveDataSource();
+ invokeLiveSession();
+ delete mFDP;
+}
+
+void HttpLiveFuzzer::deInitLiveSession() {
+ if (mLiveSession != nullptr) {
+ mLiveSession->disconnect();
+ mLiveLooper->unregisterHandler(mLiveSession->id());
+ mLiveLooper->stop();
+ }
+ mLiveSession.clear();
+ mLiveLooper.clear();
+}
+
+void HttpLiveFuzzer::signalEos() {
+ mEosReached = true;
+ {
+ std::lock_guard<std::mutex> waitForDownloadComplete(mDownloadCompleteMutex);
+ }
+ mConditionalVariable.notify_one();
+ return;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ HttpLiveFuzzer httpliveFuzzer;
+ httpliveFuzzer.process(data, size);
+ return 0;
+}
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
new file mode 100644
index 0000000..703cc7e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
@@ -0,0 +1,15 @@
+#m3u8-Tags
+kw1="#EXTM3U"
+kw2="#EXT-X-VERSION:"
+kw3="#EXT-X-TARGETDURATION:"
+kw4="#EXT-X-PLAYLIST-TYPE:"
+kw5="#EXTINF:"
+kw6="#EXT-X-ENDLIST"
+kw7="#EXT-X-MEDIA-SEQUENCE:"
+kw8="#EXT-X-KEY:METHOD=NONE"
+kw9="#EXT-X-DISCONTINUITY:"
+kw10="#EXT-X-DISCONTINUITY-SEQUENCE:0"
+kw11="#EXT-X-STREAM-INF:BANDWIDTH="
+kw12="#EXT-X-STREAM-INF:CODECS="
+kw13="#EXT-X-BYTERANGE:"
+kw14="#EXT-X-MEDIA"
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index c84cc10..632b32c 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -71,6 +71,9 @@
virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface);
virtual void initiateStart();
virtual void initiateShutdown(bool keepComponentAllocated = false);
+ virtual status_t querySupportedParameters(std::vector<std::string> *names) override;
+ virtual status_t subscribeToParameters(const std::vector<std::string> &names) override;
+ virtual status_t unsubscribeFromParameters(const std::vector<std::string> &names) override;
status_t queryCapabilities(
const char* owner, const char* name,
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 4237e8c..3a01925 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -364,7 +364,7 @@
inline static const char *asString_AV1Profile(int32_t i, const char *def = "??") {
switch (i) {
case AV1ProfileMain8: return "Main8";
- case AV1ProfileMain10: return "Main10HDR";
+ case AV1ProfileMain10: return "Main10";
case AV1ProfileMain10HDR10: return "Main10HDR10";
case AV1ProfileMain10HDR10Plus: return "Main10HDRPlus";
default: return def;
diff --git a/media/libstagefright/mpeg2ts/test/Android.bp b/media/libstagefright/mpeg2ts/test/Android.bp
index 464b039..34a8d3e 100644
--- a/media/libstagefright/mpeg2ts/test/Android.bp
+++ b/media/libstagefright/mpeg2ts/test/Android.bp
@@ -57,11 +57,6 @@
"libstagefright_mpeg2support",
],
- include_dirs: [
- "frameworks/av/media/extractors/",
- "frameworks/av/media/libstagefright/",
- ],
-
header_libs: [
"libmedia_headers",
"libaudioclient_headers",
diff --git a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
index 79c233b..7f25d78 100644
--- a/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
+++ b/media/libstagefright/mpeg2ts/test/Mpeg2tsUnitTest.cpp
@@ -27,8 +27,8 @@
#include <media/stagefright/MetaDataBase.h>
#include <media/stagefright/foundation/AUtils.h>
-#include "mpeg2ts/ATSParser.h"
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <ATSParser.h>
+#include <AnotherPacketSource.h>
#include "Mpeg2tsUnitTestEnvironment.h"
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 3f4d662..30cdbc9 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -295,6 +295,10 @@
}
void AAVCAssembler::checkSpsUpdated(const sp<ABuffer> &buffer) {
+ if (buffer->size() == 0) {
+ android_errorWriteLog(0x534e4554, "204077881");
+ return;
+ }
const uint8_t *data = buffer->data();
unsigned nalType = data[0] & 0x1f;
if (nalType == 0x7) {
diff --git a/media/libstagefright/rtsp/JitterCalculator.cpp b/media/libstagefright/rtsp/JitterCalculator.cpp
index 93b5a83..7e60be2 100644
--- a/media/libstagefright/rtsp/JitterCalculator.cpp
+++ b/media/libstagefright/rtsp/JitterCalculator.cpp
@@ -38,14 +38,13 @@
mInterArrivalJitterUs = inter;
}
-void JitterCalc::putBaseData(int64_t rtpTime, int64_t arrivalTimeUs) {
- // A RTP time wraps around after UINT32_MAX. We must consider this case.
- const int64_t UINT32_MSB = 0x80000000;
- int64_t overflowMask = (mFirstTimeStamp & UINT32_MSB & ~rtpTime) << 1;
- int64_t tempRtpTime = overflowMask | rtpTime;
+void JitterCalc::putBaseData(uint32_t rtpTime, int64_t arrivalTimeUs) {
+ // A RTP time wraps around after UINT32_MAX. Overflow can present.
+ uint32_t diff = 0;
+ __builtin_usub_overflow(rtpTime, mFirstTimeStamp, &diff);
// Base jitter implementation can be various
- int64_t scheduledTimeUs = (tempRtpTime - (int64_t)mFirstTimeStamp) * 1000000ll / mClockRate;
+ int64_t scheduledTimeUs = ((int32_t)diff) * 1000000ll / mClockRate;
int64_t elapsedTimeUs = arrivalTimeUs - mFirstArrivalTimeUs;
int64_t correctionTimeUs = elapsedTimeUs - scheduledTimeUs; // additional propagation delay;
mBaseJitterUs = (mBaseJitterUs * 15 + correctionTimeUs) / 16;
@@ -53,18 +52,13 @@
(long long)mBaseJitterUs, (long long)correctionTimeUs);
}
-void JitterCalc::putInterArrivalData(int64_t rtpTime, int64_t arrivalTimeUs) {
- const int64_t UINT32_MSB = 0x80000000;
- int64_t tempRtpTime = rtpTime;
- int64_t tempLastTimeStamp = mLastTimeStamp;
-
- // A RTP time wraps around after UINT32_MAX. We must consider this case.
- int64_t overflowMask = (mLastTimeStamp ^ rtpTime) & UINT32_MSB;
- tempRtpTime |= ((overflowMask & ~rtpTime) << 1);
- tempLastTimeStamp |= ((overflowMask & ~mLastTimeStamp) << 1);
+void JitterCalc::putInterArrivalData(uint32_t rtpTime, int64_t arrivalTimeUs) {
+ // A RTP time wraps around after UINT32_MAX. Overflow can present.
+ uint32_t diff = 0;
+ __builtin_usub_overflow(rtpTime, mLastTimeStamp, &diff);
// 6.4.1 of RFC3550 defines this interarrival jitter value.
- int64_t diffTimeStampUs = abs(tempRtpTime - tempLastTimeStamp) * 1000000ll / mClockRate;
+ int64_t diffTimeStampUs = abs((int32_t)diff) * 1000000ll / mClockRate;
int64_t diffArrivalUs = arrivalTimeUs - mLastArrivalTimeUs; // Can't be minus
ALOGV("diffTimeStampUs %lld \t\t diffArrivalUs %lld",
(long long)diffTimeStampUs, (long long)diffArrivalUs);
@@ -72,7 +66,7 @@
int64_t varianceUs = diffArrivalUs - diffTimeStampUs;
mInterArrivalJitterUs = (mInterArrivalJitterUs * 15 + abs(varianceUs)) / 16;
- mLastTimeStamp = (uint32_t)rtpTime;
+ mLastTimeStamp = rtpTime;
mLastArrivalTimeUs = arrivalTimeUs;
}
diff --git a/media/libstagefright/rtsp/JitterCalculator.h b/media/libstagefright/rtsp/JitterCalculator.h
index ff36f1f..4f3b761 100644
--- a/media/libstagefright/rtsp/JitterCalculator.h
+++ b/media/libstagefright/rtsp/JitterCalculator.h
@@ -40,8 +40,8 @@
JitterCalc(int32_t clockRate);
void init(uint32_t rtpTime, int64_t arrivalTimeUs, int32_t base, int32_t inter);
- void putInterArrivalData(int64_t rtpTime, int64_t arrivalTime);
- void putBaseData(int64_t rtpTime, int64_t arrivalTimeUs);
+ void putInterArrivalData(uint32_t rtpTime, int64_t arrivalTime);
+ void putBaseData(uint32_t rtpTime, int64_t arrivalTimeUs);
int32_t getBaseJitterMs();
int32_t getInterArrivalJitterMs();
};
diff --git a/media/libstagefright/rtsp/MyTransmitter.h b/media/libstagefright/rtsp/MyTransmitter.h
deleted file mode 100644
index bf44aff..0000000
--- a/media/libstagefright/rtsp/MyTransmitter.h
+++ /dev/null
@@ -1,984 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MY_TRANSMITTER_H_
-
-#define MY_TRANSMITTER_H_
-
-#include "ARTPConnection.h"
-
-#include <arpa/inet.h>
-#include <sys/socket.h>
-
-#include <openssl/md5.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/base64.h>
-#include <media/stagefright/foundation/hexdump.h>
-
-#ifdef ANDROID
-#include "VideoSource.h"
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaCodecSource.h>
-#endif
-
-namespace android {
-
-#define TRACK_SUFFIX "trackid=1"
-#define PT 96
-#define PT_STR "96"
-
-#define USERNAME "bcast"
-#define PASSWORD "test"
-
-static int uniformRand(int limit) {
- return ((double)rand() * limit) / RAND_MAX;
-}
-
-static bool GetAttribute(const char *s, const char *key, AString *value) {
- value->clear();
-
- size_t keyLen = strlen(key);
-
- for (;;) {
- const char *colonPos = strchr(s, ';');
-
- size_t len =
- (colonPos == NULL) ? strlen(s) : colonPos - s;
-
- if (len >= keyLen + 1 && s[keyLen] == '=' && !strncmp(s, key, keyLen)) {
- value->setTo(&s[keyLen + 1], len - keyLen - 1);
- return true;
- }
-
- if (colonPos == NULL) {
- return false;
- }
-
- s = colonPos + 1;
- }
-}
-
-struct MyTransmitter : public AHandler {
- MyTransmitter(const char *url, const sp<ALooper> &looper)
- : mServerURL(url),
- mLooper(looper),
- mConn(new ARTSPConnection),
- mConnected(false),
- mAuthType(NONE),
- mRTPSocket(-1),
- mRTCPSocket(-1),
- mSourceID(rand()),
- mSeqNo(uniformRand(65536)),
- mRTPTimeBase(rand()),
- mNumSamplesSent(0),
- mNumRTPSent(0),
- mNumRTPOctetsSent(0),
- mLastRTPTime(0),
- mLastNTPTime(0) {
- mStreamURL = mServerURL;
- mStreamURL.append("/bazong.sdp");
-
- mTrackURL = mStreamURL;
- mTrackURL.append("/");
- mTrackURL.append(TRACK_SUFFIX);
-
- mLooper->registerHandler(this);
- mLooper->registerHandler(mConn);
-
- sp<AMessage> reply = new AMessage('conn', this);
- mConn->connect(mServerURL.c_str(), reply);
-
-#ifdef ANDROID
- int width = 640;
- int height = 480;
-
- sp<MediaSource> source = new VideoSource(width, height);
-
- sp<AMessage> encMeta = new AMessage;
- encMeta->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
- encMeta->setInt32("width", width);
- encMeta->setInt32("height", height);
- encMeta->setInt32("frame-rate", 30);
- encMeta->setInt32("bitrate", 256000);
- encMeta->setInt32("i-frame-interval", 10);
-
- sp<ALooper> encLooper = new ALooper;
- encLooper->setName("rtsp_transmitter");
- encLooper->start();
-
- mEncoder = MediaCodecSource::Create(encLooper, encMeta, source);
-
- mEncoder->start();
-
- MediaBuffer *buffer;
- CHECK_EQ(mEncoder->read(&buffer), (status_t)OK);
- CHECK(buffer != NULL);
-
- makeH264SPropParamSets(buffer);
-
- buffer->release();
- buffer = NULL;
-#endif
- }
-
- uint64_t ntpTime() {
- struct timeval tv;
- gettimeofday(&tv, NULL);
-
- uint64_t nowUs = tv.tv_sec * 1000000ll + tv.tv_usec;
-
- nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
-
- uint64_t hi = nowUs / 1000000ll;
- uint64_t lo = ((1ll << 32) * (nowUs % 1000000ll)) / 1000000ll;
-
- return (hi << 32) | lo;
- }
-
- void issueAnnounce() {
- AString sdp;
- sdp = "v=0\r\n";
-
- sdp.append("o=- ");
-
- uint64_t ntp = ntpTime();
- sdp.append(ntp);
- sdp.append(" ");
- sdp.append(ntp);
- sdp.append(" IN IP4 127.0.0.0\r\n");
-
- sdp.append(
- "s=Sample\r\n"
- "i=Playing around with ANNOUNCE\r\n"
- "c=IN IP4 ");
-
- struct in_addr addr;
- addr.s_addr = htonl(mServerIP);
-
- sdp.append(inet_ntoa(addr));
-
- sdp.append(
- "\r\n"
- "t=0 0\r\n"
- "a=range:npt=now-\r\n");
-
-#ifdef ANDROID
- sp<MetaData> meta = mEncoder->getFormat();
- int32_t width, height;
- CHECK(meta->findInt32(kKeyWidth, &width));
- CHECK(meta->findInt32(kKeyHeight, &height));
-
- sdp.append(
- "m=video 0 RTP/AVP " PT_STR "\r\n"
- "b=AS 320000\r\n"
- "a=rtpmap:" PT_STR " H264/90000\r\n");
-
- sdp.append("a=cliprect 0,0,");
- sdp.append(height);
- sdp.append(",");
- sdp.append(width);
- sdp.append("\r\n");
-
- sdp.append(
- "a=framesize:" PT_STR " ");
- sdp.append(width);
- sdp.append("-");
- sdp.append(height);
- sdp.append("\r\n");
-
- sdp.append(
- "a=fmtp:" PT_STR " profile-level-id=42C015;sprop-parameter-sets=");
-
- sdp.append(mSeqParamSet);
- sdp.append(",");
- sdp.append(mPicParamSet);
- sdp.append(";packetization-mode=1\r\n");
-#else
- sdp.append(
- "m=audio 0 RTP/AVP " PT_STR "\r\n"
- "a=rtpmap:" PT_STR " L8/8000/1\r\n");
-#endif
-
- sdp.append("a=control:" TRACK_SUFFIX "\r\n");
-
- AString request;
- request.append("ANNOUNCE ");
- request.append(mStreamURL);
- request.append(" RTSP/1.0\r\n");
-
- addAuthentication(&request, "ANNOUNCE", mStreamURL.c_str());
-
- request.append("Content-Type: application/sdp\r\n");
- request.append("Content-Length: ");
- request.append(sdp.size());
- request.append("\r\n");
-
- request.append("\r\n");
- request.append(sdp);
-
- sp<AMessage> reply = new AMessage('anno', this);
- mConn->sendRequest(request.c_str(), reply);
- }
-
- void H(const AString &s, AString *out) {
- out->clear();
-
- MD5_CTX m;
- MD5_Init(&m);
- MD5_Update(&m, s.c_str(), s.size());
-
- uint8_t key[16];
- MD5_Final(key, &m);
-
- for (size_t i = 0; i < 16; ++i) {
- char nibble = key[i] >> 4;
- if (nibble <= 9) {
- nibble += '0';
- } else {
- nibble += 'a' - 10;
- }
- out->append(&nibble, 1);
-
- nibble = key[i] & 0x0f;
- if (nibble <= 9) {
- nibble += '0';
- } else {
- nibble += 'a' - 10;
- }
- out->append(&nibble, 1);
- }
- }
-
- void authenticate(const sp<ARTSPResponse> &response) {
- ssize_t i = response->mHeaders.indexOfKey("www-authenticate");
- CHECK_GE(i, 0);
-
- AString value = response->mHeaders.valueAt(i);
-
- if (!strncmp(value.c_str(), "Basic", 5)) {
- mAuthType = BASIC;
- } else {
- CHECK(!strncmp(value.c_str(), "Digest", 6));
- mAuthType = DIGEST;
-
- i = value.find("nonce=");
- CHECK_GE(i, 0);
- CHECK_EQ(value.c_str()[i + 6], '\"');
- ssize_t j = value.find("\"", i + 7);
- CHECK_GE(j, 0);
-
- mNonce.setTo(value, i + 7, j - i - 7);
- }
-
- issueAnnounce();
- }
-
- void addAuthentication(
- AString *request, const char *method, const char *url) {
- if (mAuthType == NONE) {
- return;
- }
-
- if (mAuthType == BASIC) {
- request->append("Authorization: Basic YmNhc3Q6dGVzdAo=\r\n");
- return;
- }
-
- CHECK_EQ((int)mAuthType, (int)DIGEST);
-
- AString A1;
- A1.append(USERNAME);
- A1.append(":");
- A1.append("Streaming Server");
- A1.append(":");
- A1.append(PASSWORD);
-
- AString A2;
- A2.append(method);
- A2.append(":");
- A2.append(url);
-
- AString HA1, HA2;
- H(A1, &HA1);
- H(A2, &HA2);
-
- AString tmp;
- tmp.append(HA1);
- tmp.append(":");
- tmp.append(mNonce);
- tmp.append(":");
- tmp.append(HA2);
-
- AString digest;
- H(tmp, &digest);
-
- request->append("Authorization: Digest ");
- request->append("nonce=\"");
- request->append(mNonce);
- request->append("\", ");
- request->append("username=\"" USERNAME "\", ");
- request->append("uri=\"");
- request->append(url);
- request->append("\", ");
- request->append("response=\"");
- request->append(digest);
- request->append("\"");
- request->append("\r\n");
- }
-
- virtual void onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case 'conn':
- {
- int32_t result;
- CHECK(msg->findInt32("result", &result));
-
- LOG(INFO) << "connection request completed with result "
- << result << " (" << strerror(-result) << ")";
-
- if (result != OK) {
- (new AMessage('quit', this))->post();
- break;
- }
-
- mConnected = true;
-
- CHECK(msg->findInt32("server-ip", (int32_t *)&mServerIP));
-
- issueAnnounce();
- break;
- }
-
- case 'anno':
- {
- int32_t result;
- CHECK(msg->findInt32("result", &result));
-
- LOG(INFO) << "ANNOUNCE completed with result "
- << result << " (" << strerror(-result) << ")";
-
- sp<RefBase> obj;
- CHECK(msg->findObject("response", &obj));
- sp<ARTSPResponse> response;
-
- if (result == OK) {
- response = static_cast<ARTSPResponse *>(obj.get());
- CHECK(response != NULL);
-
- if (response->mStatusCode == 401) {
- if (mAuthType != NONE) {
- LOG(INFO) << "FAILED to authenticate";
- (new AMessage('quit', this))->post();
- break;
- }
-
- authenticate(response);
- break;
- }
- }
-
- if (result != OK || response->mStatusCode != 200) {
- (new AMessage('quit', this))->post();
- break;
- }
-
- unsigned rtpPort;
- ARTPConnection::MakePortPair(&mRTPSocket, &mRTCPSocket, &rtpPort);
-
- // (new AMessage('poll', this))->post();
-
- AString request;
- request.append("SETUP ");
- request.append(mTrackURL);
- request.append(" RTSP/1.0\r\n");
-
- addAuthentication(&request, "SETUP", mTrackURL.c_str());
-
- request.append("Transport: RTP/AVP;unicast;client_port=");
- request.append(rtpPort);
- request.append("-");
- request.append(rtpPort + 1);
- request.append(";mode=record\r\n");
- request.append("\r\n");
-
- sp<AMessage> reply = new AMessage('setu', this);
- mConn->sendRequest(request.c_str(), reply);
- break;
- }
-
-#if 0
- case 'poll':
- {
- fd_set rs;
- FD_ZERO(&rs);
- FD_SET(mRTCPSocket, &rs);
-
- struct timeval tv;
- tv.tv_sec = 0;
- tv.tv_usec = 0;
-
- int res = select(mRTCPSocket + 1, &rs, NULL, NULL, &tv);
-
- if (res == 1) {
- sp<ABuffer> buffer = new ABuffer(65536);
- ssize_t n = recv(mRTCPSocket, buffer->data(), buffer->size(), 0);
-
- if (n <= 0) {
- LOG(ERROR) << "recv returned " << n;
- } else {
- LOG(INFO) << "recv returned " << n << " bytes of data.";
-
- hexdump(buffer->data(), n);
- }
- }
-
- msg->post(50000);
- break;
- }
-#endif
-
- case 'setu':
- {
- int32_t result;
- CHECK(msg->findInt32("result", &result));
-
- LOG(INFO) << "SETUP completed with result "
- << result << " (" << strerror(-result) << ")";
-
- sp<RefBase> obj;
- CHECK(msg->findObject("response", &obj));
- sp<ARTSPResponse> response;
-
- if (result == OK) {
- response = static_cast<ARTSPResponse *>(obj.get());
- CHECK(response != NULL);
- }
-
- if (result != OK || response->mStatusCode != 200) {
- (new AMessage('quit', this))->post();
- break;
- }
-
- ssize_t i = response->mHeaders.indexOfKey("session");
- CHECK_GE(i, 0);
- mSessionID = response->mHeaders.valueAt(i);
- i = mSessionID.find(";");
- if (i >= 0) {
- // Remove options, i.e. ";timeout=90"
- mSessionID.erase(i, mSessionID.size() - i);
- }
-
- i = response->mHeaders.indexOfKey("transport");
- CHECK_GE(i, 0);
- AString transport = response->mHeaders.valueAt(i);
-
- LOG(INFO) << "transport = '" << transport << "'";
-
- AString value;
- CHECK(GetAttribute(transport.c_str(), "server_port", &value));
-
- unsigned rtpPort, rtcpPort;
- CHECK_EQ(sscanf(value.c_str(), "%u-%u", &rtpPort, &rtcpPort), 2);
-
- CHECK(GetAttribute(transport.c_str(), "source", &value));
-
- memset(mRemoteAddr.sin_zero, 0, sizeof(mRemoteAddr.sin_zero));
- mRemoteAddr.sin_family = AF_INET;
- mRemoteAddr.sin_addr.s_addr = inet_addr(value.c_str());
- mRemoteAddr.sin_port = htons(rtpPort);
-
- mRemoteRTCPAddr = mRemoteAddr;
- mRemoteRTCPAddr.sin_port = htons(rtpPort + 1);
-
- CHECK_EQ(0, connect(mRTPSocket,
- (const struct sockaddr *)&mRemoteAddr,
- sizeof(mRemoteAddr)));
-
- CHECK_EQ(0, connect(mRTCPSocket,
- (const struct sockaddr *)&mRemoteRTCPAddr,
- sizeof(mRemoteRTCPAddr)));
-
- uint32_t x = ntohl(mRemoteAddr.sin_addr.s_addr);
- LOG(INFO) << "sending data to "
- << (x >> 24)
- << "."
- << ((x >> 16) & 0xff)
- << "."
- << ((x >> 8) & 0xff)
- << "."
- << (x & 0xff)
- << ":"
- << rtpPort;
-
- AString request;
- request.append("RECORD ");
- request.append(mStreamURL);
- request.append(" RTSP/1.0\r\n");
-
- addAuthentication(&request, "RECORD", mStreamURL.c_str());
-
- request.append("Session: ");
- request.append(mSessionID);
- request.append("\r\n");
- request.append("\r\n");
-
- sp<AMessage> reply = new AMessage('reco', this);
- mConn->sendRequest(request.c_str(), reply);
- break;
- }
-
- case 'reco':
- {
- int32_t result;
- CHECK(msg->findInt32("result", &result));
-
- LOG(INFO) << "RECORD completed with result "
- << result << " (" << strerror(-result) << ")";
-
- sp<RefBase> obj;
- CHECK(msg->findObject("response", &obj));
- sp<ARTSPResponse> response;
-
- if (result == OK) {
- response = static_cast<ARTSPResponse *>(obj.get());
- CHECK(response != NULL);
- }
-
- if (result != OK) {
- (new AMessage('quit', this))->post();
- break;
- }
-
- (new AMessage('more', this))->post();
- (new AMessage('sr ', this))->post();
- (new AMessage('aliv', this))->post(30000000ll);
- break;
- }
-
- case 'aliv':
- {
- if (!mConnected) {
- break;
- }
-
- AString request;
- request.append("OPTIONS ");
- request.append(mStreamURL);
- request.append(" RTSP/1.0\r\n");
-
- addAuthentication(&request, "RECORD", mStreamURL.c_str());
-
- request.append("Session: ");
- request.append(mSessionID);
- request.append("\r\n");
- request.append("\r\n");
-
- sp<AMessage> reply = new AMessage('opts', this);
- mConn->sendRequest(request.c_str(), reply);
- break;
- }
-
- case 'opts':
- {
- int32_t result;
- CHECK(msg->findInt32("result", &result));
-
- LOG(INFO) << "OPTIONS completed with result "
- << result << " (" << strerror(-result) << ")";
-
- if (!mConnected) {
- break;
- }
-
- (new AMessage('aliv', this))->post(30000000ll);
- break;
- }
-
- case 'more':
- {
- if (!mConnected) {
- break;
- }
-
- sp<ABuffer> buffer = new ABuffer(65536);
- uint8_t *data = buffer->data();
- data[0] = 0x80;
- data[1] = (1 << 7) | PT; // M-bit
- data[2] = (mSeqNo >> 8) & 0xff;
- data[3] = mSeqNo & 0xff;
- data[8] = mSourceID >> 24;
- data[9] = (mSourceID >> 16) & 0xff;
- data[10] = (mSourceID >> 8) & 0xff;
- data[11] = mSourceID & 0xff;
-
-#ifdef ANDROID
- MediaBuffer *mediaBuf = NULL;
- for (;;) {
- CHECK_EQ(mEncoder->read(&mediaBuf), (status_t)OK);
- if (mediaBuf->range_length() > 0) {
- break;
- }
- mediaBuf->release();
- mediaBuf = NULL;
- }
-
- int64_t timeUs;
- CHECK(mediaBuf->meta_data()->findInt64(kKeyTime, &timeUs));
-
- uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100ll);
-
- const uint8_t *mediaData =
- (const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
-
- CHECK(!memcmp("\x00\x00\x00\x01", mediaData, 4));
-
- CHECK_LE(mediaBuf->range_length() - 4 + 12, buffer->size());
-
- memcpy(&data[12],
- mediaData + 4, mediaBuf->range_length() - 4);
-
- buffer->setRange(0, mediaBuf->range_length() - 4 + 12);
-
- mediaBuf->release();
- mediaBuf = NULL;
-#else
- uint32_t rtpTime = mRTPTimeBase + mNumRTPSent * 128;
- memset(&data[12], 0, 128);
- buffer->setRange(0, 12 + 128);
-#endif
-
- data[4] = rtpTime >> 24;
- data[5] = (rtpTime >> 16) & 0xff;
- data[6] = (rtpTime >> 8) & 0xff;
- data[7] = rtpTime & 0xff;
-
- ssize_t n = send(
- mRTPSocket, data, buffer->size(), 0);
- if (n < 0) {
- LOG(ERROR) << "send failed (" << strerror(errno) << ")";
- }
- CHECK_EQ(n, (ssize_t)buffer->size());
-
- ++mSeqNo;
-
- ++mNumRTPSent;
- mNumRTPOctetsSent += buffer->size() - 12;
-
- mLastRTPTime = rtpTime;
- mLastNTPTime = ntpTime();
-
-#ifdef ANDROID
- if (mNumRTPSent < 60 * 25) { // 60 secs worth
- msg->post(40000);
-#else
- if (mNumRTPOctetsSent < 8000 * 60) {
- msg->post(1000000ll * 128 / 8000);
-#endif
- } else {
- LOG(INFO) << "That's enough, pausing.";
-
- AString request;
- request.append("PAUSE ");
- request.append(mStreamURL);
- request.append(" RTSP/1.0\r\n");
-
- addAuthentication(&request, "PAUSE", mStreamURL.c_str());
-
- request.append("Session: ");
- request.append(mSessionID);
- request.append("\r\n");
- request.append("\r\n");
-
- sp<AMessage> reply = new AMessage('paus', this);
- mConn->sendRequest(request.c_str(), reply);
- }
- break;
- }
-
- case 'sr ':
- {
- if (!mConnected) {
- break;
- }
-
- sp<ABuffer> buffer = new ABuffer(65536);
- buffer->setRange(0, 0);
-
- addSR(buffer);
- addSDES(buffer);
-
- uint8_t *data = buffer->data();
- ssize_t n = send(
- mRTCPSocket, data, buffer->size(), 0);
- CHECK_EQ(n, (ssize_t)buffer->size());
-
- msg->post(3000000);
- break;
- }
-
- case 'paus':
- {
- int32_t result;
- CHECK(msg->findInt32("result", &result));
-
- LOG(INFO) << "PAUSE completed with result "
- << result << " (" << strerror(-result) << ")";
-
- sp<RefBase> obj;
- CHECK(msg->findObject("response", &obj));
- sp<ARTSPResponse> response;
-
- AString request;
- request.append("TEARDOWN ");
- request.append(mStreamURL);
- request.append(" RTSP/1.0\r\n");
-
- addAuthentication(&request, "TEARDOWN", mStreamURL.c_str());
-
- request.append("Session: ");
- request.append(mSessionID);
- request.append("\r\n");
- request.append("\r\n");
-
- sp<AMessage> reply = new AMessage('tear', this);
- mConn->sendRequest(request.c_str(), reply);
- break;
- }
-
- case 'tear':
- {
- int32_t result;
- CHECK(msg->findInt32("result", &result));
-
- LOG(INFO) << "TEARDOWN completed with result "
- << result << " (" << strerror(-result) << ")";
-
- sp<RefBase> obj;
- CHECK(msg->findObject("response", &obj));
- sp<ARTSPResponse> response;
-
- if (result == OK) {
- response = static_cast<ARTSPResponse *>(obj.get());
- CHECK(response != NULL);
- }
-
- (new AMessage('quit', this))->post();
- break;
- }
-
- case 'disc':
- {
- LOG(INFO) << "disconnect completed";
-
- mConnected = false;
- (new AMessage('quit', this))->post();
- break;
- }
-
- case 'quit':
- {
- if (mConnected) {
- mConn->disconnect(new AMessage('disc', this));
- break;
- }
-
- if (mRTPSocket >= 0) {
- close(mRTPSocket);
- mRTPSocket = -1;
- }
-
- if (mRTCPSocket >= 0) {
- close(mRTCPSocket);
- mRTCPSocket = -1;
- }
-
-#ifdef ANDROID
- mEncoder->stop();
- mEncoder.clear();
-#endif
-
- mLooper->stop();
- break;
- }
-
- default:
- TRESPASS();
- }
- }
-
-protected:
- virtual ~MyTransmitter() {
- }
-
-private:
- enum AuthType {
- NONE,
- BASIC,
- DIGEST
- };
-
- AString mServerURL;
- AString mTrackURL;
- AString mStreamURL;
-
- sp<ALooper> mLooper;
- sp<ARTSPConnection> mConn;
- bool mConnected;
- uint32_t mServerIP;
- AuthType mAuthType;
- AString mNonce;
- AString mSessionID;
- int mRTPSocket, mRTCPSocket;
- uint32_t mSourceID;
- uint32_t mSeqNo;
- uint32_t mRTPTimeBase;
- struct sockaddr_in mRemoteAddr;
- struct sockaddr_in mRemoteRTCPAddr;
- size_t mNumSamplesSent;
- uint32_t mNumRTPSent;
- uint32_t mNumRTPOctetsSent;
- uint32_t mLastRTPTime;
- uint64_t mLastNTPTime;
-
-#ifdef ANDROID
- sp<MediaSource> mEncoder;
- AString mSeqParamSet;
- AString mPicParamSet;
-
- void makeH264SPropParamSets(MediaBuffer *buffer) {
- static const char kStartCode[] = "\x00\x00\x00\x01";
-
- const uint8_t *data =
- (const uint8_t *)buffer->data() + buffer->range_offset();
- size_t size = buffer->range_length();
-
- CHECK_GE(size, 0u);
- CHECK(!memcmp(kStartCode, data, 4));
-
- data += 4;
- size -= 4;
-
- size_t startCodePos = 0;
- while (startCodePos + 3 < size
- && memcmp(kStartCode, &data[startCodePos], 4)) {
- ++startCodePos;
- }
-
- CHECK_LT(startCodePos + 3, size);
-
- encodeBase64(data, startCodePos, &mSeqParamSet);
-
- encodeBase64(&data[startCodePos + 4], size - startCodePos - 4,
- &mPicParamSet);
- }
-#endif
-
- void addSR(const sp<ABuffer> &buffer) {
- uint8_t *data = buffer->data() + buffer->size();
-
- data[0] = 0x80 | 0;
- data[1] = 200; // SR
- data[2] = 0;
- data[3] = 6;
- data[4] = mSourceID >> 24;
- data[5] = (mSourceID >> 16) & 0xff;
- data[6] = (mSourceID >> 8) & 0xff;
- data[7] = mSourceID & 0xff;
-
- data[8] = mLastNTPTime >> (64 - 8);
- data[9] = (mLastNTPTime >> (64 - 16)) & 0xff;
- data[10] = (mLastNTPTime >> (64 - 24)) & 0xff;
- data[11] = (mLastNTPTime >> 32) & 0xff;
- data[12] = (mLastNTPTime >> 24) & 0xff;
- data[13] = (mLastNTPTime >> 16) & 0xff;
- data[14] = (mLastNTPTime >> 8) & 0xff;
- data[15] = mLastNTPTime & 0xff;
-
- data[16] = (mLastRTPTime >> 24) & 0xff;
- data[17] = (mLastRTPTime >> 16) & 0xff;
- data[18] = (mLastRTPTime >> 8) & 0xff;
- data[19] = mLastRTPTime & 0xff;
-
- data[20] = mNumRTPSent >> 24;
- data[21] = (mNumRTPSent >> 16) & 0xff;
- data[22] = (mNumRTPSent >> 8) & 0xff;
- data[23] = mNumRTPSent & 0xff;
-
- data[24] = mNumRTPOctetsSent >> 24;
- data[25] = (mNumRTPOctetsSent >> 16) & 0xff;
- data[26] = (mNumRTPOctetsSent >> 8) & 0xff;
- data[27] = mNumRTPOctetsSent & 0xff;
-
- buffer->setRange(buffer->offset(), buffer->size() + 28);
- }
-
- void addSDES(const sp<ABuffer> &buffer) {
- uint8_t *data = buffer->data() + buffer->size();
- data[0] = 0x80 | 1;
- data[1] = 202; // SDES
- data[4] = mSourceID >> 24;
- data[5] = (mSourceID >> 16) & 0xff;
- data[6] = (mSourceID >> 8) & 0xff;
- data[7] = mSourceID & 0xff;
-
- size_t offset = 8;
-
- data[offset++] = 1; // CNAME
-
- static const char *kCNAME = "andih@laptop";
- data[offset++] = strlen(kCNAME);
-
- memcpy(&data[offset], kCNAME, strlen(kCNAME));
- offset += strlen(kCNAME);
-
- data[offset++] = 7; // NOTE
-
- static const char *kNOTE = "Hell's frozen over.";
- data[offset++] = strlen(kNOTE);
-
- memcpy(&data[offset], kNOTE, strlen(kNOTE));
- offset += strlen(kNOTE);
-
- data[offset++] = 0;
-
- if ((offset % 4) > 0) {
- size_t count = 4 - (offset % 4);
- switch (count) {
- case 3:
- data[offset++] = 0;
- case 2:
- data[offset++] = 0;
- case 1:
- data[offset++] = 0;
- }
- }
-
- size_t numWords = (offset / 4) - 1;
- data[2] = numWords >> 8;
- data[3] = numWords & 0xff;
-
- buffer->setRange(buffer->offset(), buffer->size() + offset);
- }
-
- DISALLOW_EVIL_CONSTRUCTORS(MyTransmitter);
-};
-
-} // namespace android
-
-#endif // MY_TRANSMITTER_H_
diff --git a/media/libstagefright/tests/HEVC/Android.bp b/media/libstagefright/tests/HEVC/Android.bp
index 91bf385..7a0ba52 100644
--- a/media/libstagefright/tests/HEVC/Android.bp
+++ b/media/libstagefright/tests/HEVC/Android.bp
@@ -44,10 +44,6 @@
"libstagefright_foundation",
],
- include_dirs: [
- "frameworks/av/media/libstagefright",
- ],
-
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
index 324a042..c43e1f8 100644
--- a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
+++ b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
@@ -21,7 +21,7 @@
#include <fstream>
#include <media/stagefright/foundation/ABitReader.h>
-#include "include/HevcUtils.h"
+#include <HevcUtils.h>
#include "HEVCUtilsTestEnvironment.h"
diff --git a/media/libstagefright/tests/extractorFactory/Android.bp b/media/libstagefright/tests/extractorFactory/Android.bp
index 13d5b89..a067284 100644
--- a/media/libstagefright/tests/extractorFactory/Android.bp
+++ b/media/libstagefright/tests/extractorFactory/Android.bp
@@ -51,10 +51,6 @@
"libstagefright_foundation",
],
- include_dirs: [
- "frameworks/av/media/libstagefright",
- ],
-
// TODO: (b/150181583)
compile_multilib: "first",
diff --git a/media/libstagefright/tests/fuzzers/Android.bp b/media/libstagefright/tests/fuzzers/Android.bp
index 0097830..250ffb9 100644
--- a/media/libstagefright/tests/fuzzers/Android.bp
+++ b/media/libstagefright/tests/fuzzers/Android.bp
@@ -74,6 +74,7 @@
srcs: [
"FrameDecoderFuzzer.cpp",
],
+ corpus: ["corpus/*"],
defaults: ["libstagefright_fuzzer_defaults"],
}
@@ -86,9 +87,6 @@
dictionary: "dictionaries/formats.dict",
defaults: ["libstagefright_fuzzer_defaults"],
static_libs: [
- "libstagefright_webm",
"libdatasource",
- "libstagefright_esds",
- "libogg",
],
}
diff --git a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
index c251479..b346718 100644
--- a/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
+++ b/media/libstagefright/tests/fuzzers/FrameDecoderFuzzer.cpp
@@ -46,12 +46,15 @@
}
while (fdp.remaining_bytes()) {
- switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 3)) {
- case 0:
- decoder->init(/*frameTimeUs*/ fdp.ConsumeIntegral<int64_t>(),
- /*option*/ fdp.ConsumeIntegral<int>(),
- /*colorFormat*/ fdp.ConsumeIntegral<int>());
+ uint8_t switchCase = fdp.ConsumeIntegralInRange<uint8_t>(0, 3);
+ switch (switchCase) {
+ case 0: {
+ int64_t frameTimeUs = fdp.ConsumeIntegral<int64_t>();
+ int option = fdp.ConsumeIntegral<int>();
+ int colorFormat = fdp.ConsumeIntegral<int>();
+ decoder->init(frameTimeUs, option, colorFormat);
break;
+ }
case 1:
decoder->extractFrame();
break;
diff --git a/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
new file mode 100644
index 0000000..698e21d
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/corpus/color_format_rgb_565.dat
Binary files differ
diff --git a/media/libstagefright/tests/writer/Android.bp b/media/libstagefright/tests/writer/Android.bp
index 38d5ecc..49fb569 100644
--- a/media/libstagefright/tests/writer/Android.bp
+++ b/media/libstagefright/tests/writer/Android.bp
@@ -52,10 +52,6 @@
"libogg",
],
- include_dirs: [
- "frameworks/av/media/libstagefright",
- ],
-
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libstagefright/tests/writer/WriterTest.cpp b/media/libstagefright/tests/writer/WriterTest.cpp
index d170e7c..398c592 100644
--- a/media/libstagefright/tests/writer/WriterTest.cpp
+++ b/media/libstagefright/tests/writer/WriterTest.cpp
@@ -36,7 +36,7 @@
#include <media/stagefright/MPEG2TSWriter.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/OggWriter.h>
-#include <webm/WebmWriter.h>
+#include <WebmWriter.h>
#include "WriterTestEnvironment.h"
#include "WriterUtility.h"
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 32a22ba..4209aea 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -33,7 +33,7 @@
"WebmWriter.cpp",
],
- include_dirs: ["frameworks/av/include"],
+ export_include_dirs: ["."],
shared_libs: [
"libdatasource",
@@ -44,6 +44,7 @@
],
header_libs: [
+ "av-headers",
"libmedia_headers",
"media_ndk_headers",
],
diff --git a/media/mtp/OWNERS b/media/mtp/OWNERS
index 1928ba8..54d3d4a 100644
--- a/media/mtp/OWNERS
+++ b/media/mtp/OWNERS
@@ -1,6 +1,5 @@
set noparent
-marcone@google.com
jsharkey@android.com
jameswei@google.com
rmojumder@google.com
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 1ae2b44..0e2de4e 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -452,17 +452,19 @@
uint32_t flags) {
sp<AMessage> nativeFormat;
AMediaFormat_getFormat(format, &nativeFormat);
- ALOGV("configure with format: %s", nativeFormat->debugString(0).c_str());
+ // create our shallow copy, so we aren't victim to any later changes.
+ sp<AMessage> dupNativeFormat = nativeFormat->dup();
+ ALOGV("configure with format: %s", dupNativeFormat->debugString(0).c_str());
sp<Surface> surface = NULL;
if (window != NULL) {
surface = (Surface*) window;
}
- status_t err = mData->mCodec->configure(nativeFormat, surface,
+ status_t err = mData->mCodec->configure(dupNativeFormat, surface,
crypto ? crypto->mCrypto : NULL, flags);
if (err != OK) {
ALOGE("configure: err(%d), failed with format: %s",
- err, nativeFormat->debugString(0).c_str());
+ err, dupNativeFormat->debugString(0).c_str());
}
return translate_error(err);
}
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index c1793ce..69ab242 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -200,8 +200,11 @@
AString tmp;
if (mData->mFormat->findString(name, &tmp)) {
String8 ret(tmp.c_str());
- mData->mStringCache.add(String8(name), ret);
- *out = ret.string();
+ ssize_t i = mData->mStringCache.add(String8(name), ret);
+ if (i < 0) {
+ return false;
+ }
+ *out = mData->mStringCache.valueAt(i).string();
return true;
}
return false;
@@ -351,6 +354,11 @@
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA = "mpeg-user-data";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER = "mpeg2-stream-header";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS = "mpegh-compatible-sets";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION =
+ "mpegh-profile-level-indication";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT =
+ "mpegh-reference-channel-layout";
EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
diff --git a/media/ndk/OWNERS b/media/ndk/OWNERS
index 9dc441e..83644f0 100644
--- a/media/ndk/OWNERS
+++ b/media/ndk/OWNERS
@@ -1,3 +1,4 @@
-marcone@google.com
+essick@google.com
+lajos@google.com
# For AImage/AImageReader
include platform/frameworks/av:/camera/OWNERS
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index fbd855d..2d2fcc0 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -320,6 +320,34 @@
extern const char* AMEDIAFORMAT_VIDEO_QP_P_MAX __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_P_MIN __INTRODUCED_IN(31);
+/**
+ * MPEG-H audio profile and level compatibility.
+ *
+ * See FDAmd_2 of ISO_IEC_23008-3:2019 MHAProfileAndLevelCompatibilitySetBox.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio profile level indication.
+ *
+ * See ISO_IEC_23008-3:2019 MHADecoderConfigurationRecord mpegh3daProfileLevelIndication.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio reference channel layout.
+ *
+ * See ISO_IEC_23008-3:2019 MHADecoderConfigurationRecord referenceChannelLayout
+ * and ISO_IEC_23001-8 ChannelConfiguration value.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT __INTRODUCED_IN(32);
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 7e9e57e..6f275c7 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -126,6 +126,9 @@
AMEDIAFORMAT_KEY_MIME; # var introduced=21
AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER; # var introduced=29
+ AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS; # var introduced=32
+ AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION; # var introduced=32
+ AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT; # var introduced=32
AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN; # var introduced=29
AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
diff --git a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
index b17541d..75d73bf 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
@@ -30,7 +30,8 @@
android:roundIcon="@mipmap/ic_launcher_round"
android:supportsRtl="true"
android:theme="@style/AppTheme">
- <activity android:name="com.android.media.samplevideoencoder.MainActivity">
+ <activity android:name="com.android.media.samplevideoencoder.MainActivity"
+ android:exported="true">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
@@ -42,4 +43,4 @@
android:targetPackage="com.android.media.samplevideoencoder"
android:label="SampleVideoEncoder Test"/>
-</manifest>
\ No newline at end of file
+</manifest>
diff --git a/media/tests/benchmark/MediaBenchmarkTest/Android.bp b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
index 2e06da5..4b44dcf 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/Android.bp
+++ b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
@@ -69,7 +69,6 @@
java_defaults {
name: "MediaBenchmark-defaults",
- sdk_version: "system_current",
min_sdk_version: "28",
- target_sdk_version: "29",
+ target_sdk_version: "30",
}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/build.gradle b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
index b2aee1a..b222d47 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/build.gradle
+++ b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
@@ -17,21 +17,21 @@
buildscript {
repositories {
google()
- jcenter()
+ mavenCentral()
}
dependencies {
- classpath 'com.android.tools.build:gradle:3.5.0'
+ classpath 'com.android.tools.build:gradle:4.2.1'
}
}
apply plugin: 'com.android.application'
android {
- compileSdkVersion 29
+ compileSdkVersion 30
defaultConfig {
applicationId "com.android.media.benchmark"
minSdkVersion 28
- targetSdkVersion 29
+ targetSdkVersion 30
versionCode 1
versionName "1.0"
testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
@@ -57,20 +57,20 @@
externalNativeBuild {
cmake {
path "src/main/cpp/CMakeLists.txt"
- version "3.10.2"
+ version "3.18.1"
}
}
}
repositories {
google()
- jcenter()
+ mavenCentral()
}
dependencies {
implementation fileTree(dir: 'libs', include: ['*.jar'])
- implementation 'androidx.appcompat:appcompat:1.1.0'
- testImplementation 'junit:junit:4.12'
- androidTestImplementation 'androidx.test:runner:1.2.0'
- androidTestImplementation 'androidx.test.ext:junit:1.1.1'
+ implementation 'androidx.appcompat:appcompat:1.3.0'
+ testImplementation 'junit:junit:4.13.2'
+ androidTestImplementation 'androidx.test:runner:1.3.0'
+ androidTestImplementation 'androidx.test.ext:junit:1.1.2'
}
\ No newline at end of file
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
index af92424..0192d68 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
@@ -9,7 +9,6 @@
cc_test_library {
name: "libmediabenchmark_jni",
- sdk_version: "current",
defaults: [
"libmediabenchmark_common-defaults",
diff --git a/media/tests/benchmark/src/native/common/Android.bp b/media/tests/benchmark/src/native/common/Android.bp
index 6b54c6a..718d217 100644
--- a/media/tests/benchmark/src/native/common/Android.bp
+++ b/media/tests/benchmark/src/native/common/Android.bp
@@ -55,7 +55,6 @@
cc_defaults {
name: "libmediabenchmark-defaults",
- sdk_version: "current",
stl: "c++_shared",
shared_libs: [
diff --git a/media/tests/benchmark/src/native/extractor/Extractor.cpp b/media/tests/benchmark/src/native/extractor/Extractor.cpp
index f0bb3b9..3bdfbad 100644
--- a/media/tests/benchmark/src/native/extractor/Extractor.cpp
+++ b/media/tests/benchmark/src/native/extractor/Extractor.cpp
@@ -124,9 +124,7 @@
int64_t sTime = mStats->getCurTime();
if (mExtractor) {
- // TODO: (b/140128505) Multiple calls result in DoS.
- // Uncomment call to AMediaExtractor_delete() once this is resolved
- // AMediaExtractor_delete(mExtractor);
+ AMediaExtractor_delete(mExtractor);
mExtractor = nullptr;
}
int64_t eTime = mStats->getCurTime();
diff --git a/media/tests/benchmark/tests/Android.bp b/media/tests/benchmark/tests/Android.bp
index 0fbd20d..9a8caa3 100644
--- a/media/tests/benchmark/tests/Android.bp
+++ b/media/tests/benchmark/tests/Android.bp
@@ -33,7 +33,12 @@
srcs: ["ExtractorTest.cpp"],
- static_libs: ["libmediabenchmark_extractor"]
+ static_libs: ["libmediabenchmark_extractor"],
+
+ shared_libs: [
+ "libbase",
+ "libbinder_ndk",
+ ],
}
cc_test {
@@ -50,6 +55,11 @@
"libmediabenchmark_extractor",
"libmediabenchmark_decoder",
],
+
+ shared_libs: [
+ "libbase",
+ "libbinder_ndk",
+ ],
}
cc_test {
diff --git a/media/tests/benchmark/tests/DecoderTest.cpp b/media/tests/benchmark/tests/DecoderTest.cpp
index 81ef02a..3666724 100644
--- a/media/tests/benchmark/tests/DecoderTest.cpp
+++ b/media/tests/benchmark/tests/DecoderTest.cpp
@@ -21,6 +21,8 @@
#include <iostream>
#include <limits>
+#include <android/binder_process.h>
+
#include "BenchmarkTestEnvironment.h"
#include "Decoder.h"
@@ -175,6 +177,7 @@
"c2.android.hevc.decoder", true)));
int main(int argc, char **argv) {
+ ABinderProcess_startThreadPool();
gEnv = new BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/tests/benchmark/tests/ExtractorTest.cpp b/media/tests/benchmark/tests/ExtractorTest.cpp
index d14d15b..27ee9ba 100644
--- a/media/tests/benchmark/tests/ExtractorTest.cpp
+++ b/media/tests/benchmark/tests/ExtractorTest.cpp
@@ -19,6 +19,8 @@
#include <gtest/gtest.h>
+#include <android/binder_process.h>
+
#include "BenchmarkTestEnvironment.h"
#include "Extractor.h"
@@ -73,6 +75,7 @@
0)));
int main(int argc, char **argv) {
+ ABinderProcess_startThreadPool();
gEnv = new BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index bfe73d5..c1c7df5 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -34,6 +34,7 @@
"SchedulingPolicyService.cpp",
"ServiceUtilities.cpp",
"TimeCheck.cpp",
+ "TimerThread.cpp",
],
static_libs: [
"libc_malloc_debug_backtrace",
@@ -51,6 +52,7 @@
"libpermission",
"android.hardware.graphics.bufferqueue@1.0",
"android.hidl.token@1.0-utils",
+ "packagemanager_aidl-cpp",
],
export_static_lib_headers: [
"libbatterystats_aidl",
@@ -81,9 +83,48 @@
export_include_dirs: ["include"],
}
+cc_library {
+ name: "libmediautils_vendor",
+ vendor_available: true, // required for platform/hardware/interfaces
+ srcs: [
+ "MemoryLeakTrackUtil.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+ shared_libs: [
+ "liblog",
+ "libutils",
+ ],
+
+ static_libs: [
+ "libc_malloc_debug_backtrace",
+ ],
+
+ header_libs: [
+ "bionic_libc_platform_headers",
+ ],
+
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
+
+
cc_library_headers {
name: "libmediautils_headers",
vendor_available: true, // required for platform/hardware/interfaces
export_include_dirs: ["include"],
}
+
+cc_test {
+ name: "libmediautils_test",
+ srcs: ["TimerThread-test.cpp"],
+ shared_libs: [
+ "libmediautils",
+ "libutils",
+ ]
+}
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 9c7b863..3ee7626 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -45,6 +45,7 @@
static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
static const String16 sModifyPhoneState("android.permission.MODIFY_PHONE_STATE");
static const String16 sModifyAudioRouting("android.permission.MODIFY_AUDIO_ROUTING");
+static const String16 sCallAudioInterception("android.permission.CALL_AUDIO_INTERCEPTION");
static String16 resolveCallingPackage(PermissionController& permissionController,
const std::optional<String16> opPackageName, uid_t uid) {
@@ -71,6 +72,7 @@
switch (source) {
case AUDIO_SOURCE_HOTWORD:
return AppOpsManager::OP_RECORD_AUDIO_HOTWORD;
+ case AUDIO_SOURCE_ECHO_REFERENCE: // fallthrough
case AUDIO_SOURCE_REMOTE_SUBMIX:
return AppOpsManager::OP_RECORD_AUDIO_OUTPUT;
case AUDIO_SOURCE_VOICE_DOWNLINK:
@@ -101,7 +103,11 @@
AttributionSourceState myAttributionSource;
myAttributionSource.uid = VALUE_OR_FATAL(android::legacy2aidl_uid_t_int32_t(getuid()));
myAttributionSource.pid = VALUE_OR_FATAL(android::legacy2aidl_pid_t_int32_t(getpid()));
- myAttributionSource.token = sp<BBinder>::make();
+ if (callerAttributionSource.token != nullptr) {
+ myAttributionSource.token = callerAttributionSource.token;
+ } else {
+ myAttributionSource.token = sp<BBinder>::make();
+ }
myAttributionSource.next.push_back(nextAttributionSource);
return std::optional<AttributionSourceState>{myAttributionSource};
@@ -304,6 +310,17 @@
return ok;
}
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+
+ // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+ bool ok = PermissionCache::checkPermission(sCallAudioInterception, pid, uid);
+ if (!ok) ALOGE("%s(): android.permission.CALL_AUDIO_INTERCEPTION denied for uid %d",
+ __func__, uid);
+ return ok;
+}
+
AttributionSourceState getCallingAttributionSource() {
AttributionSourceState attributionSource = AttributionSourceState();
attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 819e146..2b765cc 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -16,13 +16,25 @@
#define LOG_TAG "TimeCheck"
-#include <utils/Log.h>
-#include <mediautils/TimeCheck.h>
+#include <optional>
+#include <sstream>
+
#include <mediautils/EventLog.h>
+#include <mediautils/TimeCheck.h>
+#include <utils/Log.h>
#include "debuggerd/handler.h"
namespace android {
+namespace {
+
+std::string formatTime(std::chrono::system_clock::time_point t) {
+ auto msSinceEpoch = std::chrono::round<std::chrono::milliseconds>(t.time_since_epoch());
+ return (std::ostringstream() << msSinceEpoch.count()).str();
+}
+
+} // namespace
+
// Audio HAL server pids vector used to generate audio HAL processes tombstone
// when audioserver watchdog triggers.
// We use a lockless storage to avoid potential deadlocks in the context of watchdog
@@ -58,84 +70,39 @@
}
/* static */
-sp<TimeCheck::TimeCheckThread> TimeCheck::getTimeCheckThread()
-{
- static sp<TimeCheck::TimeCheckThread> sTimeCheckThread = new TimeCheck::TimeCheckThread();
+TimerThread* TimeCheck::getTimeCheckThread() {
+ static TimerThread* sTimeCheckThread = new TimerThread();
return sTimeCheckThread;
}
-TimeCheck::TimeCheck(const char *tag, uint32_t timeoutMs)
- : mEndTimeNs(getTimeCheckThread()->startMonitoring(tag, timeoutMs))
-{
-}
+TimeCheck::TimeCheck(const char* tag, uint32_t timeoutMs)
+ : mTimerHandle(getTimeCheckThread()->scheduleTask(
+ [tag, startTime = std::chrono::system_clock::now()] { crash(tag, startTime); },
+ std::chrono::milliseconds(timeoutMs))) {}
TimeCheck::~TimeCheck() {
- getTimeCheckThread()->stopMonitoring(mEndTimeNs);
+ getTimeCheckThread()->cancelTask(mTimerHandle);
}
-TimeCheck::TimeCheckThread::~TimeCheckThread()
-{
- AutoMutex _l(mMutex);
- requestExit();
- mMonitorRequests.clear();
- mCond.signal();
-}
+/* static */
+void TimeCheck::crash(const char* tag, std::chrono::system_clock::time_point startTime) {
+ std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
-nsecs_t TimeCheck::TimeCheckThread::startMonitoring(const char *tag, uint32_t timeoutMs) {
- Mutex::Autolock _l(mMutex);
- nsecs_t endTimeNs = systemTime() + milliseconds(timeoutMs);
- for (; mMonitorRequests.indexOfKey(endTimeNs) >= 0; ++endTimeNs);
- mMonitorRequests.add(endTimeNs, tag);
- mCond.signal();
- return endTimeNs;
-}
-
-void TimeCheck::TimeCheckThread::stopMonitoring(nsecs_t endTimeNs) {
- Mutex::Autolock _l(mMutex);
- mMonitorRequests.removeItem(endTimeNs);
- mCond.signal();
-}
-
-bool TimeCheck::TimeCheckThread::threadLoop()
-{
- status_t status = TIMED_OUT;
- {
- AutoMutex _l(mMutex);
-
- if (exitPending()) {
- return false;
+ // Generate audio HAL processes tombstones and allow time to complete
+ // before forcing restart
+ std::vector<pid_t> pids = getAudioHalPids();
+ if (pids.size() != 0) {
+ for (const auto& pid : pids) {
+ ALOGI("requesting tombstone for pid: %d", pid);
+ sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
}
-
- nsecs_t endTimeNs = INT64_MAX;
- const char *tag = "<unspecified>";
- // KeyedVector mMonitorRequests is ordered so take first entry as next timeout
- if (mMonitorRequests.size() != 0) {
- endTimeNs = mMonitorRequests.keyAt(0);
- tag = mMonitorRequests.valueAt(0);
- }
-
- const nsecs_t waitTimeNs = endTimeNs - systemTime();
- if (waitTimeNs > 0) {
- status = mCond.waitRelative(mMutex, waitTimeNs);
- }
- if (status != NO_ERROR) {
- // Generate audio HAL processes tombstones and allow time to complete
- // before forcing restart
- std::vector<pid_t> pids = getAudioHalPids();
- if (pids.size() != 0) {
- for (const auto& pid : pids) {
- ALOGI("requesting tombstone for pid: %d", pid);
- sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
- }
- sleep(1);
- } else {
- ALOGI("No HAL process pid available, skipping tombstones");
- }
- LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
- LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
- }
+ sleep(1);
+ } else {
+ ALOGI("No HAL process pid available, skipping tombstones");
}
- return true;
+ LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+ LOG_ALWAYS_FATAL("TimeCheck timeout for %s (start=%s, end=%s)", tag,
+ formatTime(startTime).c_str(), formatTime(endTime).c_str());
}
-}; // namespace android
+}; // namespace android
diff --git a/media/utils/TimerThread-test.cpp b/media/utils/TimerThread-test.cpp
new file mode 100644
index 0000000..ee8a811
--- /dev/null
+++ b/media/utils/TimerThread-test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <chrono>
+#include <thread>
+#include <gtest/gtest.h>
+#include <mediautils/TimerThread.h>
+
+using namespace std::chrono_literals;
+
+namespace android {
+namespace {
+
+constexpr auto kJitter = 10ms;
+
+TEST(TimerThread, Basic) {
+ std::atomic<bool> taskRan = false;
+ TimerThread thread;
+ thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+ std::this_thread::sleep_for(100ms - kJitter);
+ ASSERT_FALSE(taskRan);
+ std::this_thread::sleep_for(2 * kJitter);
+ ASSERT_TRUE(taskRan);
+}
+
+TEST(TimerThread, Cancel) {
+ std::atomic<bool> taskRan = false;
+ TimerThread thread;
+ TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+ std::this_thread::sleep_for(100ms - kJitter);
+ ASSERT_FALSE(taskRan);
+ thread.cancelTask(handle);
+ std::this_thread::sleep_for(2 * kJitter);
+ ASSERT_FALSE(taskRan);
+}
+
+TEST(TimerThread, CancelAfterRun) {
+ std::atomic<bool> taskRan = false;
+ TimerThread thread;
+ TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+ std::this_thread::sleep_for(100ms + kJitter);
+ ASSERT_TRUE(taskRan);
+ thread.cancelTask(handle);
+}
+
+TEST(TimerThread, MultipleTasks) {
+ std::array<std::atomic<bool>, 6> taskRan;
+ TimerThread thread;
+
+ auto startTime = std::chrono::steady_clock::now();
+
+ thread.scheduleTask([&taskRan] { taskRan[0] = true; }, 300ms);
+ thread.scheduleTask([&taskRan] { taskRan[1] = true; }, 100ms);
+ thread.scheduleTask([&taskRan] { taskRan[2] = true; }, 200ms);
+ thread.scheduleTask([&taskRan] { taskRan[3] = true; }, 400ms);
+ auto handle4 = thread.scheduleTask([&taskRan] { taskRan[4] = true; }, 200ms);
+ thread.scheduleTask([&taskRan] { taskRan[5] = true; }, 200ms);
+
+ // Task 1 should trigger around 100ms.
+ std::this_thread::sleep_until(startTime + 100ms - kJitter);
+ ASSERT_FALSE(taskRan[0]);
+ ASSERT_FALSE(taskRan[1]);
+ ASSERT_FALSE(taskRan[2]);
+ ASSERT_FALSE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_FALSE(taskRan[5]);
+
+ std::this_thread::sleep_until(startTime + 100ms + kJitter);
+ ASSERT_FALSE(taskRan[0]);
+ ASSERT_TRUE(taskRan[1]);
+ ASSERT_FALSE(taskRan[2]);
+ ASSERT_FALSE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_FALSE(taskRan[5]);
+
+ // Cancel task 4 before it gets a chance to run.
+ thread.cancelTask(handle4);
+
+ // Tasks 2 and 5 should trigger around 200ms.
+ std::this_thread::sleep_until(startTime + 200ms - kJitter);
+ ASSERT_FALSE(taskRan[0]);
+ ASSERT_TRUE(taskRan[1]);
+ ASSERT_FALSE(taskRan[2]);
+ ASSERT_FALSE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_FALSE(taskRan[5]);
+
+ std::this_thread::sleep_until(startTime + 200ms + kJitter);
+ ASSERT_FALSE(taskRan[0]);
+ ASSERT_TRUE(taskRan[1]);
+ ASSERT_TRUE(taskRan[2]);
+ ASSERT_FALSE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_TRUE(taskRan[5]);
+
+ // Task 0 should trigger around 300ms.
+ std::this_thread::sleep_until(startTime + 300ms - kJitter);
+ ASSERT_FALSE(taskRan[0]);
+ ASSERT_TRUE(taskRan[1]);
+ ASSERT_TRUE(taskRan[2]);
+ ASSERT_FALSE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_TRUE(taskRan[5]);
+
+ std::this_thread::sleep_until(startTime + 300ms + kJitter);
+ ASSERT_TRUE(taskRan[0]);
+ ASSERT_TRUE(taskRan[1]);
+ ASSERT_TRUE(taskRan[2]);
+ ASSERT_FALSE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_TRUE(taskRan[5]);
+
+ // Task 3 should trigger around 400ms.
+ std::this_thread::sleep_until(startTime + 400ms - kJitter);
+ ASSERT_TRUE(taskRan[0]);
+ ASSERT_TRUE(taskRan[1]);
+ ASSERT_TRUE(taskRan[2]);
+ ASSERT_FALSE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_TRUE(taskRan[5]);
+
+ std::this_thread::sleep_until(startTime + 400ms + kJitter);
+ ASSERT_TRUE(taskRan[0]);
+ ASSERT_TRUE(taskRan[1]);
+ ASSERT_TRUE(taskRan[2]);
+ ASSERT_TRUE(taskRan[3]);
+ ASSERT_FALSE(taskRan[4]);
+ ASSERT_TRUE(taskRan[5]);
+}
+
+
+} // namespace
+} // namespace android
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
new file mode 100644
index 0000000..3c95798
--- /dev/null
+++ b/media/utils/TimerThread.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TimerThread"
+
+#include <optional>
+
+#include <mediautils/TimerThread.h>
+#include <utils/ThreadDefs.h>
+
+namespace android {
+
+TimerThread::TimerThread() : mThread([this] { threadFunc(); }) {
+ pthread_setname_np(mThread.native_handle(), "TimeCheckThread");
+ pthread_setschedprio(mThread.native_handle(), PRIORITY_URGENT_AUDIO);
+}
+
+TimerThread::~TimerThread() {
+ {
+ std::lock_guard _l(mMutex);
+ mShouldExit = true;
+ mCond.notify_all();
+ }
+ mThread.join();
+}
+
+TimerThread::Handle TimerThread::scheduleTaskAtDeadline(std::function<void()>&& func,
+ TimePoint deadline) {
+ std::lock_guard _l(mMutex);
+
+ // To avoid key collisions, advance by 1 tick until the key is unique.
+ for (; mMonitorRequests.find(deadline) != mMonitorRequests.end();
+ deadline += TimePoint::duration(1))
+ ;
+ mMonitorRequests.emplace(deadline, std::move(func));
+ mCond.notify_all();
+ return deadline;
+}
+
+void TimerThread::cancelTask(Handle handle) {
+ std::lock_guard _l(mMutex);
+ mMonitorRequests.erase(handle);
+}
+
+void TimerThread::threadFunc() {
+ std::unique_lock _l(mMutex);
+
+ while (!mShouldExit) {
+ if (!mMonitorRequests.empty()) {
+ TimePoint nextDeadline = mMonitorRequests.begin()->first;
+ if (nextDeadline < std::chrono::steady_clock::now()) {
+ // Deadline expired.
+ mMonitorRequests.begin()->second();
+ mMonitorRequests.erase(mMonitorRequests.begin());
+ }
+ mCond.wait_until(_l, nextDeadline);
+ } else {
+ mCond.wait(_l);
+ }
+ }
+}
+
+} // namespace android
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index c1698dc..c4dc24f 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -18,6 +18,7 @@
"libutils",
"libbinder",
"framework-permission-aidl-cpp",
+ "packagemanager_aidl-cpp",
],
cflags: [
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 6e52512..51e8d7a 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -17,7 +17,7 @@
#include <fcntl.h>
#include <functional>
-#include <type_traits>
+#include <type_traits>
#include <android/content/AttributionSourceState.h>
#include "fuzzer/FuzzedDataProvider.h"
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 734313c..2fe2451 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -104,6 +104,7 @@
bool dumpAllowed();
bool modifyPhoneStateAllowed(const AttributionSourceState& attributionSource);
bool bypassInterruptionPolicyAllowed(const AttributionSourceState& attributionSource);
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource);
void purgePermissionCache();
int32_t getOpForSource(audio_source_t source);
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index 5ba6d7c..0d6e80d 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -14,62 +14,33 @@
* limitations under the License.
*/
+#pragma once
-#ifndef ANDROID_TIME_CHECK_H
-#define ANDROID_TIME_CHECK_H
-
-#include <utils/KeyedVector.h>
-#include <utils/Thread.h>
#include <vector>
+#include <mediautils/TimerThread.h>
+
namespace android {
// A class monitoring execution time for a code block (scoped variable) and causing an assert
// if it exceeds a certain time
class TimeCheck {
-public:
-
+ public:
// The default timeout is chosen to be less than system server watchdog timeout
static constexpr uint32_t kDefaultTimeOutMs = 5000;
- TimeCheck(const char *tag, uint32_t timeoutMs = kDefaultTimeOutMs);
- ~TimeCheck();
- static void setAudioHalPids(const std::vector<pid_t>& pids);
- static std::vector<pid_t> getAudioHalPids();
+ TimeCheck(const char* tag, uint32_t timeoutMs = kDefaultTimeOutMs);
+ ~TimeCheck();
+ static void setAudioHalPids(const std::vector<pid_t>& pids);
+ static std::vector<pid_t> getAudioHalPids();
-private:
-
- class TimeCheckThread : public Thread {
- public:
-
- TimeCheckThread() {}
- virtual ~TimeCheckThread() override;
-
- nsecs_t startMonitoring(const char *tag, uint32_t timeoutMs);
- void stopMonitoring(nsecs_t endTimeNs);
-
- private:
-
- // RefBase
- virtual void onFirstRef() override { run("TimeCheckThread", PRIORITY_URGENT_AUDIO); }
-
- // Thread
- virtual bool threadLoop() override;
-
- Condition mCond;
- Mutex mMutex;
- // using the end time in ns as key is OK given the risk is low that two entries
- // are added in such a way that <add time> + <timeout> are the same for both.
- KeyedVector< nsecs_t, const char*> mMonitorRequests;
- };
-
- static sp<TimeCheckThread> getTimeCheckThread();
+ private:
+ static TimerThread* getTimeCheckThread();
static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
+ static void crash(const char* tag, std::chrono::system_clock::time_point startTime);
- const nsecs_t mEndTimeNs;
+ const TimerThread::Handle mTimerHandle;
};
-}; // namespace android
-
-#endif // ANDROID_TIME_CHECK_H
+}; // namespace android
diff --git a/media/utils/include/mediautils/TimerThread.h b/media/utils/include/mediautils/TimerThread.h
new file mode 100644
index 0000000..cf457b8
--- /dev/null
+++ b/media/utils/include/mediautils/TimerThread.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <functional>
+#include <map>
+#include <mutex>
+#include <thread>
+
+#include <android-base/thread_annotations.h>
+
+namespace android {
+
+/**
+ * A thread for deferred execution of tasks, with cancellation.
+ */
+class TimerThread {
+ public:
+ using Handle = std::chrono::steady_clock::time_point;
+
+ TimerThread();
+ ~TimerThread();
+
+ /**
+ * Schedule a task to be executed in the future (`timeout` duration from now).
+ * Returns a handle that can be used for cancellation.
+ */
+ template <typename R, typename P>
+ Handle scheduleTask(std::function<void()>&& func, std::chrono::duration<R, P> timeout) {
+ auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(timeout);
+ return scheduleTaskAtDeadline(std::move(func), deadline);
+ }
+
+ /**
+ * Cancel a task, previously scheduled with scheduleTask().
+ * If the task has already executed, this is a no-op.
+ */
+ void cancelTask(Handle handle);
+
+ private:
+ using TimePoint = std::chrono::steady_clock::time_point;
+
+ std::condition_variable mCond;
+ std::mutex mMutex;
+ std::thread mThread;
+ std::map<TimePoint, std::function<void()>> mMonitorRequests GUARDED_BY(mMutex);
+ bool mShouldExit GUARDED_BY(mMutex) = false;
+
+ void threadFunc();
+ Handle scheduleTaskAtDeadline(std::function<void()>&& func, TimePoint deadline);
+};
+
+} // namespace android
diff --git a/services/OWNERS b/services/OWNERS
index f0b5e2f..17e605d 100644
--- a/services/OWNERS
+++ b/services/OWNERS
@@ -1,9 +1,6 @@
-chz@google.com
elaurent@google.com
essick@google.com
etalvala@google.com
-gkasten@google.com
hunga@google.com
-marcone@google.com
nchalko@google.com
quxiangfang@google.com
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index b91f302..763c070 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -41,6 +41,7 @@
"FastThreadState.cpp",
"NBAIO_Tee.cpp",
"PatchPanel.cpp",
+ "PropertyUtils.cpp",
"SpdifStreamOut.cpp",
"StateQueue.cpp",
"Threads.cpp",
@@ -54,6 +55,7 @@
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioflinger-aidl-cpp",
"audioclient-types-aidl-cpp",
"av-types-aidl-cpp",
@@ -81,6 +83,7 @@
"libmedia_helper",
"libshmemcompat",
"libvibrator",
+ "packagemanager_aidl-cpp",
],
static_libs: [
@@ -90,6 +93,7 @@
],
header_libs: [
+ "libaaudio_headers",
"libaudioclient_headers",
"libaudiohal_headers",
"libmedia_headers",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 65a163f..ec414e0 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -57,6 +57,7 @@
#include "AudioFlinger.h"
#include "NBAIO_Tee.h"
+#include "PropertyUtils.h"
#include <media/AudioResamplerPublic.h>
@@ -64,6 +65,7 @@
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_hapticgenerator.h>
+#include <system/audio_effects/effect_spatializer.h>
#include <audio_utils/primitives.h>
@@ -102,7 +104,11 @@
namespace android {
+#define MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION 7.0
+
using media::IEffectClient;
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
using android::content::AttributionSourceState;
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
@@ -300,6 +306,11 @@
mDevicesFactoryHalCallback = new DevicesFactoryHalCallbackImpl;
mDevicesFactoryHal->setCallbackOnce(mDevicesFactoryHalCallback);
+
+ if (mDevicesFactoryHal->getHalVersion() <= MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+ mAAudioBurstsPerBuffer = getAAudioMixerBurstCountFromSystemProperty();
+ mAAudioHwBurstMinMicros = getAAudioHardwareBurstMinUsecFromSystemProperty();
+ }
}
status_t AudioFlinger::setAudioHalPids(const std::vector<pid_t>& pids) {
@@ -335,12 +346,50 @@
return NO_ERROR;
}
-// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
-const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
- if (mAudioVibratorInfos.empty()) {
- return nullptr;
+status_t AudioFlinger::getMmapPolicyInfos(
+ AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+ Mutex::Autolock _l(mLock);
+ if (const auto it = mPolicyInfos.find(policyType); it != mPolicyInfos.end()) {
+ *policyInfos = it->second;
+ return NO_ERROR;
}
- return &mAudioVibratorInfos.front();
+ if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+ AutoMutex lock(mHardwareLock);
+ for (size_t i = 0; i < mAudioHwDevs.size(); ++i) {
+ AudioHwDevice *dev = mAudioHwDevs.valueAt(i);
+ std::vector<AudioMMapPolicyInfo> infos;
+ status_t status = dev->getMmapPolicyInfos(policyType, &infos);
+ if (status != NO_ERROR) {
+ ALOGE("Failed to query mmap policy info of %d, error %d",
+ mAudioHwDevs.keyAt(i), status);
+ continue;
+ }
+ policyInfos->insert(policyInfos->end(), infos.begin(), infos.end());
+ }
+ mPolicyInfos[policyType] = *policyInfos;
+ } else {
+ getMmapPolicyInfosFromSystemProperty(policyType, policyInfos);
+ mPolicyInfos[policyType] = *policyInfos;
+ }
+ return NO_ERROR;
+}
+
+int32_t AudioFlinger::getAAudioMixerBurstCount() {
+ Mutex::Autolock _l(mLock);
+ return mAAudioBurstsPerBuffer;
+}
+
+int32_t AudioFlinger::getAAudioHardwareBurstMinUsec() {
+ Mutex::Autolock _l(mLock);
+ return mAAudioHwBurstMinMicros;
+}
+
+// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
+std::optional<media::AudioVibratorInfo> AudioFlinger::getDefaultVibratorInfo_l() {
+ if (mAudioVibratorInfos.empty()) {
+ return {};
+ }
+ return mAudioVibratorInfos.front();
}
AudioFlinger::~AudioFlinger()
@@ -566,10 +615,12 @@
String8 result;
result.append("Clients:\n");
+ result.append(" pid heap_size\n");
for (size_t i = 0; i < mClients.size(); ++i) {
sp<Client> client = mClients.valueAt(i).promote();
if (client != 0) {
- result.appendFormat(" pid: %d\n", client->pid());
+ result.appendFormat("%6d %12zu\n", client->pid(),
+ client->heap()->getMemoryHeap()->getSize());
}
}
@@ -695,7 +746,7 @@
// dump all hardware devs
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
- dev->dump(fd);
+ dev->dump(fd, args);
}
mPatchPanel.dump(fd);
@@ -997,8 +1048,9 @@
}
}
}
-
- setAudioHwSyncForSession_l(thread, sessionId);
+ if ((output.flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) == AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
+ setAudioHwSyncForSession_l(thread, sessionId);
+ }
}
if (lStatus != NO_ERROR) {
@@ -1873,13 +1925,13 @@
}
}
-void AudioFlinger::ioConfigChanged(audio_io_config_event event,
+void AudioFlinger::ioConfigChanged(audio_io_config_event_t event,
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid) {
+ media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
+ legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(event));
media::AudioIoDescriptor descAidl = VALUE_OR_FATAL(
legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(ioDesc));
- media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
- legacy2aidl_audio_io_config_event_AudioIoConfigEvent(event));
Mutex::Autolock _l(mClientLock);
size_t size = mNotificationClients.size();
@@ -2142,6 +2194,20 @@
goto Exit;
}
+ if (recordTrack->isFastTrack()) {
+ output.serverConfig = {
+ thread->sampleRate(),
+ thread->channelMask(),
+ thread->format()
+ };
+ } else {
+ output.serverConfig = {
+ recordTrack->sampleRate(),
+ recordTrack->channelMask(),
+ recordTrack->format()
+ };
+ }
+
// Check if one effect chain was awaiting for an AudioRecord to be created on this
// session and move it to this thread.
sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
@@ -2272,6 +2338,17 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
+ if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+ if (int32_t mixerBursts = dev->getAAudioMixerBurstCount();
+ mixerBursts > mAAudioBurstsPerBuffer) {
+ mAAudioBurstsPerBuffer = mixerBursts;
+ }
+ if (int32_t hwBurstMinMicros = dev->getAAudioHardwareBurstMinUsec();
+ hwBurstMinMicros < mAAudioHwBurstMinMicros || mAAudioHwBurstMinMicros == 0) {
+ mAAudioHwBurstMinMicros = hwBurstMinMicros;
+ }
+ }
+
mAudioHwDevs.add(handle, audioDevice);
ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);
@@ -2455,6 +2532,10 @@
ThreadBase *thread = (ThreadBase *)mRecordThreads.valueAt(i).get();
thread->systemReady();
}
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ ThreadBase *thread = (ThreadBase *)mMmapThreads.valueAt(i).get();
+ thread->systemReady();
+ }
return NO_ERROR;
}
@@ -2501,7 +2582,8 @@
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig __unused,
audio_devices_t deviceType,
const String8& address,
audio_output_flags_t flags)
@@ -2529,16 +2611,16 @@
// Check only for Normal Mixing mode
if (kEnableExtendedPrecision) {
// Specify format (uncomment one below to choose)
- //config->format = AUDIO_FORMAT_PCM_FLOAT;
- //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
- //config->format = AUDIO_FORMAT_PCM_32_BIT;
- //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
- // ALOGV("openOutput_l() upgrading format to %#08x", config->format);
+ //halConfig->format = AUDIO_FORMAT_PCM_FLOAT;
+ //halConfig->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ //halConfig->format = AUDIO_FORMAT_PCM_32_BIT;
+ //halConfig->format = AUDIO_FORMAT_PCM_8_24_BIT;
+ // ALOGV("openOutput_l() upgrading format to %#08x", halConfig->format);
}
if (kEnableExtendedChannels) {
// Specify channel mask (uncomment one below to choose)
- //config->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch
- //config->channel_mask = audio_channel_mask_from_representation_and_bits(
+ //halConfig->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch
+ //halConfig->channel_mask = audio_channel_mask_from_representation_and_bits(
// AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1); // another 4ch example
}
}
@@ -2549,7 +2631,7 @@
*output,
deviceType,
flags,
- config,
+ halConfig,
address.string());
mHardwareStatus = AUDIO_HW_IDLE;
@@ -2564,13 +2646,25 @@
return thread;
} else {
sp<PlaybackThread> thread;
- if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ //TODO: b/193496180 use spatializer flag at audio HAL when available
+ if (flags == (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST
+ | AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
+#ifdef MULTICHANNEL_EFFECT_CHAIN
+ thread = new SpatializerThread(this, outputStream, *output,
+ mSystemReady, mixerConfig);
+ ALOGD("openOutput_l() created spatializer output: ID %d thread %p",
+ *output, thread.get());
+#else
+ ALOGE("openOutput_l() cannot create spatializer thread "
+ "without #define MULTICHANNEL_EFFECT_CHAIN");
+#endif
+ } else if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread = new OffloadThread(this, outputStream, *output, mSystemReady);
ALOGV("openOutput_l() created offload output: ID %d thread %p",
*output, thread.get());
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
- || !isValidPcmSinkFormat(config->format)
- || !isValidPcmSinkChannelMask(config->channel_mask)) {
+ || !isValidPcmSinkFormat(halConfig->format)
+ || !isValidPcmSinkChannelMask(halConfig->channel_mask)) {
thread = new DirectOutputThread(this, outputStream, *output, mSystemReady);
ALOGV("openOutput_l() created direct output: ID %d thread %p",
*output, thread.get());
@@ -2597,8 +2691,10 @@
{
audio_module_handle_t module = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_module_handle_t(request.module));
- audio_config_t config = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioConfig_audio_config_t(request.config));
+ audio_config_t halConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(request.halConfig, false /*isInput*/));
+ audio_config_base_t mixerConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(request.mixerConfig, false/*isInput*/));
sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
aidl2legacy_DeviceDescriptorBase(request.device));
audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
@@ -2611,9 +2707,9 @@
"Channels %#x, flags %#x",
this, module,
device->toString().c_str(),
- config.sample_rate,
- config.format,
- config.channel_mask,
+ halConfig.sample_rate,
+ halConfig.format,
+ halConfig.channel_mask,
flags);
audio_devices_t deviceType = device->type();
@@ -2625,7 +2721,8 @@
Mutex::Autolock _l(mLock);
- sp<ThreadBase> thread = openOutput_l(module, &output, &config, deviceType, address, flags);
+ sp<ThreadBase> thread = openOutput_l(module, &output, &halConfig,
+ &mixerConfig, deviceType, address, flags);
if (thread != 0) {
if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
@@ -2650,7 +2747,8 @@
mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
}
response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
- response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ response->config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(halConfig, false /*isInput*/));
response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
response->flags = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
@@ -2736,9 +2834,7 @@
mMmapThreads.removeItem(output);
ALOGD("closing mmapThread %p", mmapThread.get());
}
- const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
- ioDesc->mIoHandle = output;
- ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+ ioConfigChanged(AUDIO_OUTPUT_CLOSED, sp<AudioIoDescriptor>::make(output));
mPatchPanel.notifyStreamClosed(output);
}
// The thread entity (active unit of execution) is no longer running here,
@@ -2811,16 +2907,16 @@
{
Mutex::Autolock _l(mLock);
- if (request.device.type == AUDIO_DEVICE_NONE) {
+ AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceTypeAddress(request.device));
+ if (device.mType == AUDIO_DEVICE_NONE) {
return BAD_VALUE;
}
audio_io_handle_t input = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_io_handle_t(request.input));
audio_config_t config = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioConfig_audio_config_t(request.config));
- AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioDeviceTypeAddress(request.device));
+ aidl2legacy_AudioConfig_audio_config_t(request.config, true /*isInput*/));
sp<ThreadBase> thread = openInput_l(
VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_module_handle_t(request.module)),
@@ -2828,13 +2924,14 @@
&config,
device.mType,
device.address().c_str(),
- VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSourceType_audio_source_t(request.source)),
+ VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSource_audio_source_t(request.source)),
VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_input_flags_t_mask(request.flags)),
AUDIO_DEVICE_NONE,
String8{});
response->input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
- response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ response->config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
response->device = request.device;
if (thread != 0) {
@@ -2996,9 +3093,7 @@
dumpToThreadLog_l(mmapThread);
mMmapThreads.removeItem(input);
}
- const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
- ioDesc->mIoHandle = input;
- ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
+ ioConfigChanged(AUDIO_INPUT_CLOSED, sp<AudioIoDescriptor>::make(input));
}
// FIXME: calling thread->exit() without mLock held should not be needed anymore now that
// we have a different lock for notification client
@@ -3729,6 +3824,15 @@
goto Exit;
}
+ // Only audio policy service can create a spatializer effect
+ if ((memcmp(&descOut.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0) &&
+ (callingUid != AID_AUDIOSERVER || currentPid != getpid())) {
+ ALOGW("%s: attempt to create a spatializer effect from uid/pid %d/%d",
+ __func__, callingUid, currentPid);
+ lStatus = PERMISSION_DENIED;
+ goto Exit;
+ }
+
if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
// if the output returned by getOutputForEffect() is removed before we lock the
// mutex below, the call to checkPlaybackThread_l(io) below will detect it
@@ -3744,7 +3848,7 @@
ALOGV("%s device type %#x address %s", __func__, device.mType, device.getAddress());
handle = mDeviceEffectManager.createEffect_l(
&descOut, device, client, effectClient, mPatchPanel.patches_l(),
- &enabledOut, &lStatus, probe);
+ &enabledOut, &lStatus, probe, request.notifyFramesProcessed);
if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
// remove local strong reference to Client with mClientLock held
Mutex::Autolock _cl(mClientLock);
@@ -3797,7 +3901,8 @@
io = mPlaybackThreads.keyAt(0);
}
ALOGV("createEffect() got io %d for effect %s", io, descOut.name);
- } else if (checkPlaybackThread_l(io) != nullptr) {
+ } else if (checkPlaybackThread_l(io) != nullptr
+ && sessionId != AUDIO_SESSION_OUTPUT_STAGE) {
// allow only one effect chain per sessionId on mPlaybackThreads.
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
const audio_io_handle_t checkIo = mPlaybackThreads.keyAt(i);
@@ -3863,7 +3968,8 @@
}
}
handle = thread->createEffect_l(client, effectClient, priority, sessionId,
- &descOut, &enabledOut, &lStatus, pinned, probe);
+ &descOut, &enabledOut, &lStatus, pinned, probe,
+ request.notifyFramesProcessed);
if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
// remove local strong reference to Client with mClientLock held
Mutex::Autolock _cl(mClientLock);
@@ -4178,6 +4284,7 @@
case TransactionCode::LIST_AUDIO_PATCHES:
case TransactionCode::SET_AUDIO_PORT_CONFIG:
case TransactionCode::SET_RECORD_SILENCED:
+ case TransactionCode::AUDIO_POLICY_READY:
ALOGW("%s: transaction %d received from PID %d",
__func__, code, IPCThreadState::self()->getCallingPid());
// return status only for non void methods
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index fff61f8..8c546cc 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -269,6 +269,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
virtual status_t systemReady();
+ virtual status_t audioPolicyReady() { mAudioPolicyReady.store(true); return NO_ERROR; }
+ bool isAudioPolicyReady() const { return mAudioPolicyReady.load(); }
+
virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
@@ -279,6 +282,14 @@
virtual status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs);
+ virtual status_t getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+ virtual int32_t getAAudioMixerBurstCount();
+
+ virtual int32_t getAAudioHardwareBurstMinUsec();
+
status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
const std::function<status_t()>& delegate) override;
@@ -309,7 +320,7 @@
void updateDownStreamPatches_l(const struct audio_patch *patch,
const std::set<audio_io_handle_t> streams);
- const media::AudioVibratorInfo* getDefaultVibratorInfo_l();
+ std::optional<media::AudioVibratorInfo> getDefaultVibratorInfo_l();
private:
// FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
@@ -735,7 +746,8 @@
const String8& outputDeviceAddress);
sp<ThreadBase> openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
audio_devices_t deviceType,
const String8& address,
audio_output_flags_t flags);
@@ -746,7 +758,7 @@
// no range check, AudioFlinger::mLock held
bool streamMute_l(audio_stream_type_t stream) const
{ return mStreamTypes[stream].mute; }
- void ioConfigChanged(audio_io_config_event event,
+ void ioConfigChanged(audio_io_config_event_t event,
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid = 0);
@@ -986,6 +998,7 @@
DeviceEffectManager mDeviceEffectManager;
bool mSystemReady;
+ std::atomic_bool mAudioPolicyReady{};
mediautils::UidInfo mUidInfo;
@@ -999,6 +1012,11 @@
// Keep in sync with java definition in media/java/android/media/AudioRecord.java
static constexpr int32_t kMaxSharedAudioHistoryMs = 5000;
+
+ std::map<media::audio::common::AudioMMapPolicyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo>> mPolicyInfos;
+ int32_t mAAudioBurstsPerBuffer = 0;
+ int32_t mAAudioHwBurstMinMicros = 0;
};
#undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 16b25f6..dee6161 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -29,6 +29,9 @@
namespace android {
+using media::audio::common::AudioMMapPolicyInfo;
+using media::audio::common::AudioMMapPolicyType;
+
// ----------------------------------------------------------------------------
status_t AudioHwDevice::openOutputStream(
@@ -102,5 +105,18 @@
return mHwDevice->getAudioPort(port);
}
+status_t AudioHwDevice::getMmapPolicyInfos(
+ AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) const {
+ return mHwDevice->getMmapPolicyInfos(policyType, policyInfos);
+}
+
+int32_t AudioHwDevice::getAAudioMixerBurstCount() const {
+ return mHwDevice->getAAudioMixerBurstCount();
+}
+
+int32_t AudioHwDevice::getAAudioHardwareBurstMinUsec() const {
+ return mHwDevice->getAAudioHardwareBurstMinUsec();
+}
+
}; // namespace android
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index fc2c693..8c5d239 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -22,6 +22,8 @@
#include <stdlib.h>
#include <sys/types.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+#include <android/media/audio/common/AudioMMapPolicyType.h>
#include <media/audiohal/DeviceHalInterface.h>
#include <utils/Errors.h>
#include <system/audio.h>
@@ -85,6 +87,14 @@
status_t getAudioPort(struct audio_port_v7 *port) const;
+ status_t getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) const;
+
+ int32_t getAAudioMixerBurstCount() const;
+
+ int32_t getAAudioHardwareBurstMinUsec() const;
+
private:
const audio_module_handle_t mHandle;
const char * const mModuleName;
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index cecd52b..53ac5cb 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -77,7 +77,8 @@
const std::map<audio_patch_handle_t, PatchPanel::Patch>& patches,
int *enabled,
status_t *status,
- bool probe) {
+ bool probe,
+ bool notifyFramesProcessed) {
sp<DeviceEffectProxy> effect;
sp<EffectHandle> handle;
status_t lStatus;
@@ -95,10 +96,12 @@
effect = iter->second;
} else {
effect = new DeviceEffectProxy(device, mMyCallback,
- descriptor, mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT));
+ descriptor, mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT),
+ notifyFramesProcessed);
}
// create effect handle and connect it to effect module
- handle = new EffectHandle(effect, client, effectClient, 0 /*priority*/);
+ handle = new EffectHandle(effect, client, effectClient, 0 /*priority*/,
+ notifyFramesProcessed);
lStatus = handle->initCheck();
if (lStatus == NO_ERROR) {
lStatus = effect->addHandle(handle.get());
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index a05f5fe..d2faa70 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -37,7 +37,8 @@
const std::map<audio_patch_handle_t, PatchPanel::Patch>& patches,
int *enabled,
status_t *status,
- bool probe);
+ bool probe,
+ bool notifyFramesProcessed);
void createAudioPatch(audio_patch_handle_t handle, const PatchPanel::Patch& patch);
void releaseAudioPatch(audio_patch_handle_t handle);
@@ -161,10 +162,16 @@
bool isOffload() const override { return false; }
bool isOffloadOrDirect() const override { return false; }
bool isOffloadOrMmap() const override { return false; }
+ bool isSpatializer() const override { return false; }
uint32_t sampleRate() const override { return 0; }
- audio_channel_mask_t channelMask() const override { return AUDIO_CHANNEL_NONE; }
- uint32_t channelCount() const override { return 0; }
+ audio_channel_mask_t inChannelMask(int id __unused) const override {
+ return AUDIO_CHANNEL_NONE;
+ }
+ uint32_t inChannelCount(int id __unused) const override { return 0; }
+ audio_channel_mask_t outChannelMask() const override { return AUDIO_CHANNEL_NONE; }
+ uint32_t outChannelCount() const override { return 0; }
+
audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
@@ -190,6 +197,10 @@
wp<EffectChain> chain() const override { return nullptr; }
+ bool isAudioPolicyReady() const override {
+ return mManager.audioFlinger().isAudioPolicyReady();
+ }
+
int newEffectId() { return mManager.audioFlinger().nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT); }
status_t addEffectToHal(audio_port_handle_t deviceId,
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index b267d88..ca7ffdb 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -24,9 +24,11 @@
#include "Configuration.h"
#include <utils/Log.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_downmix.h>
#include <system/audio_effects/effect_dynamicsprocessing.h>
#include <system/audio_effects/effect_hapticgenerator.h>
#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_spatializer.h>
#include <system/audio_effects/effect_visualizer.h>
#include <audio_utils/channels.h>
#include <audio_utils/primitives.h>
@@ -242,6 +244,12 @@
{
Mutex::Autolock _l(mLock);
+
+ if ((isInternal_l() && !mPolicyRegistered)
+ || !getCallback()->isAudioPolicyReady()) {
+ return NO_ERROR;
+ }
+
// register effect when first handle is attached and unregister when last handle is removed
if (mPolicyRegistered != mHandles.size() > 0) {
doRegister = true;
@@ -642,6 +650,13 @@
mState = IDLE;
}
break;
+ case ACTIVE:
+ for (size_t i = 0; i < mHandles.size(); i++) {
+ if (!mHandles[i]->disconnected()) {
+ mHandles[i]->framesProcessed(mConfig.inputCfg.buffer.frameCount);
+ }
+ }
+ break;
default: //IDLE , ACTIVE, DESTROYED
break;
}
@@ -875,9 +890,9 @@
// similar to output EFFECT_FLAG_TYPE_INSERT/REPLACE,
// in which case input channel masks should be used here.
callback = getCallback();
- channelMask = callback->channelMask();
+ channelMask = callback->inChannelMask(mId);
mConfig.inputCfg.channels = channelMask;
- mConfig.outputCfg.channels = channelMask;
+ mConfig.outputCfg.channels = callback->outChannelMask();
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_MONO) {
@@ -940,11 +955,7 @@
// Auxiliary effect:
// accumulates in output buffer: input buffer != output buffer
// Therefore: accumulate <=> input buffer != output buffer
- if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
- mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
- } else {
- mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
- }
+ mConfig.outputCfg.accessMode = requiredEffectBufferAccessMode();
mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.inputCfg.buffer.frameCount = callback->frameCount();
@@ -1600,7 +1611,7 @@
return status;
}
-status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo)
+status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo)
{
if (mStatus != NO_ERROR) {
return mStatus;
@@ -1610,15 +1621,17 @@
return INVALID_OPERATION;
}
+ const size_t paramCount = 3;
std::vector<uint8_t> request(
- sizeof(effect_param_t) + sizeof(int32_t) + 2 * sizeof(float));
+ sizeof(effect_param_t) + sizeof(int32_t) + paramCount * sizeof(float));
effect_param_t *param = (effect_param_t*) request.data();
param->psize = sizeof(int32_t);
- param->vsize = 2 * sizeof(float);
+ param->vsize = paramCount * sizeof(float);
*(int32_t*)param->data = HG_PARAM_VIBRATOR_INFO;
float* vibratorInfoPtr = reinterpret_cast<float*>(param->data + sizeof(int32_t));
- vibratorInfoPtr[0] = vibratorInfo->resonantFrequency;
- vibratorInfoPtr[1] = vibratorInfo->qFactor;
+ vibratorInfoPtr[0] = vibratorInfo.resonantFrequency;
+ vibratorInfoPtr[1] = vibratorInfo.qFactor;
+ vibratorInfoPtr[2] = vibratorInfo.maxAmplitude;
std::vector<uint8_t> response;
status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
if (status == NO_ERROR) {
@@ -1708,10 +1721,11 @@
AudioFlinger::EffectHandle::EffectHandle(const sp<EffectBase>& effect,
const sp<AudioFlinger::Client>& client,
const sp<media::IEffectClient>& effectClient,
- int32_t priority)
+ int32_t priority, bool notifyFramesProcessed)
: BnEffect(),
mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
- mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false)
+ mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false),
+ mNotifyFramesProcessed(notifyFramesProcessed)
{
ALOGV("constructor %p client %p", this, client.get());
@@ -2020,6 +2034,13 @@
}
}
+void AudioFlinger::EffectHandle::framesProcessed(int32_t frames) const
+{
+ if (mEffectClient != 0 && mNotifyFramesProcessed) {
+ mEffectClient->framesProcessed(frames);
+ }
+}
+
void AudioFlinger::EffectHandle::dumpToBuffer(char* buffer, size_t size)
{
bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
@@ -2048,11 +2069,11 @@
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX),
mEffectCallback(new EffectCallback(wp<EffectChain>(this), thread))
{
- mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
sp<ThreadBase> p = thread.promote();
if (p == nullptr) {
return;
}
+ mStrategy = p->getStrategyForStream(AUDIO_STREAM_MUSIC);
mMaxTailBuffers = ((kProcessTailDurationMs * p->sampleRate()) / 1000) /
p->frameCount();
}
@@ -2125,8 +2146,8 @@
if (mInBuffer == NULL) {
return;
}
- const size_t frameSize =
- audio_bytes_per_sample(EFFECT_BUFFER_FORMAT) * mEffectCallback->channelCount();
+ const size_t frameSize = audio_bytes_per_sample(EFFECT_BUFFER_FORMAT)
+ * mEffectCallback->inChannelCount(mEffects[0]->id());
memset(mInBuffer->audioBuffer()->raw, 0, mEffectCallback->frameCount() * frameSize);
mInBuffer->commit();
@@ -2212,11 +2233,9 @@
// addEffect_l() must be called with ThreadBase::mLock and EffectChain::mLock held
status_t AudioFlinger::EffectChain::addEffect_ll(const sp<EffectModule>& effect)
{
- effect_descriptor_t desc = effect->desc();
- uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
-
effect->setCallback(mEffectCallback);
+ effect_descriptor_t desc = effect->desc();
if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
// Auxiliary effects are inserted at the beginning of mEffects vector as
// they are processed first and accumulated in chain input buffer
@@ -2236,97 +2255,139 @@
numSamples * sizeof(int32_t), &halBuffer);
#endif
if (result != OK) return result;
+
+ effect->configure();
+
effect->setInBuffer(halBuffer);
// auxiliary effects output samples to chain input buffer for further processing
// by insert effects
effect->setOutBuffer(mInBuffer);
} else {
- // Insert effects are inserted at the end of mEffects vector as they are processed
- // after track and auxiliary effects.
- // Insert effect order as a function of indicated preference:
- // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
- // another effect is present
- // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
- // last effect claiming first position
- // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
- // first effect claiming last position
- // else if EFFECT_FLAG_INSERT_ANY insert after first or before last
- // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
- // already present
-
- size_t size = mEffects.size();
- size_t idx_insert = size;
- ssize_t idx_insert_first = -1;
- ssize_t idx_insert_last = -1;
-
- for (size_t i = 0; i < size; i++) {
- effect_descriptor_t d = mEffects[i]->desc();
- uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
- uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
- if (iMode == EFFECT_FLAG_TYPE_INSERT) {
- // check invalid effect chaining combinations
- if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
- iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
- ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s",
- desc.name, d.name);
- return INVALID_OPERATION;
- }
- // remember position of first insert effect and by default
- // select this as insert position for new effect
- if (idx_insert == size) {
- idx_insert = i;
- }
- // remember position of last insert effect claiming
- // first position
- if (iPref == EFFECT_FLAG_INSERT_FIRST) {
- idx_insert_first = i;
- }
- // remember position of first insert effect claiming
- // last position
- if (iPref == EFFECT_FLAG_INSERT_LAST &&
- idx_insert_last == -1) {
- idx_insert_last = i;
- }
- }
+ ssize_t idx_insert = getInsertIndex(desc);
+ if (idx_insert < 0) {
+ return INVALID_OPERATION;
}
- // modify idx_insert from first position if needed
- if (insertPref == EFFECT_FLAG_INSERT_LAST) {
- if (idx_insert_last != -1) {
- idx_insert = idx_insert_last;
- } else {
- idx_insert = size;
- }
- } else {
- if (idx_insert_first != -1) {
- idx_insert = idx_insert_first + 1;
- }
- }
-
- // always read samples from chain input buffer
- effect->setInBuffer(mInBuffer);
-
- // if last effect in the chain, output samples to chain
- // output buffer, otherwise to chain input buffer
- if (idx_insert == size) {
- if (idx_insert != 0) {
- mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
- mEffects[idx_insert-1]->configure();
- }
- effect->setOutBuffer(mOutBuffer);
- } else {
- effect->setOutBuffer(mInBuffer);
- }
+ size_t previousSize = mEffects.size();
mEffects.insertAt(effect, idx_insert);
- ALOGV("addEffect_l() effect %p, added in chain %p at rank %zu", effect.get(), this,
- idx_insert);
+ effect->configure();
+
+ // - By default:
+ // All effects read samples from chain input buffer.
+ // The last effect in the chain, writes samples to chain output buffer,
+ // otherwise to chain input buffer
+ // - In the OUTPUT_STAGE chain of a spatializer mixer thread:
+ // The spatializer effect (first effect) reads samples from the input buffer
+ // and writes samples to the output buffer.
+ // All other effects read and writes samples to the output buffer
+ if (mEffectCallback->isSpatializer()
+ && mSessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+ effect->setOutBuffer(mOutBuffer);
+ if (idx_insert == 0) {
+ if (previousSize != 0) {
+ mEffects[1]->configure();
+ mEffects[1]->setInBuffer(mOutBuffer);
+ mEffects[1]->updateAccessMode(); // reconfig if neeeded.
+ }
+ effect->setInBuffer(mInBuffer);
+ } else {
+ effect->setInBuffer(mOutBuffer);
+ }
+ } else {
+ effect->setInBuffer(mInBuffer);
+ if (idx_insert == previousSize) {
+ if (idx_insert != 0) {
+ mEffects[idx_insert-1]->configure();
+ mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
+ mEffects[idx_insert - 1]->updateAccessMode(); // reconfig if neeeded.
+ }
+ effect->setOutBuffer(mOutBuffer);
+ } else {
+ effect->setOutBuffer(mInBuffer);
+ }
+ }
+ ALOGV("%s effect %p, added in chain %p at rank %zu",
+ __func__, effect.get(), this, idx_insert);
}
effect->configure();
return NO_ERROR;
}
+ssize_t AudioFlinger::EffectChain::getInsertIndex(const effect_descriptor_t& desc) {
+ // Insert effects are inserted at the end of mEffects vector as they are processed
+ // after track and auxiliary effects.
+ // Insert effect order as a function of indicated preference:
+ // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
+ // another effect is present
+ // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
+ // last effect claiming first position
+ // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
+ // first effect claiming last position
+ // else if EFFECT_FLAG_INSERT_ANY insert after first or before last
+ // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
+ // already present
+ // Spatializer or Downmixer effects are inserted in first position because
+ // they adapt the channel count for all other effects in the chain
+ if ((memcmp(&desc.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0)
+ || (memcmp(&desc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0)) {
+ return 0;
+ }
+
+ size_t size = mEffects.size();
+ uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
+ ssize_t idx_insert;
+ ssize_t idx_insert_first = -1;
+ ssize_t idx_insert_last = -1;
+
+ idx_insert = size;
+ for (size_t i = 0; i < size; i++) {
+ effect_descriptor_t d = mEffects[i]->desc();
+ uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
+ uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
+ if (iMode == EFFECT_FLAG_TYPE_INSERT) {
+ // check invalid effect chaining combinations
+ if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
+ iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
+ ALOGW("%s could not insert effect %s: exclusive conflict with %s",
+ __func__, desc.name, d.name);
+ return -1;
+ }
+ // remember position of first insert effect and by default
+ // select this as insert position for new effect
+ if (idx_insert == size) {
+ idx_insert = i;
+ }
+ // remember position of last insert effect claiming
+ // first position
+ if (iPref == EFFECT_FLAG_INSERT_FIRST) {
+ idx_insert_first = i;
+ }
+ // remember position of first insert effect claiming
+ // last position
+ if (iPref == EFFECT_FLAG_INSERT_LAST &&
+ idx_insert_last == -1) {
+ idx_insert_last = i;
+ }
+ }
+ }
+
+ // modify idx_insert from first position if needed
+ if (insertPref == EFFECT_FLAG_INSERT_LAST) {
+ if (idx_insert_last != -1) {
+ idx_insert = idx_insert_last;
+ } else {
+ idx_insert = size;
+ }
+ } else {
+ if (idx_insert_first != -1) {
+ idx_insert = idx_insert_first + 1;
+ }
+ }
+ return idx_insert;
+}
+
// removeEffect_l() must be called with ThreadBase::mLock held
size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect,
bool release)
@@ -2350,14 +2411,21 @@
if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
if (i == size - 1 && i != 0) {
- mEffects[i - 1]->setOutBuffer(mOutBuffer);
mEffects[i - 1]->configure();
+ mEffects[i - 1]->setOutBuffer(mOutBuffer);
}
}
mEffects.removeAt(i);
+
+ // make sure the input buffer configuration for the new first effect in the chain
+ // is updated if needed (can switch from HAL channel mask to mixer channel mask)
+ if (i == 0 && size > 1) {
+ mEffects[0]->configure();
+ mEffects[0]->setInBuffer(mInBuffer);
+ }
+
ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
this, i);
-
break;
}
}
@@ -2901,27 +2969,26 @@
}
bool AudioFlinger::EffectChain::EffectCallback::isOffload() const {
- sp<ThreadBase> t = thread().promote();
- if (t == nullptr) {
- return false;
- }
- return t->type() == ThreadBase::OFFLOAD;
+ return mThreadType == ThreadBase::OFFLOAD;
}
bool AudioFlinger::EffectChain::EffectCallback::isOffloadOrDirect() const {
- sp<ThreadBase> t = thread().promote();
- if (t == nullptr) {
- return false;
- }
- return t->type() == ThreadBase::OFFLOAD || t->type() == ThreadBase::DIRECT;
+ return mThreadType == ThreadBase::OFFLOAD || mThreadType == ThreadBase::DIRECT;
}
bool AudioFlinger::EffectChain::EffectCallback::isOffloadOrMmap() const {
- sp<ThreadBase> t = thread().promote();
- if (t == nullptr) {
+ switch (mThreadType) {
+ case ThreadBase::OFFLOAD:
+ case ThreadBase::MMAP_PLAYBACK:
+ case ThreadBase::MMAP_CAPTURE:
+ return true;
+ default:
return false;
}
- return t->isOffloadOrMmap();
+}
+
+bool AudioFlinger::EffectChain::EffectCallback::isSpatializer() const {
+ return mThreadType == ThreadBase::SPATIALIZER;
}
uint32_t AudioFlinger::EffectChain::EffectCallback::sampleRate() const {
@@ -2932,20 +2999,68 @@
return t->sampleRate();
}
-audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::inChannelMask(int id) const {
sp<ThreadBase> t = thread().promote();
if (t == nullptr) {
return AUDIO_CHANNEL_NONE;
}
- return t->channelMask();
+ sp<EffectChain> c = chain().promote();
+ if (c == nullptr) {
+ return AUDIO_CHANNEL_NONE;
+ }
+
+ if (mThreadType == ThreadBase::SPATIALIZER) {
+ if (c->sessionId() == AUDIO_SESSION_OUTPUT_STAGE) {
+ if (c->isFirstEffect(id)) {
+ return t->mixerChannelMask();
+ } else {
+ return t->channelMask();
+ }
+ } else if (!audio_is_global_session(c->sessionId())) {
+ if ((t->hasAudioSession_l(c->sessionId()) & ThreadBase::SPATIALIZED_SESSION) != 0) {
+ return t->mixerChannelMask();
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
}
-uint32_t AudioFlinger::EffectChain::EffectCallback::channelCount() const {
+uint32_t AudioFlinger::EffectChain::EffectCallback::inChannelCount(int id) const {
+ return audio_channel_count_from_out_mask(inChannelMask(id));
+}
+
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::outChannelMask() const {
sp<ThreadBase> t = thread().promote();
if (t == nullptr) {
- return 0;
+ return AUDIO_CHANNEL_NONE;
}
- return t->channelCount();
+ sp<EffectChain> c = chain().promote();
+ if (c == nullptr) {
+ return AUDIO_CHANNEL_NONE;
+ }
+
+ if (mThreadType == ThreadBase::SPATIALIZER) {
+ if (!audio_is_global_session(c->sessionId())) {
+ if ((t->hasAudioSession_l(c->sessionId()) & ThreadBase::SPATIALIZED_SESSION) != 0) {
+ return t->mixerChannelMask();
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
+}
+
+uint32_t AudioFlinger::EffectChain::EffectCallback::outChannelCount() const {
+ return audio_channel_count_from_out_mask(outChannelMask());
}
audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::hapticChannelMask() const {
@@ -3143,7 +3258,10 @@
} else {
mHalEffect->setDevices({mDevice});
}
- *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/);
+ mHalEffect->configure();
+
+ *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
+ mNotifyFramesProcessed);
status = (*handle)->initCheck();
if (status == OK) {
status = mHalEffect->addHandle((*handle).get());
@@ -3169,7 +3287,8 @@
int enabled;
*handle = thread->createEffect_l(nullptr, nullptr, 0, AUDIO_SESSION_DEVICE,
const_cast<effect_descriptor_t *>(&mDescriptor),
- &enabled, &status, false, false /*probe*/);
+ &enabled, &status, false, false /*probe*/,
+ mNotifyFramesProcessed);
ALOGV("%s thread->createEffect_l status %d", __func__, status);
} else {
status = BAD_VALUE;
@@ -3189,8 +3308,14 @@
}
void AudioFlinger::DeviceEffectProxy::onReleasePatch(audio_patch_handle_t patchHandle) {
- Mutex::Autolock _l(mProxyLock);
- mEffectHandles.erase(patchHandle);
+ sp<EffectHandle> effect;
+ {
+ Mutex::Autolock _l(mProxyLock);
+ if (mEffectHandles.find(patchHandle) != mEffectHandles.end()) {
+ effect = mEffectHandles.at(patchHandle);
+ mEffectHandles.erase(patchHandle);
+ }
+ }
}
@@ -3198,6 +3323,7 @@
{
Mutex::Autolock _l(mProxyLock);
if (effect == mHalEffect) {
+ mHalEffect->release_l();
mHalEffect.clear();
mDevicePort.id = AUDIO_PORT_HANDLE_NONE;
}
@@ -3345,7 +3471,7 @@
if (proxy == nullptr) {
return NO_INIT;
}
- return proxy->addEffectToHal(effect);
+ return proxy->removeEffectFromHal(effect);
}
bool AudioFlinger::DeviceEffectProxy::ProxyCallback::isOutput() const {
@@ -3364,7 +3490,8 @@
return proxy->sampleRate();
}
-audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelMask(
+ int id __unused) const {
sp<DeviceEffectProxy> proxy = mProxy.promote();
if (proxy == nullptr) {
return AUDIO_CHANNEL_OUT_STEREO;
@@ -3372,7 +3499,7 @@
return proxy->channelMask();
}
-uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelCount() const {
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelCount(int id __unused) const {
sp<DeviceEffectProxy> proxy = mProxy.promote();
if (proxy == nullptr) {
return 2;
@@ -3380,4 +3507,38 @@
return proxy->channelCount();
}
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelMask() const {
+ sp<DeviceEffectProxy> proxy = mProxy.promote();
+ if (proxy == nullptr) {
+ return AUDIO_CHANNEL_OUT_STEREO;
+ }
+ return proxy->channelMask();
+}
+
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelCount() const {
+ sp<DeviceEffectProxy> proxy = mProxy.promote();
+ if (proxy == nullptr) {
+ return 2;
+ }
+ return proxy->channelCount();
+}
+
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectEnable(
+ const sp<EffectBase>& effectBase) {
+ sp<EffectModule> effect = effectBase->asEffectModule();
+ if (effect == nullptr) {
+ return;
+ }
+ effect->start();
+}
+
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectDisable(
+ const sp<EffectBase>& effectBase) {
+ sp<EffectModule> effect = effectBase->asEffectModule();
+ if (effect == nullptr) {
+ return;
+ }
+ effect->stop();
+}
+
} // namespace android
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index a727e04..e2bea67 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -33,9 +33,12 @@
virtual bool isOffload() const = 0;
virtual bool isOffloadOrDirect() const = 0;
virtual bool isOffloadOrMmap() const = 0;
+ virtual bool isSpatializer() const = 0;
virtual uint32_t sampleRate() const = 0;
- virtual audio_channel_mask_t channelMask() const = 0;
- virtual uint32_t channelCount() const = 0;
+ virtual audio_channel_mask_t inChannelMask(int id) const = 0;
+ virtual uint32_t inChannelCount(int id) const = 0;
+ virtual audio_channel_mask_t outChannelMask() const = 0;
+ virtual uint32_t outChannelCount() const = 0;
virtual audio_channel_mask_t hapticChannelMask() const = 0;
virtual size_t frameCount() const = 0;
@@ -64,6 +67,8 @@
virtual void resetVolume() = 0;
virtual wp<EffectChain> chain() const = 0;
+
+ virtual bool isAudioPolicyReady() const = 0;
};
// EffectBase(EffectModule) and EffectChain classes both have their own mutex to protect
@@ -164,6 +169,16 @@
void dump(int fd, const Vector<String16>& args);
+protected:
+ bool isInternal_l() const {
+ for (auto handle : mHandles) {
+ if (handle->client() != nullptr) {
+ return false;
+ }
+ }
+ return true;
+ }
+
private:
friend class AudioFlinger; // for mHandles
bool mPinned = false;
@@ -240,6 +255,13 @@
return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
}
+ // Updates the access mode if it is out of date. May issue a new effect configure.
+ void updateAccessMode() {
+ if (requiredEffectBufferAccessMode() != mConfig.outputCfg.accessMode) {
+ configure();
+ }
+ }
+
status_t setDevices(const AudioDeviceTypeAddrVector &devices);
status_t setInputDevice(const AudioDeviceTypeAddr &device);
status_t setVolume(uint32_t *left, uint32_t *right, bool controller);
@@ -259,7 +281,7 @@
bool isHapticGenerator() const;
status_t setHapticIntensity(int id, int intensity);
- status_t setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo);
+ status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo);
void dump(int fd, const Vector<String16>& args);
@@ -275,6 +297,11 @@
status_t stop_l();
status_t removeEffectFromHal_l();
status_t sendSetAudioDevicesCommand(const AudioDeviceTypeAddrVector &devices, uint32_t cmdCode);
+ effect_buffer_access_e requiredEffectBufferAccessMode() const {
+ return mConfig.inputCfg.buffer.raw == mConfig.outputCfg.buffer.raw
+ ? EFFECT_BUFFER_ACCESS_WRITE : EFFECT_BUFFER_ACCESS_ACCUMULATE;
+ }
+
effect_config_t mConfig; // input and output audio configuration
sp<EffectHalInterface> mEffectInterface; // Effect module HAL
@@ -327,7 +354,7 @@
EffectHandle(const sp<EffectBase>& effect,
const sp<AudioFlinger::Client>& client,
const sp<media::IEffectClient>& effectClient,
- int32_t priority);
+ int32_t priority, bool notifyFramesProcessed);
virtual ~EffectHandle();
virtual status_t initCheck();
@@ -342,6 +369,8 @@
android::binder::Status disconnect() override;
android::binder::Status getCblk(media::SharedFileRegion* _aidl_return) override;
+ sp<Client> client() const { return mClient; }
+
private:
void disconnect(bool unpinIfLast);
@@ -356,6 +385,8 @@
void setEnabled(bool enabled);
bool enabled() const { return mEnabled; }
+ void framesProcessed(int32_t frames) const;
+
// Getters
wp<EffectBase> effect() const { return mEffect; }
int id() const {
@@ -389,6 +420,8 @@
bool mEnabled; // cached enable state: needed when the effect is
// restored after being suspended
bool mDisconnected; // Set to true by disconnect()
+ const bool mNotifyFramesProcessed; // true if the client callback event
+ // EVENT_FRAMES_PROCESSED must be generated
};
// the EffectChain class represents a group of effects associated to one audio session.
@@ -511,6 +544,8 @@
sp<EffectCallbackInterface> effectCallback() const { return mEffectCallback; }
wp<ThreadBase> thread() const { return mEffectCallback->thread(); }
+ bool isFirstEffect(int id) const { return !mEffects.isEmpty() && id == mEffects[0]->id(); }
+
void dump(int fd, const Vector<String16>& args);
private:
@@ -530,6 +565,12 @@
: mChain(owner)
, mThread(thread)
, mAudioFlinger(*gAudioFlinger) {
+ sp<ThreadBase> base = thread.promote();
+ if (base != nullptr) {
+ mThreadType = base->type();
+ } else {
+ mThreadType = ThreadBase::MIXER; // assure a consistent value.
+ }
}
status_t createEffectHal(const effect_uuid_t *pEffectUuid,
@@ -542,10 +583,13 @@
bool isOffload() const override;
bool isOffloadOrDirect() const override;
bool isOffloadOrMmap() const override;
+ bool isSpatializer() const override;
uint32_t sampleRate() const override;
- audio_channel_mask_t channelMask() const override;
- uint32_t channelCount() const override;
+ audio_channel_mask_t inChannelMask(int id) const override;
+ uint32_t inChannelCount(int id) const override;
+ audio_channel_mask_t outChannelMask() const override;
+ uint32_t outChannelCount() const override;
audio_channel_mask_t hapticChannelMask() const override;
size_t frameCount() const override;
uint32_t latency() const override;
@@ -566,16 +610,22 @@
wp<EffectChain> chain() const override { return mChain; }
+ bool isAudioPolicyReady() const override {
+ return mAudioFlinger.isAudioPolicyReady();
+ }
+
wp<ThreadBase> thread() const { return mThread.load(); }
- void setThread(const wp<ThreadBase>& thread) {
+ void setThread(const sp<ThreadBase>& thread) {
mThread = thread;
+ mThreadType = thread->type();
}
private:
const wp<EffectChain> mChain;
mediautils::atomic_wp<ThreadBase> mThread;
AudioFlinger &mAudioFlinger; // implementation detail: outer instance always exists.
+ ThreadBase::type_t mThreadType;
};
friend class AudioFlinger; // for mThread, mEffects
@@ -612,6 +662,8 @@
void setVolumeForOutput_l(uint32_t left, uint32_t right);
+ ssize_t getInsertIndex(const effect_descriptor_t& desc);
+
mutable Mutex mLock; // mutex protecting effect list
Vector< sp<EffectModule> > mEffects; // list of effect modules
audio_session_t mSessionId; // audio session ID
@@ -643,11 +695,11 @@
public:
DeviceEffectProxy (const AudioDeviceTypeAddr& device,
const sp<DeviceEffectManagerCallback>& callback,
- effect_descriptor_t *desc, int id)
+ effect_descriptor_t *desc, int id, bool notifyFramesProcessed)
: EffectBase(callback, desc, id, AUDIO_SESSION_DEVICE, false),
mDevice(device), mManagerCallback(callback),
- mMyCallback(new ProxyCallback(wp<DeviceEffectProxy>(this),
- callback)) {}
+ mMyCallback(new ProxyCallback(wp<DeviceEffectProxy>(this), callback)),
+ mNotifyFramesProcessed(notifyFramesProcessed) {}
status_t setEnabled(bool enabled, bool fromHandle) override;
sp<DeviceEffectProxy> asDeviceEffectProxy() override { return this; }
@@ -692,10 +744,13 @@
bool isOffload() const override { return false; }
bool isOffloadOrDirect() const override { return false; }
bool isOffloadOrMmap() const override { return false; }
+ bool isSpatializer() const override { return false; }
uint32_t sampleRate() const override;
- audio_channel_mask_t channelMask() const override;
- uint32_t channelCount() const override;
+ audio_channel_mask_t inChannelMask(int id) const override;
+ uint32_t inChannelCount(int id) const override;
+ audio_channel_mask_t outChannelMask() const override;
+ uint32_t outChannelCount() const override;
audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
@@ -711,11 +766,15 @@
void resetVolume() override {}
product_strategy_t strategy() const override { return static_cast<product_strategy_t>(0); }
int32_t activeTrackCnt() const override { return 0; }
- void onEffectEnable(const sp<EffectBase>& effect __unused) override {}
- void onEffectDisable(const sp<EffectBase>& effect __unused) override {}
+ void onEffectEnable(const sp<EffectBase>& effect __unused) override;
+ void onEffectDisable(const sp<EffectBase>& effect __unused) override;
wp<EffectChain> chain() const override { return nullptr; }
+ bool isAudioPolicyReady() const override {
+ return mManagerCallback->isAudioPolicyReady();
+ }
+
int newEffectId();
private:
@@ -734,4 +793,5 @@
std::map<audio_patch_handle_t, sp<EffectHandle>> mEffectHandles; // protected by mProxyLock
sp<EffectModule> mHalEffect; // protected by mProxyLock
struct audio_port_config mDevicePort = { .id = AUDIO_PORT_HANDLE_NONE };
+ const bool mNotifyFramesProcessed;
};
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 88d4eaf..26bd92d 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -89,6 +89,7 @@
// TODO: Add channel mask to NBAIO_Format.
// We assume that the channel mask must be a valid positional channel mask.
mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
+ mBalance.setChannelMask(mSinkChannelMask);
unsigned i;
for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
@@ -204,6 +205,8 @@
(void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
(void *)(uintptr_t)fastTrack->mHapticIntensity);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_MAX_AMPLITUDE,
+ (void *)(&(fastTrack->mHapticMaxAmplitude)));
mMixer->enable(index);
break;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 857d3de..ce3cc14 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_FAST_MIXER_STATE_H
#define ANDROID_AUDIO_FAST_MIXER_STATE_H
+#include <math.h>
+
#include <audio_utils/minifloat.h>
#include <system/audio.h>
#include <media/AudioMixer.h>
@@ -51,6 +53,7 @@
int mGeneration; // increment when any field is assigned
bool mHapticPlaybackEnabled = false; // haptic playback is enabled or not
os::HapticScale mHapticIntensity = os::HapticScale::MUTE; // intensity of haptic data
+ float mHapticMaxAmplitude = NAN; // max amplitude allowed for haptic data
};
// Represents a single state of the fast mixer
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index a381c7d..93118b8 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -258,6 +258,7 @@
reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
} else {
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
@@ -276,6 +277,7 @@
patch->sinks[0].ext.device.hw_module,
&output,
&config,
+ &mixerConfig,
outputDevice,
outputDeviceAddress,
flags);
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 0929055..3cce998 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -19,6 +19,8 @@
#error This header file should only be included from AudioFlinger.h
#endif
+#include <math.h>
+
// Checks and monitors OP_PLAY_AUDIO
class OpPlayAudioMonitor : public RefBase {
public:
@@ -161,6 +163,8 @@
}
/** Return at what intensity to play haptics, used in mixer. */
os::HapticScale getHapticIntensity() const { return mHapticIntensity; }
+ /** Return the maximum amplitude allowed for haptics data, used in mixer. */
+ float getHapticMaxAmplitude() const { return mHapticMaxAmplitude; }
/** Set intensity of haptic playback, should be set after querying vibrator service. */
void setHapticIntensity(os::HapticScale hapticIntensity) {
if (os::isValidHapticScale(hapticIntensity)) {
@@ -168,6 +172,12 @@
setHapticPlaybackEnabled(mHapticIntensity != os::HapticScale::MUTE);
}
}
+ /** Set maximum amplitude allowed for haptic data, should be set after querying
+ * vibrator service.
+ */
+ void setHapticMaxAmplitude(float maxAmplitude) {
+ mHapticMaxAmplitude = maxAmplitude;
+ }
sp<os::ExternalVibration> getExternalVibration() const { return mExternalVibration; }
void setTeePatches(TeePatches teePatches);
@@ -185,6 +195,7 @@
audio_output_flags_t getOutputFlags() const { return mFlags; }
float getSpeed() const { return mSpeed; }
+
protected:
// for numerous
friend class PlaybackThread;
@@ -282,6 +293,8 @@
bool mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
// intensity to play haptic data
os::HapticScale mHapticIntensity = os::HapticScale::MUTE;
+ // max amplitude allowed for haptic data
+ float mHapticMaxAmplitude = NAN;
class AudioVibrationController : public os::BnExternalVibrationController {
public:
explicit AudioVibrationController(Track* track) : mTrack(track) {}
diff --git a/services/audioflinger/PropertyUtils.cpp b/services/audioflinger/PropertyUtils.cpp
new file mode 100644
index 0000000..65e2533
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+#include <android/media/audio/common/AudioMMapPolicy.h>
+#include <cutils/properties.h>
+
+#include "PropertyUtils.h"
+
+namespace android {
+
+using media::audio::common::AudioMMapPolicy;
+using media::audio::common::AudioMMapPolicyType;
+using media::audio::common::AudioMMapPolicyInfo;
+
+std::string getMmapPolicyProperty(AudioMMapPolicyType policyType) {
+ switch (policyType) {
+ case AudioMMapPolicyType::DEFAULT:
+ return "aaudio.mmap_policy";
+ case AudioMMapPolicyType::EXCLUSIVE:
+ return "aaudio.mmap_exclusive_policy";
+ default:
+ return "";
+ }
+}
+
+int getDefaultPolicyFromType(AudioMMapPolicyType policyType) {
+ switch (policyType) {
+ case AudioMMapPolicyType::EXCLUSIVE:
+ return AAUDIO_UNSPECIFIED;
+ case AudioMMapPolicyType::DEFAULT:
+ default:
+ return AAUDIO_POLICY_NEVER;
+ }
+}
+
+AudioMMapPolicy legacy2aidl_aaudio_policy_t_AudioMMapPolicy(aaudio_policy_t legacy) {
+ switch (legacy) {
+ case AAUDIO_POLICY_NEVER:
+ return AudioMMapPolicy::NEVER;
+ case AAUDIO_POLICY_AUTO:
+ return AudioMMapPolicy::AUTO;
+ case AAUDIO_POLICY_ALWAYS:
+ return AudioMMapPolicy::ALWAYS;
+ case AAUDIO_UNSPECIFIED:
+ return AudioMMapPolicy::UNSPECIFIED;
+ default:
+ ALOGE("%s unknown aaudio policy: %d", __func__, legacy);
+ return AudioMMapPolicy::UNSPECIFIED;
+ }
+}
+
+status_t getMmapPolicyInfosFromSystemProperty(
+ AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
+ AudioMMapPolicyInfo policyInfo;
+ const std::string propertyStr = getMmapPolicyProperty(policyType);
+ if (propertyStr.empty()) {
+ return BAD_VALUE;
+ }
+ policyInfo.mmapPolicy = legacy2aidl_aaudio_policy_t_AudioMMapPolicy(
+ property_get_int32(propertyStr.c_str(), getDefaultPolicyFromType(policyType)));
+ policyInfos->push_back(policyInfo);
+ return NO_ERROR;
+}
+
+int32_t getAAudioMixerBurstCountFromSystemProperty() {
+ static const int32_t sDefaultBursts = 2; // arbitrary, use 2 for double buffered
+ static const int32_t sMaxBursts = 1024; // arbitrary
+ static const char* sPropMixerBursts = "aaudio.mixer_bursts";
+ int32_t prop = property_get_int32(sPropMixerBursts, sDefaultBursts);
+ if (prop <= 0 || prop > sMaxBursts) {
+ ALOGE("%s: invalid value %d, use default %d", __func__, prop, sDefaultBursts);
+ prop = sDefaultBursts;
+ }
+ return prop;
+}
+
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty() {
+ static const int32_t sDefaultMicros = 1000; // arbitrary
+ static const int32_t sMaxMicros = 1000 * 1000; // arbitrary
+ static const char* sPropHwBurstMinUsec = "aaudio.hw_burst_min_usec";
+ int32_t prop = property_get_int32(sPropHwBurstMinUsec, sDefaultMicros);
+ if (prop <= 0 || prop > sMaxMicros) {
+ ALOGE("%s invalid value %d, use default %d", __func__, prop, sDefaultMicros);
+ prop = sDefaultMicros;
+ }
+ return prop;
+}
+
+} // namespace android
diff --git a/services/audioflinger/PropertyUtils.h b/services/audioflinger/PropertyUtils.h
new file mode 100644
index 0000000..fbf651a
--- /dev/null
+++ b/services/audioflinger/PropertyUtils.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/media/audio/common/AudioMMapPolicyType.h>
+#include <android/media/audio/common/AudioMMapPolicyInfo.h>
+
+namespace android {
+
+status_t getMmapPolicyInfosFromSystemProperty(
+ media::audio::common::AudioMMapPolicyType policyType,
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos);
+
+int32_t getAAudioMixerBurstCountFromSystemProperty();
+
+int32_t getAAudioHardwareBurstMinUsecFromSystemProperty();
+
+} // namespace android
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b9cdab8..43fa781 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -50,8 +50,10 @@
#include <audio_utils/format.h>
#include <audio_utils/minifloat.h>
#include <audio_utils/safe_math.h>
-#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_downmix.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_spatializer.h>
#include <system/audio.h>
// NBAIO implementations
@@ -507,6 +509,8 @@
return "MMAP_PLAYBACK";
case MMAP_CAPTURE:
return "MMAP_CAPTURE";
+ case SPATIALIZER:
+ return "SPATIALIZER";
default:
return "unknown";
}
@@ -622,7 +626,7 @@
return status;
}
-void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId)
{
Mutex::Autolock _l(mLock);
@@ -630,7 +634,7 @@
}
// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId)
{
// The audio statistics history is exponentially weighted to forget events
@@ -640,6 +644,7 @@
mIoJitterMs.reset();
mLatencyMs.reset();
mProcessTimeMs.reset();
+ mMonopipePipeDepthStats.reset();
mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);
sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid, portId);
@@ -722,6 +727,19 @@
sendConfigEvent_l(configEvent);
}
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent()
+{
+ Mutex::Autolock _l(mLock);
+ sendCheckOutputStageEffectsEvent_l();
+}
+
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent_l()
+{
+ sp<ConfigEvent> configEvent =
+ (ConfigEvent *)new CheckOutputStageEffectsEvent();
+ sendConfigEvent_l(configEvent);
+}
+
// post condition: mConfigEvents.isEmpty()
void AudioFlinger::ThreadBase::processConfigEvents_l()
{
@@ -784,6 +802,11 @@
(ResizeBufferConfigEventData *)event->mData.get();
resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
} break;
+
+ case CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS: {
+ setCheckOutputStageEffects();
+ } break;
+
default:
ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
break;
@@ -966,6 +989,12 @@
isOutput() ? "write" : "read",
mLatencyMs.toString().c_str());
}
+
+ if (mMonopipePipeDepthStats.getN() > 0) {
+ dprintf(fd, " Monopipe %s pipe depth stats: %s\n",
+ isOutput() ? "write" : "read",
+ mMonopipePipeDepthStats.toString().c_str());
+ }
}
void AudioFlinger::ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
@@ -1008,6 +1037,8 @@
return String16("MmapPlayback");
case MMAP_CAPTURE:
return String16("MmapCapture");
+ case SPATIALIZER:
+ return String16("AudioSpatial");
default:
ALOG_ASSERT(false);
return String16("AudioUnknown");
@@ -1296,8 +1327,8 @@
{
// no preprocessing on playback threads
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
- ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback"
- " thread %s", desc->name, mThreadName);
+ ALOGW("%s: pre processing effect %s created on playback"
+ " thread %s", __func__, desc->name, mThreadName);
return BAD_VALUE;
}
@@ -1312,14 +1343,21 @@
return BAD_VALUE;
}
+ if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+ && mType != SPATIALIZER) {
+ ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
+ __func__, mType);
+ return BAD_VALUE;
+ }
+
switch (mType) {
case MIXER: {
#ifndef MULTICHANNEL_EFFECT_CHAIN
// Reject any effect on mixer multichannel sinks.
// TODO: fix both format and multichannel issues with effects.
if (mChannelCount != FCC_2) {
- ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d) on MIXER"
- " thread %s", desc->name, mChannelCount, mThreadName);
+ ALOGW("%s: effect %s for multichannel(%d) on MIXER thread %s",
+ __func__, desc->name, mChannelCount, mThreadName);
return BAD_VALUE;
}
#endif
@@ -1333,15 +1371,15 @@
} else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
// only post processing on output stage session
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
- " on output stage session", desc->name);
+ ALOGW("%s: non post processing effect %s not allowed on output stage session",
+ __func__, desc->name);
return BAD_VALUE;
}
} else if (sessionId == AUDIO_SESSION_DEVICE) {
// only post processing on output stage session
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
- " on device session", desc->name);
+ ALOGW("%s: non post processing effect %s not allowed on device session",
+ __func__, desc->name);
return BAD_VALUE;
}
} else {
@@ -1352,13 +1390,12 @@
}
if (flags & AUDIO_OUTPUT_FLAG_RAW) {
- ALOGW("checkEffectCompatibility_l(): effect %s on playback thread in raw mode",
- desc->name);
+ ALOGW("%s: effect %s on playback thread in raw mode", __func__, desc->name);
return BAD_VALUE;
}
if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
- ALOGW("checkEffectCompatibility_l(): non HW effect %s on playback thread"
- " in fast mode", desc->name);
+ ALOGW("%s: non HW effect %s on playback thread in fast mode",
+ __func__, desc->name);
return BAD_VALUE;
}
}
@@ -1372,35 +1409,64 @@
case DIRECT:
// Reject any effect on Direct output threads for now, since the format of
// mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
- ALOGW("checkEffectCompatibility_l(): effect %s on DIRECT output thread %s",
- desc->name, mThreadName);
+ ALOGW("%s: effect %s on DIRECT output thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
case DUPLICATING:
#ifndef MULTICHANNEL_EFFECT_CHAIN
// Reject any effect on mixer multichannel sinks.
// TODO: fix both format and multichannel issues with effects.
if (mChannelCount != FCC_2) {
- ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d)"
- " on DUPLICATING thread %s", desc->name, mChannelCount, mThreadName);
+ ALOGW("%s: effect %s for multichannel(%d) on DUPLICATING thread %s",
+ __func__, desc->name, mChannelCount, mThreadName);
return BAD_VALUE;
}
#endif
if (audio_is_global_session(sessionId)) {
- ALOGW("checkEffectCompatibility_l(): global effect %s on DUPLICATING"
- " thread %s", desc->name, mThreadName);
+ ALOGW("%s: global effect %s on DUPLICATING thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
}
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGW("checkEffectCompatibility_l(): post processing effect %s on"
- " DUPLICATING thread %s", desc->name, mThreadName);
+ ALOGW("%s: post processing effect %s on DUPLICATING thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
}
if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
- ALOGW("checkEffectCompatibility_l(): HW tunneled effect %s on"
- " DUPLICATING thread %s", desc->name, mThreadName);
+ ALOGW("%s: HW tunneled effect %s on DUPLICATING thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
}
break;
+ case SPATIALIZER:
+ // Global effects (AUDIO_SESSION_OUTPUT_MIX) are not supported on spatializer mixer
+ // as there is no common accumulation buffer for sptialized and non sptialized tracks.
+ // Post processing effects (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE)
+ // are supported and added after the spatializer.
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+ ALOGW("%s: global effect %s not supported on spatializer thread %s",
+ __func__, desc->name, mThreadName);
+ return BAD_VALUE;
+ } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+ // only post processing , downmixer or spatializer effects on output stage session
+ if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+ || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+ break;
+ }
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+ ALOGW("%s: non post processing effect %s not allowed on output stage session",
+ __func__, desc->name);
+ return BAD_VALUE;
+ }
+ } else if (sessionId == AUDIO_SESSION_DEVICE) {
+ // only post processing on output stage session
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+ ALOGW("%s: non post processing effect %s not allowed on device session",
+ __func__, desc->name);
+ return BAD_VALUE;
+ }
+ }
+ break;
default:
LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
}
@@ -1418,7 +1484,8 @@
int *enabled,
status_t *status,
bool pinned,
- bool probe)
+ bool probe,
+ bool notifyFramesProcessed)
{
sp<EffectModule> effect;
sp<EffectHandle> handle;
@@ -1477,18 +1544,19 @@
if (effect->isHapticGenerator()) {
// TODO(b/184194057): Use the vibrator information from the vibrator that will be used
// for the HapticGenerator.
- const media::AudioVibratorInfo* defaultVibratorInfo =
- mAudioFlinger->getDefaultVibratorInfo_l();
- if (defaultVibratorInfo != nullptr) {
+ const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
+ std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+ if (defaultVibratorInfo) {
// Only set the vibrator info when it is a valid one.
- effect->setVibratorInfo(defaultVibratorInfo);
+ effect->setVibratorInfo(*defaultVibratorInfo);
}
}
// create effect handle and connect it to effect module
- handle = new EffectHandle(effect, client, effectClient, priority);
+ handle = new EffectHandle(effect, client, effectClient, priority, notifyFramesProcessed);
lStatus = handle->initCheck();
if (lStatus == OK) {
lStatus = effect->addHandle(handle.get());
+ sendCheckOutputStageEffectsEvent_l();
}
if (enabled != NULL) {
*enabled = (int)effect->isEnabled();
@@ -1531,6 +1599,7 @@
if (remove) {
removeEffect_l(effect, true);
}
+ sendCheckOutputStageEffectsEvent_l();
}
if (remove) {
mAudioFlinger->updateOrphanEffectChains(effect);
@@ -1884,10 +1953,24 @@
item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
}
+ if (mMonopipePipeDepthStats.getN() > 0) {
+ item->setDouble(MM_PREFIX "monopipePipeDepthStats.mean",
+ mMonopipePipeDepthStats.getMean());
+ item->setDouble(MM_PREFIX "monopipePipeDepthStats.std",
+ mMonopipePipeDepthStats.getStdDev());
+ }
item->selfrecord();
}
+product_strategy_t AudioFlinger::ThreadBase::getStrategyForStream(audio_stream_type_t stream) const
+{
+ if (!mAudioFlinger->isAudioPolicyReady()) {
+ return PRODUCT_STRATEGY_NONE;
+ }
+ return AudioSystem::getStrategyForStream(stream);
+}
+
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
@@ -1896,15 +1979,16 @@
AudioStreamOut* output,
audio_io_handle_t id,
type_t type,
- bool systemReady)
+ bool systemReady,
+ audio_config_base_t *mixerConfig)
: ThreadBase(audioFlinger, id, type, systemReady, true /* isOut */),
mNormalFrameCount(0), mSinkBuffer(NULL),
- mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+ mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
mMixerBuffer(NULL),
mMixerBufferSize(0),
mMixerBufferFormat(AUDIO_FORMAT_INVALID),
mMixerBufferValid(false),
- mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+ mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
mEffectBuffer(NULL),
mEffectBufferSize(0),
mEffectBufferFormat(AUDIO_FORMAT_INVALID),
@@ -1956,8 +2040,18 @@
mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
}
+ if (mixerConfig != nullptr && mixerConfig->channel_mask != AUDIO_CHANNEL_NONE) {
+ mMixerChannelMask = mixerConfig->channel_mask;
+ }
+
readOutputParameters_l();
+ if (mType != SPATIALIZER
+ && mMixerChannelMask != mChannelMask) {
+ LOG_ALWAYS_FATAL("HAL channel mask %#x does not match mixer channel mask %#x",
+ mChannelMask, mMixerChannelMask);
+ }
+
// TODO: We may also match on address as well as device type for
// AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
if (type == MIXER || type == DIRECT || type == OFFLOAD) {
@@ -1986,6 +2080,7 @@
free(mSinkBuffer);
free(mMixerBuffer);
free(mEffectBuffer);
+ free(mPostSpatializerBuffer);
}
// Thread virtuals
@@ -2080,10 +2175,12 @@
write(fd, result.string(), result.size());
}
-void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
{
dprintf(fd, " Master volume: %f\n", mMasterVolume);
dprintf(fd, " Master mute: %s\n", mMasterMute ? "on" : "off");
+ dprintf(fd, " Mixer channel Mask: %#x (%s)\n",
+ mMixerChannelMask, channelMaskToString(mMixerChannelMask, true /* output */).c_str());
if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
dprintf(fd, " Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
channelMaskToString(mHapticChannelMask, true /* output */).c_str());
@@ -2109,7 +2206,7 @@
}
if (output != nullptr) {
dprintf(fd, " Hal stream dump:\n");
- (void)output->stream->dump(fd);
+ (void)output->stream->dump(fd, args);
}
}
@@ -2219,7 +2316,7 @@
"AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
frameCount, mFrameCount);
} else {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
+ ALOGD("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
"mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
"sampleRate=%u mSampleRate=%u "
"hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
@@ -2397,11 +2494,11 @@
// all tracks in same audio session must share the same routing strategy otherwise
// conflicts will happen when tracks are moved from one output to another by audio policy
// manager
- product_strategy_t strategy = AudioSystem::getStrategyForStream(streamType);
+ product_strategy_t strategy = getStrategyForStream(streamType);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> t = mTracks[i];
if (t != 0 && t->isExternalTrack()) {
- product_strategy_t actual = AudioSystem::getStrategyForStream(t->streamType());
+ product_strategy_t actual = getStrategyForStream(t->streamType());
if (sessionId == t->sessionId() && strategy != actual) {
ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
strategy, actual);
@@ -2445,7 +2542,7 @@
if (chain != 0) {
ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
- chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
+ chain->setStrategy(getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
@@ -2613,8 +2710,19 @@
mLock.unlock();
const int intensity = AudioFlinger::onExternalVibrationStart(
track->getExternalVibration());
+ std::optional<media::AudioVibratorInfo> vibratorInfo;
+ {
+ // TODO(b/184194780): Use the vibrator information from the vibrator that will be
+ // used to play this track.
+ Mutex::Autolock _l(mAudioFlinger->mLock);
+ vibratorInfo = std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+ }
mLock.lock();
track->setHapticIntensity(static_cast<os::HapticScale>(intensity));
+ if (vibratorInfo) {
+ track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
+ }
+
// Haptic playback should be enabled by vibrator service.
if (track->getHapticPlaybackEnabled()) {
// Disable haptic playback of all active track to ensure only
@@ -2707,36 +2815,26 @@
return mOutput->stream->selectPresentation(presentationId, programId);
}
-void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId) {
- sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
-
- desc->mIoHandle = mId;
- struct audio_patch patch = mPatch;
- if (isMsdDevice()) {
- patch = mDownStreamPatch;
- }
-
+ sp<AudioIoDescriptor> desc;
+ const struct audio_patch patch = isMsdDevice() ? mDownStreamPatch : mPatch;
switch (event) {
case AUDIO_OUTPUT_OPENED:
case AUDIO_OUTPUT_REGISTERED:
case AUDIO_OUTPUT_CONFIG_CHANGED:
- desc->mPatch = patch;
- desc->mChannelMask = mChannelMask;
- desc->mSamplingRate = mSampleRate;
- desc->mFormat = mFormat;
- desc->mFrameCount = mNormalFrameCount; // FIXME see
- // AudioFlinger::frameCount(audio_io_handle_t)
- desc->mFrameCountHAL = mFrameCount;
- desc->mLatency = latency_l();
+ desc = sp<AudioIoDescriptor>::make(mId, patch, false /*isInput*/,
+ mSampleRate, mFormat, mChannelMask,
+ // FIXME AudioFlinger::frameCount(audio_io_handle_t) instead of mNormalFrameCount?
+ mNormalFrameCount, mFrameCount, latency_l());
break;
case AUDIO_CLIENT_STARTED:
- desc->mPatch = patch;
- desc->mPortId = portId;
+ desc = sp<AudioIoDescriptor>::make(mId, patch, portId);
break;
case AUDIO_OUTPUT_CLOSED:
default:
+ desc = sp<AudioIoDescriptor>::make(mId);
break;
}
mAudioFlinger->ioConfigChanged(event, desc, pid);
@@ -2814,14 +2912,20 @@
if (!audio_is_output_channel(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
}
- if ((mType == MIXER || mType == DUPLICATING)
- && !isValidPcmSinkChannelMask(mChannelMask)) {
+ if (hasMixer() && !isValidPcmSinkChannelMask(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
mChannelMask);
}
+
+ if (mMixerChannelMask == AUDIO_CHANNEL_NONE) {
+ mMixerChannelMask = mChannelMask;
+ }
+
mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
mBalance.setChannelMask(mChannelMask);
+ uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mMixerChannelMask);
+
// Get actual HAL format.
status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
@@ -2831,8 +2935,7 @@
if (!audio_is_valid_format(mFormat)) {
LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
}
- if ((mType == MIXER || mType == DUPLICATING)
- && !isValidPcmSinkFormat(mFormat)) {
+ if (hasMixer() && !isValidPcmSinkFormat(mFormat)) {
LOG_FATAL("HAL format %#x not supported for mixed output",
mFormat);
}
@@ -2841,7 +2944,7 @@
LOG_ALWAYS_FATAL_IF(result != OK,
"Error when retrieving output stream buffer size: %d", result);
mFrameCount = mBufferSize / mFrameSize;
- if ((mType == MIXER || mType == DUPLICATING) && (mFrameCount & 15)) {
+ if (hasMixer() && (mFrameCount & 15)) {
ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
@@ -2914,7 +3017,7 @@
}
mNormalFrameCount = multiplier * mFrameCount;
// round up to nearest 16 frames to satisfy AudioMixer
- if (mType == MIXER || mType == DUPLICATING) {
+ if (hasMixer()) {
mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
}
ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
@@ -2930,6 +3033,7 @@
// Originally this was int16_t[] array, need to remove legacy implications.
free(mSinkBuffer);
mSinkBuffer = NULL;
+
// For sink buffer size, we use the frame size from the downstream sink to avoid problems
// with non PCM formats for compressed music, e.g. AAC, and Offload threads.
const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
@@ -2941,7 +3045,7 @@
mMixerBuffer = NULL;
if (mMixerBufferEnabled) {
mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
- mMixerBufferSize = mNormalFrameCount * mChannelCount
+ mMixerBufferSize = mNormalFrameCount * mixerChannelCount
* audio_bytes_per_sample(mMixerBufferFormat);
(void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
}
@@ -2949,15 +3053,24 @@
mEffectBuffer = NULL;
if (mEffectBufferEnabled) {
mEffectBufferFormat = EFFECT_BUFFER_FORMAT;
- mEffectBufferSize = mNormalFrameCount * mChannelCount
+ mEffectBufferSize = mNormalFrameCount * mixerChannelCount
* audio_bytes_per_sample(mEffectBufferFormat);
(void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
}
+ if (mType == SPATIALIZER) {
+ free(mPostSpatializerBuffer);
+ mPostSpatializerBuffer = nullptr;
+ mPostSpatializerBufferSize = mNormalFrameCount * mChannelCount
+ * audio_bytes_per_sample(mEffectBufferFormat);
+ (void)posix_memalign(&mPostSpatializerBuffer, 32, mPostSpatializerBufferSize);
+ }
+
mHapticChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
mChannelCount -= mHapticChannelCount;
+ mMixerChannelMask = static_cast<audio_channel_mask_t>(mMixerChannelMask & ~mHapticChannelMask);
// force reconfiguration of effect chains and engines to take new buffer size and audio
// parameters into account
@@ -3051,15 +3164,15 @@
// session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
// it is moved to correct output by audio policy manager when A2DP is connected or disconnected
if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ return getStrategyForStream(AUDIO_STREAM_MUSIC);
}
for (size_t i = 0; i < mTracks.size(); i++) {
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() && !track->isInvalid()) {
- return AudioSystem::getStrategyForStream(track->streamType());
+ return getStrategyForStream(track->streamType());
}
}
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ return getStrategyForStream(AUDIO_STREAM_MUSIC);
}
@@ -3336,23 +3449,34 @@
{
audio_session_t session = chain->sessionId();
sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
- status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
- mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
- mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
- &halInBuffer);
- if (result != OK) return result;
- halOutBuffer = halInBuffer;
- effect_buffer_t *buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
- ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
- if (!audio_is_global_session(session)) {
- // Only one effect chain can be present in direct output thread and it uses
- // the sink buffer as input
- if (mType != DIRECT) {
- size_t numSamples = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
+ effect_buffer_t *buffer = nullptr; // only used for non global sessions
+
+ if (mType == SPATIALIZER ) {
+ if (!audio_is_global_session(session)) {
+ // player sessions on a spatializer output will use a dedicated input buffer and
+ // will either output multi channel to mEffectBuffer if the track is spatilaized
+ // or stereo to mPostSpatializerBuffer if not spatialized.
+ uint32_t channelMask;
+ bool isSessionSpatialized =
+ (hasAudioSession_l(session) & ThreadBase::SPATIALIZED_SESSION) != 0;
+ if (isSessionSpatialized) {
+ channelMask = mMixerChannelMask;
+ } else {
+ channelMask = mChannelMask;
+ }
+ size_t numSamples = mNormalFrameCount
+ * (audio_channel_count_from_out_mask(channelMask) + mHapticChannelCount);
status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
numSamples * sizeof(effect_buffer_t),
&halInBuffer);
if (result != OK) return result;
+
+ result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ isSessionSpatialized ? mEffectBuffer : mPostSpatializerBuffer,
+ isSessionSpatialized ? mEffectBufferSize : mPostSpatializerBufferSize,
+ &halOutBuffer);
+ if (result != OK) return result;
+
#ifdef FLOAT_EFFECT_CHAIN
buffer = halInBuffer->audioBuffer()->f32;
#else
@@ -3360,14 +3484,60 @@
#endif
ALOGV("addEffectChain_l() creating new input buffer %p session %d",
buffer, session);
- }
+ } else {
+ // A global session on a SPATIALIZER thread is either OUTPUT_STAGE or DEVICE
+ // - OUTPUT_STAGE session uses the mEffectBuffer as input buffer and
+ // mPostSpatializerBuffer as output buffer
+ // - DEVICE session uses the mPostSpatializerBuffer as input and output buffer.
+ status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ mEffectBuffer, mEffectBufferSize, &halInBuffer);
+ if (result != OK) return result;
+ result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ mPostSpatializerBuffer, mPostSpatializerBufferSize, &halOutBuffer);
+ if (result != OK) return result;
+ if (session == AUDIO_SESSION_DEVICE) {
+ halInBuffer = halOutBuffer;
+ }
+ }
+ } else {
+ status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
+ mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
+ &halInBuffer);
+ if (result != OK) return result;
+ halOutBuffer = halInBuffer;
+ ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
+ if (!audio_is_global_session(session)) {
+ buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
+ // Only one effect chain can be present in direct output thread and it uses
+ // the sink buffer as input
+ if (mType != DIRECT) {
+ size_t numSamples = mNormalFrameCount
+ * (audio_channel_count_from_out_mask(mMixerChannelMask)
+ + mHapticChannelCount);
+ status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
+ numSamples * sizeof(effect_buffer_t),
+ &halInBuffer);
+ if (result != OK) return result;
+#ifdef FLOAT_EFFECT_CHAIN
+ buffer = halInBuffer->audioBuffer()->f32;
+#else
+ buffer = halInBuffer->audioBuffer()->s16;
+#endif
+ ALOGV("addEffectChain_l() creating new input buffer %p session %d",
+ buffer, session);
+ }
+ }
+ }
+
+ if (!audio_is_global_session(session)) {
// Attach all tracks with same session ID to this chain.
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (session == track->sessionId()) {
- ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(),
- buffer);
+ ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p",
+ track.get(), buffer);
track->setMainBuffer(buffer);
chain->incTrackCnt();
}
@@ -3376,11 +3546,13 @@
// indicate all active tracks in the chain
for (const sp<Track> &track : mActiveTracks) {
if (session == track->sessionId()) {
- ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
+ ALOGV("addEffectChain_l() activating track %p on session %d",
+ track.get(), session);
chain->incActiveTrackCnt();
}
}
}
+
chain->setThread(this);
chain->setInBuffer(halInBuffer);
chain->setOutBuffer(halOutBuffer);
@@ -3531,6 +3703,8 @@
audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ sendCheckOutputStageEffectsEvent();
+
// loopCount is used for statistics and diagnostics.
for (int64_t loopCount = 0; !exitPending(); ++loopCount)
{
@@ -3542,6 +3716,7 @@
Vector< sp<EffectChain> > effectChains;
audio_session_t activeHapticSessionId = AUDIO_SESSION_NONE;
+ bool isHapticSessionSpatialized = false;
std::vector<sp<Track>> activeTracks;
// If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
@@ -3587,11 +3762,18 @@
}
}
+ if (mCheckOutputStageEffects.exchange(false)) {
+ checkOutputStageEffects();
+ }
+
{ // scope for mLock
Mutex::Autolock _l(mLock);
processConfigEvents_l();
+ if (mCheckOutputStageEffects.load()) {
+ continue;
+ }
// See comment at declaration of logString for why this is done under mLock
if (logString != NULL) {
@@ -3695,16 +3877,21 @@
// This must be done under the same lock as prepareTracks_l().
// The haptic data from the effect is at a higher priority than the one from track.
// TODO: Write haptic data directly to sink buffer when mixing.
- if (mHapticChannelCount > 0 && effectChains.size() > 0) {
+ if (mHapticChannelCount > 0) {
for (const auto& track : mActiveTracks) {
sp<EffectChain> effectChain = getEffectChain_l(track->sessionId());
- if (effectChain != nullptr && effectChain->containsHapticGeneratingEffect_l()) {
+ if (effectChain != nullptr
+ && effectChain->containsHapticGeneratingEffect_l()) {
activeHapticSessionId = track->sessionId();
+ isHapticSessionSpatialized =
+ mType == SPATIALIZER && track->canBeSpatialized();
break;
}
- if (track->getHapticPlaybackEnabled()) {
+ if (activeHapticSessionId == AUDIO_SESSION_NONE
+ && track->getHapticPlaybackEnabled()) {
activeHapticSessionId = track->sessionId();
- break;
+ isHapticSessionSpatialized =
+ mType == SPATIALIZER && track->canBeSpatialized();
}
}
}
@@ -3754,6 +3941,8 @@
//
// mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
// TODO use mSleepTimeUs == 0 as an additional condition.
+ uint32_t mixerChannelCount = mEffectBufferValid ?
+ audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
if (mMixerBufferValid) {
void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
@@ -3774,7 +3963,7 @@
}
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
- mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+ mNormalFrameCount * (mixerChannelCount + mHapticChannelCount));
// If we're going directly to the sink and there are haptic channels,
// we should adjust channels as the sample data is partially interleaved
@@ -3807,8 +3996,16 @@
&& activeHapticSessionId == effectChains[i]->sessionId()) {
// Haptic data is active in this case, copy it directly from
// in buffer to out buffer.
+ uint32_t hapticSessionChannelCount = mEffectBufferValid ?
+ audio_channel_count_from_out_mask(mMixerChannelMask) :
+ mChannelCount;
+ if (mType == SPATIALIZER && !isHapticSessionSpatialized) {
+ hapticSessionChannelCount = mChannelCount;
+ }
+
const size_t audioBufferSize = mNormalFrameCount
- * audio_bytes_per_frame(mChannelCount, EFFECT_BUFFER_FORMAT);
+ * audio_bytes_per_frame(hapticSessionChannelCount,
+ EFFECT_BUFFER_FORMAT);
memcpy_by_audio_format(
(uint8_t*)effectChains[i]->outBuffer() + audioBufferSize,
EFFECT_BUFFER_FORMAT,
@@ -3834,9 +4031,9 @@
// TODO use mSleepTimeUs == 0 as an additional condition.
if (mEffectBufferValid) {
//ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
-
+ void *effectBuffer = (mType == SPATIALIZER) ? mPostSpatializerBuffer : mEffectBuffer;
if (requireMonoBlend()) {
- mono_blend(mEffectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
+ mono_blend(effectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
true /*limit*/);
}
@@ -3845,11 +4042,30 @@
// We do it here if there is no FastMixer.
// mBalance detects zero balance within the class for speed (not needed here).
mBalance.setBalance(mMasterBalance.load());
- mBalance.process((float *)mEffectBuffer, mNormalFrameCount);
+ mBalance.process((float *)effectBuffer, mNormalFrameCount);
}
- memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
+ // for SPATIALIZER thread, Move haptics channels from mEffectBuffer to
+ // mPostSpatializerBuffer if the haptics track is spatialized.
+ // Otherwise, the haptics channels are already in mPostSpatializerBuffer.
+ // For other thread types, the haptics channels are already in mEffectBuffer.
+ if (mType == SPATIALIZER && isHapticSessionSpatialized) {
+ const size_t srcBufferSize = mNormalFrameCount *
+ audio_bytes_per_frame(audio_channel_count_from_out_mask(mMixerChannelMask),
+ mEffectBufferFormat);
+ const size_t dstBufferSize = mNormalFrameCount
+ * audio_bytes_per_frame(mChannelCount, mEffectBufferFormat);
+
+ memcpy_by_audio_format((uint8_t*)mPostSpatializerBuffer + dstBufferSize,
+ mEffectBufferFormat,
+ (uint8_t*)mEffectBuffer + srcBufferSize,
+ mEffectBufferFormat,
+ mNormalFrameCount * mHapticChannelCount);
+ }
+
+ memcpy_by_audio_format(mSinkBuffer, mFormat, effectBuffer, mEffectBufferFormat,
mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+
// The sample data is partially interleaved when haptic channels exist,
// we need to adjust channels here.
if (mHapticChannelCount > 0) {
@@ -3899,6 +4115,18 @@
Mutex::Autolock _l(mLock);
mIoJitterMs.add(jitterMs);
mProcessTimeMs.add(processMs);
+
+ if (mPipeSink.get() != nullptr) {
+ // Using the Monopipe availableToWrite, we estimate the current
+ // buffer size.
+ MonoPipe* monoPipe = static_cast<MonoPipe*>(mPipeSink.get());
+ const ssize_t
+ availableToWrite = mPipeSink->availableToWrite();
+ const size_t pipeFrames = monoPipe->maxFrames();
+ const size_t
+ remainingFrames = pipeFrames - max(availableToWrite, 0);
+ mMonopipePipeDepthStats.add(remainingFrames);
+ }
}
// write blocked detection
@@ -4448,8 +4676,8 @@
// ----------------------------------------------------------------------------
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, bool systemReady, type_t type)
- : PlaybackThread(audioFlinger, output, id, type, systemReady),
+ audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t *mixerConfig)
+ : PlaybackThread(audioFlinger, output, id, type, systemReady, mixerConfig),
// mAudioMixer below
// mFastMixer below
mFastMixerFutex(0),
@@ -4487,26 +4715,25 @@
// initialize fast mixer depending on configuration
bool initFastMixer;
- switch (kUseFastMixer) {
- case FastMixer_Never:
+ if (mType == SPATIALIZER) {
initFastMixer = false;
- break;
- case FastMixer_Always:
- initFastMixer = true;
- break;
- case FastMixer_Static:
- case FastMixer_Dynamic:
- // FastMixer was designed to operate with a HAL that pulls at a regular rate,
- // where the period is less than an experimentally determined threshold that can be
- // scheduled reliably with CFS. However, the BT A2DP HAL is
- // bursty (does not pull at a regular rate) and so cannot operate with FastMixer.
- initFastMixer = mFrameCount < mNormalFrameCount
- && Intersection(outDeviceTypes(), getAudioDeviceOutAllA2dpSet()).empty();
- break;
+ } else {
+ switch (kUseFastMixer) {
+ case FastMixer_Never:
+ initFastMixer = false;
+ break;
+ case FastMixer_Always:
+ initFastMixer = true;
+ break;
+ case FastMixer_Static:
+ case FastMixer_Dynamic:
+ initFastMixer = mFrameCount < mNormalFrameCount;
+ break;
+ }
+ ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
+ "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
+ mFrameCount, mNormalFrameCount);
}
- ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
- "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
- mFrameCount, mNormalFrameCount);
if (initFastMixer) {
audio_format_t fastMixerFormat;
if (mMixerBufferEnabled && mEffectBufferEnabled) {
@@ -4566,6 +4793,7 @@
fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
fastTrack->mHapticIntensity = os::HapticScale::NONE;
+ fastTrack->mHapticMaxAmplitude = NAN;
fastTrack->mGeneration++;
state->mFastTracksGen++;
state->mTrackMask = 1;
@@ -4861,6 +5089,9 @@
// before effects processing or output.
if (mMixerBufferValid) {
memset(mMixerBuffer, 0, mMixerBufferSize);
+ if (mType == SPATIALIZER) {
+ memset(mSinkBuffer, 0, mSinkBufferSize);
+ }
} else {
memset(mSinkBuffer, 0, mSinkBufferSize);
}
@@ -5089,7 +5320,7 @@
break;
case TrackBase::IDLE:
default:
- LOG_ALWAYS_FATAL("unexpected track state %d", track->mState);
+ LOG_ALWAYS_FATAL("unexpected track state %d", (int)track->mState);
}
if (isActive) {
@@ -5103,6 +5334,7 @@
fastTrack->mFormat = track->mFormat;
fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
fastTrack->mHapticIntensity = track->getHapticIntensity();
+ fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
fastTrack->mGeneration++;
state->mTrackMask |= 1 << j;
didModify = true;
@@ -5148,7 +5380,7 @@
// TODO Remove the ALOGW when this theory is confirmed.
ALOGW("fast track %d should have been active; "
"mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
- j, track->mState, state->mTrackMask, recentUnderruns,
+ j, (int)track->mState, state->mTrackMask, recentUnderruns,
track->sharedBuffer() != 0);
// Since the FastMixer state already has the track inactive, do nothing here.
}
@@ -5352,11 +5584,21 @@
trackId,
AudioMixer::TRACK,
AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
- mAudioMixer->setParameter(
- trackId,
- AudioMixer::TRACK,
- AudioMixer::MIXER_CHANNEL_MASK,
- (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+
+ if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_CHANNEL_MASK,
+ (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+ } else {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_CHANNEL_MASK,
+ (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
+ }
+
// limit track sample rate to 2 x output sample rate, which changes at re-configuration
uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
uint32_t reqSampleRate = proxy->getSampleRate();
@@ -5393,16 +5635,27 @@
if (mMixerBufferEnabled
&& (track->mainBuffer() == mSinkBuffer
|| track->mainBuffer() == mMixerBuffer)) {
- mAudioMixer->setParameter(
- trackId,
- AudioMixer::TRACK,
- AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
- mAudioMixer->setParameter(
- trackId,
- AudioMixer::TRACK,
- AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
- // TODO: override track->mainBuffer()?
- mMixerBufferValid = true;
+ if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_FORMAT, (void *)mEffectBufferFormat);
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MAIN_BUFFER, (void *)mPostSpatializerBuffer);
+ } else {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
+ // TODO: override track->mainBuffer()?
+ mMixerBufferValid = true;
+ }
} else {
mAudioMixer->setParameter(
trackId,
@@ -5425,6 +5678,10 @@
trackId,
AudioMixer::TRACK,
AudioMixer::HAPTIC_INTENSITY, (void *)(uintptr_t)track->getHapticIntensity());
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::HAPTIC_MAX_AMPLITUDE, (void *)(&(track->mHapticMaxAmplitude)));
// reset retry count
track->mRetryCount = kMaxTrackRetries;
@@ -5575,7 +5832,8 @@
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
- if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0) {
+ if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0 ||
+ getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE) != 0) {
mEffectBufferValid = true;
}
@@ -5583,12 +5841,17 @@
// as long as there are effects we should clear the effects buffer, to avoid
// passing a non-clean buffer to the effect chain
memset(mEffectBuffer, 0, mEffectBufferSize);
+ if (mType == SPATIALIZER) {
+ memset(mPostSpatializerBuffer, 0, mPostSpatializerBufferSize);
+ }
}
// sink or mix buffer must be cleared if all tracks are connected to an
// effect chain as in this case the mixer will not write to the sink or mix buffer
// and track effects will accumulate into it
- if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
- (mixedTracks == 0 && fastTracks > 0))) {
+ // always clear sink buffer for spatializer output as the output of the spatializer
+ // effect will be accumulated into it
+ if ((mBytesRemaining == 0) && (((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+ (mixedTracks == 0 && fastTracks > 0)) || (mType == SPATIALIZER))) {
// FIXME as a performance optimization, should remember previous zero status
if (mMixerBufferValid) {
memset(mMixerBuffer, 0, mMixerBufferSize);
@@ -6971,6 +7234,69 @@
MixerThread::cacheParameters_l();
}
+// ----------------------------------------------------------------------------
+
+AudioFlinger::SpatializerThread::SpatializerThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ bool systemReady,
+ audio_config_base_t *mixerConfig)
+ : MixerThread(audioFlinger, output, id, systemReady, SPATIALIZER, mixerConfig)
+{
+}
+
+void AudioFlinger::SpatializerThread::checkOutputStageEffects()
+{
+ bool hasVirtualizer = false;
+ bool hasDownMixer = false;
+ sp<EffectHandle> finalDownMixer;
+ {
+ Mutex::Autolock _l(mLock);
+ sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
+ if (chain != 0) {
+ hasVirtualizer = chain->getEffectFromType_l(FX_IID_SPATIALIZER) != nullptr;
+ hasDownMixer = chain->getEffectFromType_l(EFFECT_UIID_DOWNMIX) != nullptr;
+ }
+
+ finalDownMixer = mFinalDownMixer;
+ mFinalDownMixer.clear();
+ }
+
+ if (hasVirtualizer) {
+ if (finalDownMixer != nullptr) {
+ int32_t ret;
+ finalDownMixer->disable(&ret);
+ }
+ finalDownMixer.clear();
+ } else if (!hasDownMixer) {
+ std::vector<effect_descriptor_t> descriptors;
+ status_t status = mAudioFlinger->mEffectsFactoryHal->getDescriptors(
+ EFFECT_UIID_DOWNMIX, &descriptors);
+ if (status != NO_ERROR) {
+ return;
+ }
+ ALOG_ASSERT(!descriptors.empty(),
+ "%s getDescriptors() returned no error but empty list", __func__);
+
+ finalDownMixer = createEffect_l(nullptr /*client*/, nullptr /*effectClient*/,
+ 0 /*priority*/, AUDIO_SESSION_OUTPUT_STAGE, &descriptors[0], nullptr /*enabled*/,
+ &status, false /*pinned*/, false /*probe*/, false /*notifyFramesProcessed*/);
+
+ if (finalDownMixer == nullptr || (status != NO_ERROR && status != ALREADY_EXISTS)) {
+ ALOGW("%s error creating downmixer %d", __func__, status);
+ finalDownMixer.clear();
+ } else {
+ int32_t ret;
+ finalDownMixer->enable(&ret);
+ }
+ }
+
+ {
+ Mutex::Autolock _l(mLock);
+ mFinalDownMixer = finalDownMixer;
+ }
+}
+
// ----------------------------------------------------------------------------
// Record
@@ -7445,6 +7771,7 @@
const ssize_t availableToRead = mPipeSource->availableToRead();
if (availableToRead >= 0) {
+ mMonopipePipeDepthStats.add(availableToRead);
// PipeSource is the primary clock. It is up to the AudioRecord client to keep up.
LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
"more frames to read than fifo size, %zd > %zu",
@@ -7873,6 +8200,7 @@
if (
// we formerly checked for a callback handler (non-0 tid),
// but that is no longer required for TRANSFER_OBTAIN mode
+ // No need to match hardware format, format conversion will be done on the client side.
//
// Frame count is not specified (0), or is less than or equal the pipe depth.
// It is OK to provide a higher capacity than requested.
@@ -7880,8 +8208,6 @@
(frameCount <= mPipeFramesP2) &&
// PCM data
audio_is_linear_pcm(format) &&
- // hardware format
- (format == mFormat) &&
// hardware channel mask
(channelMask == mChannelMask) &&
// hardware sample rate
@@ -8041,7 +8367,7 @@
ALOGV("active record track PAUSING -> ACTIVE");
recordTrack->mState = TrackBase::ACTIVE;
} else {
- ALOGV("active record track state %d", recordTrack->mState);
+ ALOGV("active record track state %d", (int)recordTrack->mState);
}
return status;
}
@@ -8067,7 +8393,7 @@
}
if (recordTrack->mState != TrackBase::STARTING_1) {
ALOGW("%s(%d): unsynchronized mState:%d change",
- __func__, recordTrack->id(), recordTrack->mState);
+ __func__, recordTrack->id(), (int)recordTrack->mState);
// Someone else has changed state, let them take over,
// leave mState in the new state.
recordTrack->clearSyncStartEvent();
@@ -8631,30 +8957,22 @@
return String8();
}
-void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId) {
- sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
- desc->mIoHandle = mId;
-
+ sp<AudioIoDescriptor> desc;
switch (event) {
case AUDIO_INPUT_OPENED:
case AUDIO_INPUT_REGISTERED:
case AUDIO_INPUT_CONFIG_CHANGED:
- desc->mPatch = mPatch;
- desc->mChannelMask = mChannelMask;
- desc->mSamplingRate = mSampleRate;
- desc->mFormat = mFormat;
- desc->mFrameCount = mFrameCount;
- desc->mFrameCountHAL = mFrameCount;
- desc->mLatency = 0;
+ desc = sp<AudioIoDescriptor>::make(mId, mPatch, true /*isInput*/,
+ mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
break;
case AUDIO_CLIENT_STARTED:
- desc->mPatch = mPatch;
- desc->mPortId = portId;
+ desc = sp<AudioIoDescriptor>::make(mId, mPatch, portId);
break;
case AUDIO_INPUT_CLOSED:
default:
+ desc = sp<AudioIoDescriptor>::make(mId);
break;
}
mAudioFlinger->ioConfigChanged(event, desc, pid);
@@ -9278,7 +9596,7 @@
mActiveTracks.add(track);
sp<EffectChain> chain = getEffectChain_l(mSessionId);
if (chain != 0) {
- chain->setStrategy(AudioSystem::getStrategyForStream(streamType()));
+ chain->setStrategy(getStrategyForStream(streamType()));
chain->incTrackCnt();
chain->incActiveTrackCnt();
}
@@ -9497,31 +9815,26 @@
return String8();
}
-void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId __unused) {
- sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
- desc->mIoHandle = mId;
-
+ sp<AudioIoDescriptor> desc;
+ bool isInput = false;
switch (event) {
case AUDIO_INPUT_OPENED:
case AUDIO_INPUT_REGISTERED:
case AUDIO_INPUT_CONFIG_CHANGED:
+ isInput = true;
+ FALLTHROUGH_INTENDED;
case AUDIO_OUTPUT_OPENED:
case AUDIO_OUTPUT_REGISTERED:
case AUDIO_OUTPUT_CONFIG_CHANGED:
- desc->mPatch = mPatch;
- desc->mChannelMask = mChannelMask;
- desc->mSamplingRate = mSampleRate;
- desc->mFormat = mFormat;
- desc->mFrameCount = mFrameCount;
- desc->mFrameCountHAL = mFrameCount;
- desc->mLatency = 0;
+ desc = sp<AudioIoDescriptor>::make(mId, mPatch, isInput,
+ mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
break;
-
case AUDIO_INPUT_CLOSED:
case AUDIO_OUTPUT_CLOSED:
default:
+ desc = sp<AudioIoDescriptor>::make(mId);
break;
}
mAudioFlinger->ioConfigChanged(event, desc, pid);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 16082a9..43d1055 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -32,6 +32,7 @@
OFFLOAD, // Thread class is OffloadThread
MMAP_PLAYBACK, // Thread class for MMAP playback stream
MMAP_CAPTURE, // Thread class for MMAP capture stream
+ SPATIALIZER, // Thread class is SpatializerThread
// If you add any values here, also update ThreadBase::threadTypeToString()
};
@@ -53,7 +54,8 @@
CFG_EVENT_CREATE_AUDIO_PATCH,
CFG_EVENT_RELEASE_AUDIO_PATCH,
CFG_EVENT_UPDATE_OUT_DEVICE,
- CFG_EVENT_RESIZE_BUFFER
+ CFG_EVENT_RESIZE_BUFFER,
+ CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS
};
class ConfigEventData: public RefBase {
@@ -87,7 +89,13 @@
public:
virtual ~ConfigEvent() {}
- void dump(char *buffer, size_t size) { mData->dump(buffer, size); }
+ void dump(char *buffer, size_t size) {
+ snprintf(buffer, size, "Event type: %d\n", mType);
+ if (mData != nullptr) {
+ snprintf(buffer, size, "Data:\n");
+ mData->dump(buffer, size);
+ }
+ }
const int mType; // event type e.g. CFG_EVENT_IO
Mutex mLock; // mutex associated with mCond
@@ -105,22 +113,22 @@
class IoConfigEventData : public ConfigEventData {
public:
- IoConfigEventData(audio_io_config_event event, pid_t pid,
+ IoConfigEventData(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId) :
mEvent(event), mPid(pid), mPortId(portId) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "IO event: event %d\n", mEvent);
+ snprintf(buffer, size, "- IO event: event %d\n", mEvent);
}
- const audio_io_config_event mEvent;
+ const audio_io_config_event_t mEvent;
const pid_t mPid;
const audio_port_handle_t mPortId;
};
class IoConfigEvent : public ConfigEvent {
public:
- IoConfigEvent(audio_io_config_event event, pid_t pid, audio_port_handle_t portId) :
+ IoConfigEvent(audio_io_config_event_t event, pid_t pid, audio_port_handle_t portId) :
ConfigEvent(CFG_EVENT_IO) {
mData = new IoConfigEventData(event, pid, portId);
}
@@ -133,7 +141,7 @@
mPid(pid), mTid(tid), mPrio(prio), mForApp(forApp) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d, for app? %d\n",
+ snprintf(buffer, size, "- Prio event: pid %d, tid %d, prio %d, for app? %d\n",
mPid, mTid, mPrio, mForApp);
}
@@ -158,7 +166,7 @@
mKeyValuePairs(keyValuePairs) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "KeyValue: %s\n", mKeyValuePairs.string());
+ snprintf(buffer, size, "- KeyValue: %s\n", mKeyValuePairs.string());
}
const String8 mKeyValuePairs;
@@ -181,7 +189,7 @@
mPatch(patch), mHandle(handle) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+ snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
}
const struct audio_patch mPatch;
@@ -205,7 +213,7 @@
mHandle(handle) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+ snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
}
audio_patch_handle_t mHandle;
@@ -227,7 +235,7 @@
mOutDevices(outDevices) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Devices: %s", android::toString(mOutDevices).c_str());
+ snprintf(buffer, size, "- Devices: %s", android::toString(mOutDevices).c_str());
}
DeviceDescriptorBaseVector mOutDevices;
@@ -249,7 +257,7 @@
mMaxSharedAudioHistoryMs(maxSharedAudioHistoryMs) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
+ snprintf(buffer, size, "- mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
}
int32_t mMaxSharedAudioHistoryMs;
@@ -265,6 +273,16 @@
virtual ~ResizeBufferConfigEvent() {}
};
+ class CheckOutputStageEffectsEvent : public ConfigEvent {
+ public:
+ CheckOutputStageEffectsEvent() :
+ ConfigEvent(CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS) {
+ }
+
+ virtual ~CheckOutputStageEffectsEvent() {}
+ };
+
+
class PMDeathRecipient : public IBinder::DeathRecipient {
public:
explicit PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {}
@@ -290,8 +308,11 @@
// dynamic externally-visible
uint32_t sampleRate() const { return mSampleRate; }
audio_channel_mask_t channelMask() const { return mChannelMask; }
+ virtual audio_channel_mask_t mixerChannelMask() const { return mChannelMask; }
+
audio_format_t format() const { return mHALFormat; }
uint32_t channelCount() const { return mChannelCount; }
+
// Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
// and returns the [normal mix] buffer's frame count.
virtual size_t frameCount() const = 0;
@@ -311,15 +332,15 @@
status_t& status) = 0;
virtual status_t setParameters(const String8& keyValuePairs);
virtual String8 getParameters(const String8& keys) = 0;
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) = 0;
// sendConfigEvent_l() must be called with ThreadBase::mLock held
// Can temporarily release the lock if waiting for a reply from
// processConfigEvents_l().
status_t sendConfigEvent_l(sp<ConfigEvent>& event);
- void sendIoConfigEvent(audio_io_config_event event, pid_t pid = 0,
+ void sendIoConfigEvent(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
- void sendIoConfigEvent_l(audio_io_config_event event, pid_t pid = 0,
+ void sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
void sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp);
void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio, bool forApp);
@@ -330,7 +351,11 @@
status_t sendUpdateOutDeviceConfigEvent(
const DeviceDescriptorBaseVector& outDevices);
void sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs);
+ void sendCheckOutputStageEffectsEvent();
+ void sendCheckOutputStageEffectsEvent_l();
+
void processConfigEvents_l();
+ virtual void setCheckOutputStageEffects() {}
virtual void cacheParameters_l() = 0;
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle) = 0;
@@ -383,7 +408,8 @@
int *enabled,
status_t *status /*non-NULL*/,
bool pinned,
- bool probe);
+ bool probe,
+ bool notifyFramesProcessed);
// return values for hasAudioSession (bit field)
enum effect_state {
@@ -391,8 +417,10 @@
// effect
TRACK_SESSION = 0x2, // the audio session corresponds to at least one
// track
- FAST_SESSION = 0x4 // the audio session corresponds to at least one
+ FAST_SESSION = 0x4, // the audio session corresponds to at least one
// fast track
+ SPATIALIZED_SESSION = 0x8 // the audio session corresponds to at least one
+ // spatialized track
};
// get effect chain corresponding to session Id.
@@ -433,6 +461,7 @@
// - EFFECT_SESSION if effects on this audio session exist in one chain
// - TRACK_SESSION if tracks on this audio session exist
// - FAST_SESSION if fast tracks on this audio session exist
+ // - SPATIALIZED_SESSION if spatialized tracks on this audio session exist
virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const = 0;
uint32_t hasAudioSession(audio_session_t sessionId) const {
Mutex::Autolock _l(mLock);
@@ -454,6 +483,9 @@
if (track->isFastTrack()) {
result |= FAST_SESSION; // caution, only represents first track.
}
+ if (track->canBeSpatialized()) {
+ result |= SPATIALIZED_SESSION; // caution, only first track.
+ }
break;
}
}
@@ -574,6 +606,8 @@
return INVALID_OPERATION;
}
+ product_strategy_t getStrategyForStream(audio_stream_type_t stream) const;
+
virtual void dumpInternals_l(int fd __unused, const Vector<String16>& args __unused)
{ }
virtual void dumpTracks_l(int fd __unused, const Vector<String16>& args __unused) { }
@@ -657,6 +691,7 @@
audio_utils::Statistics<double> mIoJitterMs{0.995 /* alpha */};
audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
audio_utils::Statistics<double> mLatencyMs{0.995 /* alpha */};
+ audio_utils::Statistics<double> mMonopipePipeDepthStats{0.999 /* alpha */};
// Save the last count when we delivered statistics to mediametrics.
int64_t mLastRecordedTimestampVerifierN = 0;
@@ -824,7 +859,8 @@
static const nsecs_t kMaxNextBufferDelayNs = 100000000;
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, type_t type, bool systemReady);
+ audio_io_handle_t id, type_t type, bool systemReady,
+ audio_config_base_t *mixerConfig = nullptr);
virtual ~PlaybackThread();
// Thread virtuals
@@ -881,6 +917,8 @@
mActiveTracks.updatePowerState(this, true /* force */);
}
+ virtual void checkOutputStageEffects() {}
+
void dumpInternals_l(int fd, const Vector<String16>& args) override;
void dumpTracks_l(int fd, const Vector<String16>& args) override;
@@ -942,7 +980,7 @@
{ return android_atomic_acquire_load(&mSuspended) > 0; }
virtual String8 getParameters(const String8& keys);
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
// Consider also removing and passing an explicit mMainBuffer initialization
@@ -973,6 +1011,10 @@
virtual size_t frameCount() const { return mNormalFrameCount; }
+ audio_channel_mask_t mixerChannelMask() const override {
+ return mMixerChannelMask;
+ }
+
status_t getTimestamp_l(AudioTimestamp& timestamp);
void addPatchTrack(const sp<PatchTrack>& track);
@@ -1015,6 +1057,9 @@
PlaybackThread::Track* getTrackById_l(audio_port_handle_t trackId);
+ bool hasMixer() const {
+ return mType == MIXER || mType == DUPLICATING || mType == SPATIALIZER;
+ }
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -1084,6 +1129,15 @@
// for any processing (including output processing).
bool mEffectBufferValid;
+ // Frame size aligned buffer used as input and output to all post processing effects
+ // except the Spatializer in a SPATIALIZER thread. Non spatialized tracks are mixed into
+ // this buffer so that post processing effects can be applied.
+ void* mPostSpatializerBuffer = nullptr;
+
+ // Size of mPostSpatializerBuffer in bytes
+ size_t mPostSpatializerBufferSize;
+
+
// suspend count, > 0 means suspended. While suspended, the thread continues to pull from
// tracks and mix, but doesn't write to HAL. A2DP and SCO HAL implementations can't handle
// concurrent use of both of them, so Audio Policy Service suspends one of the threads to
@@ -1101,6 +1155,9 @@
// haptic playback.
audio_channel_mask_t mHapticChannelMask = AUDIO_CHANNEL_NONE;
uint32_t mHapticChannelCount = 0;
+
+ audio_channel_mask_t mMixerChannelMask = AUDIO_CHANNEL_NONE;
+
private:
// mMasterMute is in both PlaybackThread and in AudioFlinger. When a
// PlaybackThread needs to find out if master-muted, it checks it's local
@@ -1134,6 +1191,9 @@
// Cache various calculated values, at threadLoop() entry and after a parameter change
virtual void cacheParameters_l();
+ void setCheckOutputStageEffects() override {
+ mCheckOutputStageEffects.store(true);
+ }
virtual uint32_t correctLatency_l(uint32_t latency) const;
@@ -1314,6 +1374,8 @@
// audio patch used by the downstream software patch.
// Only used if ThreadBase::mIsMsdDevice is true.
struct audio_patch mDownStreamPatch;
+
+ std::atomic_bool mCheckOutputStageEffects{};
};
class MixerThread : public PlaybackThread {
@@ -1322,7 +1384,8 @@
AudioStreamOut* output,
audio_io_handle_t id,
bool systemReady,
- type_t type = MIXER);
+ type_t type = MIXER,
+ audio_config_base_t *mixerConfig = nullptr);
virtual ~MixerThread();
// Thread virtuals
@@ -1611,6 +1674,24 @@
}
};
+class SpatializerThread : public MixerThread {
+public:
+ SpatializerThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ bool systemReady,
+ audio_config_base_t *mixerConfig);
+ ~SpatializerThread() override {}
+
+ bool hasFastMixer() const override { return false; }
+
+protected:
+ void checkOutputStageEffects() override;
+
+private:
+ sp<EffectHandle> mFinalDownMixer;
+};
+
// record thread
class RecordThread : public ThreadBase
{
@@ -1721,7 +1802,7 @@
status_t& status);
virtual void cacheParameters_l() {}
virtual String8 getParameters(const String8& keys);
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle);
@@ -1930,7 +2011,7 @@
virtual bool checkForNewParameter_l(const String8& keyValuePair,
status_t& status);
virtual String8 getParameters(const String8& keys);
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
void readHalParameters_l();
virtual void cacheParameters_l() {}
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 92f129c..b582b3a 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -23,7 +23,7 @@
class TrackBase : public ExtendedAudioBufferProvider, public RefBase {
public:
- enum track_state {
+ enum track_state : int32_t {
IDLE,
FLUSHED, // for PlaybackTracks only
STOPPED,
@@ -107,6 +107,9 @@
audio_attributes_t attributes() const { return mAttr; }
+ bool canBeSpatialized() const { return mIsOut && (mAttr.flags
+ & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) == 0; }
+
#ifdef TEE_SINK
void dumpTee(int fd, const std::string &reason) const {
mTee.dump(fd, reason);
@@ -271,6 +274,7 @@
void releaseCblk() {
if (mCblk != nullptr) {
+ mState.clear();
mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
if (mClient == 0) {
free(mCblk);
@@ -355,7 +359,7 @@
// except for OutputTrack when it is in local memory
size_t mBufferSize; // size of mBuffer in bytes
// we don't really need a lock for these
- track_state mState;
+ MirroredVariable<track_state> mState;
const audio_attributes_t mAttr;
const uint32_t mSampleRate; // initial sample rate only; for tracks which
// support dynamic rates, the current value is in control block
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index d2a30b1..616fd78 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -234,7 +234,11 @@
#ifdef TEE_SINK
mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
#endif
-
+ // mState is mirrored for the client to read.
+ mState.setMirror(&mCblk->mState);
+ // ensure our state matches up until we consolidate the enumeration.
+ static_assert(CBLK_STATE_IDLE == IDLE);
+ static_assert(CBLK_STATE_PAUSING == PAUSING);
}
}
@@ -933,7 +937,7 @@
buffer->raw = buf.mRaw;
if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
ALOGV("%s(%d): underrun, framesReady(%zu) < framesDesired(%zd), state: %d",
- __func__, mId, buf.mFrameCount, desiredFrames, mState);
+ __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
} else {
mAudioTrackServerProxy->tallyUnderrunFrames(0);
@@ -1401,6 +1405,60 @@
.content_type = mAttr.content_type,
.gain = mFinalVolume,
};
+
+ // When attributes are undefined, derive default values from stream type.
+ // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
+ if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
+ switch (mStreamType) {
+ case AUDIO_STREAM_VOICE_CALL:
+ metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+ break;
+ case AUDIO_STREAM_SYSTEM:
+ metadata.base.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ break;
+ case AUDIO_STREAM_RING:
+ metadata.base.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ break;
+ case AUDIO_STREAM_MUSIC:
+ metadata.base.usage = AUDIO_USAGE_MEDIA;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+ break;
+ case AUDIO_STREAM_ALARM:
+ metadata.base.usage = AUDIO_USAGE_ALARM;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ break;
+ case AUDIO_STREAM_NOTIFICATION:
+ metadata.base.usage = AUDIO_USAGE_NOTIFICATION;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ break;
+ case AUDIO_STREAM_DTMF:
+ metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ break;
+ case AUDIO_STREAM_ACCESSIBILITY:
+ metadata.base.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+ break;
+ case AUDIO_STREAM_ASSISTANT:
+ metadata.base.usage = AUDIO_USAGE_ASSISTANT;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+ break;
+ case AUDIO_STREAM_REROUTING:
+ metadata.base.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
+ // unknown content type
+ break;
+ case AUDIO_STREAM_CALL_ASSISTANT:
+ metadata.base.usage = AUDIO_USAGE_CALL_ASSISTANT;
+ metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+ break;
+ default:
+ break;
+ }
+ }
+
metadata.channel_mask = mChannelMask,
strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
*backInserter++ = metadata;
@@ -1590,7 +1648,7 @@
(mState == STOPPED)))) {
ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
__func__, mId,
- mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
+ (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
event->cancel();
return INVALID_OPERATION;
}
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 2e49e71..33b455f 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -18,6 +18,7 @@
#define ANDROID_AUDIOPOLICY_INTERFACE_H
#include <media/AudioCommonTypes.h>
+#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioSystem.h>
#include <media/AudioPolicy.h>
@@ -31,30 +32,42 @@
// ----------------------------------------------------------------------------
-// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
-// between the platform specific audio policy manager and Android generic audio policy manager.
-// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class.
+// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication
+// interfaces between the platform specific audio policy manager and Android generic audio policy
+// manager.
+// The platform specific audio policy manager must implement methods of the AudioPolicyInterface
+// class.
// This implementation makes use of the AudioPolicyClientInterface to control the activity and
// configuration of audio input and output streams.
//
// The platform specific audio policy manager is in charge of the audio routing and volume control
// policies for a given platform.
// The main roles of this module are:
-// - keep track of current system state (removable device connections, phone state, user requests...).
-// System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface.
+// - keep track of current system state (removable device connections, phone state,
+// user requests...).
+// System state changes and user actions are notified to audio policy manager with methods of the
+// AudioPolicyInterface.
// - process getOutput() queries received when AudioTrack objects are created: Those queries
-// return a handler on an output that has been selected, configured and opened by the audio policy manager and that
-// must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method.
-// When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide
-// to close or reconfigure the output depending on other streams using this output and current system state.
-// - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs.
-// - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value
-// applicable to each output as a function of platform specific settings and current output route (destination device). It
-// also make sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries).
+// return a handler on an output that has been selected, configured and opened by the audio
+// policy manager and that must be used by the AudioTrack when registering to the AudioFlinger
+// with the createTrack() method.
+// When the AudioTrack object is released, a putOutput() query is received and the audio policy
+// manager can decide to close or reconfigure the output depending on other streams using this
+// output and current system state.
+// - similarly process getInput() and putInput() queries received from AudioRecord objects and
+// configure audio inputs.
+// - process volume control requests: the stream volume is converted from an index value
+// (received from UI) to a float value applicable to each output as a function of platform
+// specific settings and current output route (destination device). It also makes sure that streams
+// are not muted if not allowed (e.g. camera shutter sound in some countries).
//
-// The platform specific audio policy manager is provided as a shared library by platform vendors (as for libaudio.so)
-// and is linked with libaudioflinger.so
-
+// The platform specific audio policy manager is provided as a shared library by platform vendors
+// (as for libaudio.so) and is linked with libaudioflinger.so
+//
+// NOTE: by convention, the implementation of the AudioPolicyInterface in AudioPolicyManager does
+// not have to perform any nullptr check on input arguments: The caller of this API is
+// AudioPolicyService running in the same process and in charge of validating arguments received
+// from incoming binder calls before calling AudioPolicyManager.
// Audio Policy Manager Interface
class AudioPolicyInterface
@@ -99,7 +112,7 @@
audio_format_t encodedFormat) = 0;
// retrieve a device connection status
virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
- const char *device_address) = 0;
+ const char *device_address) = 0;
// indicate a change in device configuration
virtual status_t handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
@@ -133,9 +146,11 @@
audio_port_handle_t *portId,
std::vector<audio_io_handle_t> *secondaryOutputs,
output_type_t *outputType) = 0;
- // indicates to the audio policy manager that the output starts being used by corresponding stream.
+ // indicates to the audio policy manager that the output starts being used by corresponding
+ // stream.
virtual status_t startOutput(audio_port_handle_t portId) = 0;
- // indicates to the audio policy manager that the output stops being used by corresponding stream.
+ // indicates to the audio policy manager that the output stops being used by corresponding
+ // stream.
virtual status_t stopOutput(audio_port_handle_t portId) = 0;
// releases the output, return true if the output descriptor is reopened.
virtual bool releaseOutput(audio_port_handle_t portId) = 0;
@@ -198,7 +213,7 @@
virtual product_strategy_t getStrategyForStream(audio_stream_type_t stream) = 0;
// return the enabled output devices for the given stream type
- virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
+ virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream) = 0;
// retrieves the list of enabled output devices for the given audio attributes
virtual status_t getDevicesForAttributes(const audio_attributes_t &attr,
@@ -285,8 +300,8 @@
virtual bool isHapticPlaybackSupported() = 0;
- virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<audio_format_t> *formats) = 0;
+ virtual status_t getHwOffloadFormatsSupportedForBluetoothMedia(
+ audio_devices_t device, std::vector<audio_format_t> *formats) = 0;
virtual void setAppState(audio_port_handle_t portId, app_state_t state) = 0;
@@ -333,6 +348,50 @@
virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
device_role_t role,
AudioDeviceTypeAddrVector &devices) = 0;
+
+ /**
+ * Queries if some kind of spatialization will be performed if the audio playback context
+ * described by the provided arguments is present.
+ * The context is made of:
+ * - The audio attributes describing the playback use case.
+ * - The audio configuration describing the audio format, channels, sampling rate ...
+ * - The devices describing the sink audio device selected for playback.
+ * All arguments are optional and only the specified arguments are used to match against
+ * supported criteria. For instance, supplying no argument will tell if spatialization is
+ * supported or not in general.
+ * @param attr audio attributes describing the playback use case
+ * @param config audio configuration describing the audio format, channels, sampling rate...
+ * @param devices the sink audio device selected for playback
+ * @return true if spatialization is enabled for this context,
+ * false otherwise
+ */
+ virtual bool canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const = 0;
+
+ /**
+ * Opens a specialized spatializer output if supported by the platform.
+ * If several spatializer output profiles exist, the one supporting the sink device
+ * corresponding to the provided audio attributes will be selected.
+ * Only one spatializer output stream can be opened at a time and an error is returned
+ * if one already exists.
+ * @param config audio format, channel mask and sampling rate to be used as the mixer
+ * configuration for the spatializer mixer created.
+ * @param attr audio attributes describing the playback use case that will drive the
+ * sink device selection
+ * @param output the IO handle of the output opened
+ * @return NO_ERROR if an output was opened, INVALID_OPERATION or BAD_VALUE otherwise
+ */
+ virtual status_t getSpatializerOutput(const audio_config_base_t *config,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output) = 0;
+
+ /**
+ * Closes a previously opened specialized spatializer output.
+ * @param output the IO handle of the output to close.
+ * @return NO_ERROR if an output was closed, INVALID_OPERATION or BAD_VALUE otherwise
+ */
+ virtual status_t releaseSpatializerOutput(audio_io_handle_t output) = 0;
};
@@ -353,23 +412,29 @@
// Audio output Control functions
//
- // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
- // in case the audio policy manager has no specific requirements for the output being opened.
- // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
- // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+ // opens an audio output with the requested parameters. The parameter values can indicate to
+ // use the default values in case the audio policy manager has no specific requirements for the
+ // output being opened.
+ // When the function returns, the parameter values reflect the actual values used by the audio
+ // hardware output stream.
+ // The audio policy manager can check if the proposed parameters are suitable or not and act
+ // accordingly.
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags) = 0;
- // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
- // a special mixer thread in the AudioFlinger.
- virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0;
+ // creates a special output that is duplicated to the two outputs passed as arguments.
+ // The duplication is performed by a special mixer thread in the AudioFlinger.
+ virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2) = 0;
// closes the output stream
virtual status_t closeOutput(audio_io_handle_t output) = 0;
- // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
- // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+ // suspends the output. When an output is suspended, the corresponding audio hardware output
+ // stream is placed in standby and the AudioTracks attached to the mixer thread are still
+ // processed but the output mix is discarded.
virtual status_t suspendOutput(audio_io_handle_t output) = 0;
// restores a suspended output.
virtual status_t restoreOutput(audio_io_handle_t output) = 0;
@@ -392,16 +457,21 @@
// misc control functions
//
- // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+ // set a stream volume for a particular output. For the same user setting, a given stream type
+ // can have different volumes
// for each output (destination device) it is attached to.
- virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0) = 0;
+ virtual status_t setStreamVolume(audio_stream_type_t stream, float volume,
+ audio_io_handle_t output, int delayMs = 0) = 0;
// invalidate a stream type, causing a reroute to an unspecified new output
virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
- // function enabling to send proprietary informations directly from audio policy manager to audio hardware interface.
- virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0) = 0;
- // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
+ // enables sending proprietary information directly from the audio policy manager to the
+ // audio hardware interface.
+ virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs,
+ int delayMs = 0) = 0;
+ // enables receiving proprietary information directly from the audio hardware interface
+ // by the audio policy manager.
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
// set down link audio volume.
@@ -464,7 +534,8 @@
// These are the signatures of createAudioPolicyManager/destroyAudioPolicyManager
// methods respectively, expected by AudioPolicyService, needs to be exposed by
// libaudiopolicymanagercustom.
- using CreateAudioPolicyManagerInstance = AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
+ using CreateAudioPolicyManagerInstance =
+ AudioPolicyInterface* (*)(AudioPolicyClientInterface*);
using DestroyAudioPolicyManagerInstance = void (*)(AudioPolicyInterface*);
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index 227c2d8..1f23ae3 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -30,6 +30,7 @@
],
shared_libs: [
"libaudiofoundation",
+ "libbase",
"libcutils",
"libhidlbase",
"liblog",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index a40f6aa..856ae66 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -54,7 +54,7 @@
DeviceVector supportedDevices() const {
return mProfile != nullptr ? mProfile->getSupportedDevices() : DeviceVector(); }
- void dump(String8 *dst) const override;
+ void dump(String8 *dst, int spaces, const char* extraInfo) const override;
audio_io_handle_t mIoHandle = AUDIO_IO_HANDLE_NONE; // input handle
wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
@@ -93,8 +93,10 @@
audio_patch_handle_t getPatchHandle() const override;
void setPatchHandle(audio_patch_handle_t handle) override;
bool isMmap() override {
- if (getPolicyAudioPort() != nullptr) {
- return getPolicyAudioPort()->isMmap();
+ if (const auto policyPort = getPolicyAudioPort(); policyPort != nullptr) {
+ if (const auto port = policyPort->asAudioPort(); port != nullptr) {
+ return port->isMmap();
+ }
}
return false;
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 1f9b535..69082ac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -149,7 +149,7 @@
AudioPolicyClientInterface *clientInterface);
virtual ~AudioOutputDescriptor() {}
- void dump(String8 *dst) const override;
+ void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
void log(const char* indent);
virtual DeviceVector devices() const { return mDevices; }
@@ -270,8 +270,10 @@
audio_patch_handle_t getPatchHandle() const override;
void setPatchHandle(audio_patch_handle_t handle) override;
bool isMmap() override {
- if (getPolicyAudioPort() != nullptr) {
- return getPolicyAudioPort()->isMmap();
+ if (const auto policyPort = getPolicyAudioPort(); policyPort != nullptr) {
+ if (const auto port = policyPort->asAudioPort(); port != nullptr) {
+ return port->isMmap();
+ }
}
return false;
}
@@ -307,6 +309,8 @@
DeviceVector mDevices; /**< current devices this output is routed to */
wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
+ virtual uint32_t getRecommendedMuteDurationMs() const { return 0; }
+
protected:
const sp<PolicyAudioPort> mPolicyAudioPort;
AudioPolicyClientInterface * const mClientInterface;
@@ -332,7 +336,7 @@
AudioPolicyClientInterface *clientInterface);
virtual ~SwAudioOutputDescriptor() {}
- void dump(String8 *dst) const override;
+ void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
virtual DeviceVector devices() const;
void setDevices(const DeviceVector &devices) { mDevices = devices; }
bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
@@ -362,7 +366,8 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port_v7 *port) const;
- status_t open(const audio_config_t *config,
+ status_t open(const audio_config_t *halConfig,
+ const audio_config_base_t *mixerConfig,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
@@ -414,6 +419,8 @@
*/
DeviceVector filterSupportedDevices(const DeviceVector &devices) const;
+ uint32_t getRecommendedMuteDurationMs() const override;
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
@@ -423,6 +430,7 @@
uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
audio_session_t mDirectClientSession; // session id of the direct output client
bool mPendingReopenToQueryProfiles = false;
+ audio_channel_mask_t mMixerChannelMask = AUDIO_CHANNEL_NONE;
};
// Audio output driven by an input device directly.
@@ -433,7 +441,7 @@
AudioPolicyClientInterface *clientInterface);
virtual ~HwAudioOutputDescriptor() {}
- void dump(String8 *dst) const override;
+ void dump(String8 *dst, int spaces, const char* extraInfo) const override;
virtual bool setVolume(float volumeDb,
VolumeSource volumeSource, const StreamTypeVector &streams,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
index a5de655..955b0cf 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -41,7 +41,7 @@
void setUid(uid_t uid) { mUid = uid; }
- void dump(String8 *dst, int spaces, int index) const;
+ void dump(String8 *dst, int spaces) const;
struct audio_patch mPatch;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index cf1f64c..a8fd856 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -202,6 +202,20 @@
{AUDIO_FORMAT_AC4, {}}};
}
+ //TODO: b/193496180 use spatializer flag at audio HAL when available
+ // until then, use DEEP_BUFFER+FAST flag combo to indicate the spatializer output profile
+ void convertSpatializerFlag()
+ {
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->getOutputProfiles()) {
+ if (curProfile->getFlags()
+ == (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
+ curProfile->setFlags(AUDIO_OUTPUT_FLAG_SPATIALIZER);
+ }
+ }
+ }
+ }
+
private:
static const constexpr char* const kDefaultEngineLibraryNameSuffix = "default";
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 74b3405..dc2403c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -16,19 +16,21 @@
#pragma once
-#include <vector>
-#include <map>
-#include <unistd.h>
#include <sys/types.h>
+#include <unistd.h>
-#include <system/audio.h>
+#include <map>
+#include <vector>
+
+#include <android-base/stringprintf.h>
#include <audiomanager/AudioManager.h>
#include <media/AudioProductStrategy.h>
+#include <policy.h>
+#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
-#include <policy.h>
#include <Volume.h>
#include "AudioPatch.h"
#include "EffectDescriptor.h"
@@ -52,7 +54,7 @@
mPreferredDeviceForExclusiveUse(isPreferredDeviceForExclusiveUse){}
~ClientDescriptor() override = default;
- virtual void dump(String8 *dst, int spaces, int index) const;
+ virtual void dump(String8 *dst, int spaces) const;
virtual std::string toShortString() const;
audio_port_handle_t portId() const { return mPortId; }
@@ -100,7 +102,7 @@
~TrackClientDescriptor() override = default;
using ClientDescriptor::dump;
- void dump(String8 *dst, int spaces, int index) const override;
+ void dump(String8 *dst, int spaces) const override;
std::string toShortString() const override;
audio_output_flags_t flags() const { return mFlags; }
@@ -168,7 +170,7 @@
~RecordClientDescriptor() override = default;
using ClientDescriptor::dump;
- void dump(String8 *dst, int spaces, int index) const override;
+ void dump(String8 *dst, int spaces) const override;
audio_unique_id_t riid() const { return mRIId; }
audio_source_t source() const { return mSource; }
@@ -219,7 +221,7 @@
void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
using ClientDescriptor::dump;
- void dump(String8 *dst, int spaces, int index) const override;
+ void dump(String8 *dst, int spaces) const override;
private:
audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -269,10 +271,13 @@
size_t getClientCount() const {
return mClients.size();
}
- virtual void dump(String8 *dst) const {
+ virtual void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const {
+ (void)extraInfo;
size_t index = 0;
for (const auto& client: getClientIterable()) {
- client->dump(dst, 2, index++);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", ++index);
+ dst->appendFormat("%s", prefix.c_str());
+ client->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 20b4044..4adc920 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -43,7 +43,7 @@
DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr, const std::string &tagName = "",
const FormatVector &encodedFormats = FormatVector{});
- virtual ~DeviceDescriptor() {}
+ virtual ~DeviceDescriptor() = default;
virtual void addAudioProfile(const sp<AudioProfile> &profile) {
addAudioProfileAndSort(mProfiles, profile);
@@ -51,8 +51,6 @@
virtual const std::string getTagName() const { return mTagName; }
- const FormatVector& encodedFormats() const { return mEncodedFormats; }
-
audio_format_t getEncodedFormat() { return mCurrentEncodedFormat; }
void setEncodedFormat(audio_format_t format) {
@@ -63,8 +61,6 @@
bool hasCurrentEncodedFormat() const;
- bool supportsFormat(audio_format_t format);
-
void setDynamic() { mIsDynamic = true; }
bool isDynamic() const { return mIsDynamic; }
@@ -95,7 +91,7 @@
void setEncapsulationInfoFromHal(AudioPolicyClientInterface *clientInterface);
- void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
+ void dump(String8 *dst, int spaces, bool verbose = true) const;
private:
template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
@@ -106,7 +102,6 @@
}
std::string mTagName; // Unique human readable identifier for a device port found in conf file.
- FormatVector mEncodedFormats;
audio_format_t mCurrentEncodedFormat;
bool mIsDynamic = false;
const std::string mDeclaredAddress; // Original device address
@@ -168,6 +163,10 @@
DeviceVector getDevicesFromDeviceTypeAddrVec(
const AudioDeviceTypeAddrVector& deviceTypeAddrVector) const;
+ // Return an AudioDeviceTypeAddrVector containing the type and address of each device
+ // descriptor in this DeviceVector
+ AudioDeviceTypeAddrVector toTypeAddrVector() const;
+
// If there are devices with the given type and the devices to add is not empty,
// remove all the devices with the given type and add all the devices to add.
void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 9ba745a..436fcc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -80,6 +80,7 @@
sp<DeviceDescriptor> getRouteSinkDevice(const sp<AudioRoute> &route) const;
DeviceVector getRouteSourceDevices(const sp<AudioRoute> &route) const;
+ const AudioRouteVector& getRoutes() const { return mRoutes; }
void setRoutes(const AudioRouteVector &routes);
status_t addOutputProfile(const sp<IOProfile> &profile);
@@ -114,7 +115,7 @@
const sp<PolicyAudioPort> &dstPort) const;
// TODO remove from here (split serialization)
- void dump(String8 *dst) const;
+ void dump(String8 *dst, int spaces) const;
private:
void refreshSupportedDevices();
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index a74cefa..90b812d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -37,9 +37,7 @@
public:
IOProfile(const std::string &name, audio_port_role_t role)
: AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
- maxOpenCount(1),
curOpenCount(0),
- maxActiveCount(1),
curActiveCount(0) {}
virtual ~IOProfile() = default;
@@ -59,11 +57,12 @@
// Once capture clients are tracked individually and not per session this can be removed
// MMAP no IRQ input streams do not have the default limitation of one active client
// max as they can be used in shared mode by the same application.
+ // NOTE: Please consider moving to AudioPort when addressing the FIXME
// NOTE: this works for explicit values set in audio_policy_configuration.xml because
// flags are parsed before maxActiveCount by the serializer.
void setFlags(uint32_t flags) override
{
- PolicyAudioPort::setFlags(flags);
+ AudioPort::setFlags(flags);
if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
maxActiveCount = 0;
}
@@ -98,7 +97,7 @@
uint32_t flags,
bool exactMatchRequiredForInputFlags = false) const;
- void dump(String8 *dst) const;
+ void dump(String8 *dst, int spaces) const;
void log();
bool hasSupportedDevices() const { return !mSupportedDevices.isEmpty(); }
@@ -194,16 +193,8 @@
return false;
}
- // Maximum number of input or output streams that can be simultaneously opened for this profile.
- // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
- // profiles and 0 for input profiles
- uint32_t maxOpenCount;
// Number of streams currently opened for this profile.
uint32_t curOpenCount;
- // Maximum number of input or output streams that can be simultaneously active for this profile.
- // By convention 0 means no limit. To respect legacy behavior, initialized to 0 for output
- // profiles and 1 for input profiles
- uint32_t maxActiveCount;
// Number of streams currently active for this profile. This is not the number of active clients
// (AudioTrack or AudioRecord) but the number of active HAL streams.
uint32_t curActiveCount;
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
index ab33b38..acf787b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -36,7 +36,7 @@
class PolicyAudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
{
public:
- PolicyAudioPort() : mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
+ PolicyAudioPort() = default;
virtual ~PolicyAudioPort() = default;
@@ -49,19 +49,6 @@
virtual sp<AudioPort> asAudioPort() const = 0;
- virtual void setFlags(uint32_t flags)
- {
- //force direct flag if offload flag is set: offloading implies a direct output stream
- // and all common behaviors are driven by checking only the direct flag
- // this should normally be set appropriately in the policy configuration file
- if (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE &&
- (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- flags |= AUDIO_OUTPUT_FLAG_DIRECT;
- }
- mFlags = flags;
- }
- uint32_t getFlags() const { return mFlags; }
-
virtual void attach(const sp<HwModule>& module);
virtual void detach();
bool isAttached() { return mModule != 0; }
@@ -105,22 +92,6 @@
const char *getModuleName() const;
sp<HwModule> getModule() const { return mModule; }
- inline bool isDirectOutput() const
- {
- return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX) &&
- (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
- (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
- }
-
- inline bool isMmap() const
- {
- return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX)
- && (((asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
- ((mFlags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0))
- || ((asAudioPort()->getRole() == AUDIO_PORT_ROLE_SINK) &&
- ((mFlags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0)));
- }
-
void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
const AudioRouteVector &getRoutes() const { return mRoutes; }
@@ -129,7 +100,6 @@
const ChannelMaskSet &channelMasks) const;
void pickSamplingRate(uint32_t &rate, const SampleRateSet &samplingRates) const;
- uint32_t mFlags; // attribute flags mask (e.g primary output, direct output...).
sp<HwModule> mModule; // audio HW module exposing this I/O stream
AudioRouteVector mRoutes; // Routes involving this port
};
@@ -141,27 +111,18 @@
virtual sp<PolicyAudioPort> getPolicyAudioPort() const = 0;
- status_t validationBeforeApplyConfig(const struct audio_port_config *config) const;
-
- void applyPolicyAudioPortConfig(const struct audio_port_config *config) {
- if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
- mFlags = config->flags;
- }
+ status_t validationBeforeApplyConfig(const struct audio_port_config *config) const {
+ sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
+ return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
}
- void toPolicyAudioPortConfig(
- struct audio_port_config *dstConfig,
- const struct audio_port_config *srcConfig = NULL) const;
-
-
- virtual bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
+ bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
return (other.get() != nullptr) && (other->getPolicyAudioPort().get() != nullptr) &&
(getPolicyAudioPort().get() != nullptr) &&
(other->getPolicyAudioPort()->getModuleHandle() ==
getPolicyAudioPort()->getModuleHandle());
}
- union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index cd10010..580938e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -17,6 +17,8 @@
#define LOG_TAG "APM::AudioCollections"
//#define LOG_NDEBUG 0
+#include <android-base/stringprintf.h>
+
#include "AudioCollections.h"
#include "AudioRoute.h"
#include "HwModule.h"
@@ -40,10 +42,11 @@
if (audioRouteVector.isEmpty()) {
return;
}
- dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", audioRouteVector.size());
+ dst->appendFormat("%*s- Audio Routes (%zu):\n", spaces - 2, "", audioRouteVector.size());
for (size_t i = 0; i < audioRouteVector.size(); i++) {
- dst->appendFormat("%*s- Route %zu:\n", spaces, "", i + 1);
- audioRouteVector.itemAt(i)->dump(dst, 4);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->append(prefix.c_str());
+ audioRouteVector.itemAt(i)->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 7016a08..966b8cb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,8 @@
#define LOG_TAG "APM::AudioInputDescriptor"
//#define LOG_NDEBUG 0
+#include <android-base/stringprintf.h>
+
#include <audiomanager/AudioManager.h>
#include <media/AudioPolicy.h>
#include <policy.h>
@@ -62,7 +64,6 @@
toAudioPortConfig(&localBackupConfig);
if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
AudioPortConfig::applyAudioPortConfig(config, backupConfig);
- applyPolicyAudioPortConfig(config);
}
if (backupConfig != NULL) {
@@ -83,7 +84,6 @@
}
AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
- toPolicyAudioPortConfig(dstConfig, srcConfig);
dstConfig->role = AUDIO_PORT_ROLE_SINK;
dstConfig->type = AUDIO_PORT_TYPE_MIX;
@@ -510,17 +510,20 @@
}
}
-void AudioInputDescriptor::dump(String8 *dst) const
+void AudioInputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
{
- dst->appendFormat(" ID: %d\n", getId());
- dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
- dst->appendFormat(" Format: %d\n", mFormat);
- dst->appendFormat(" Channels: %08x\n", mChannelMask);
- dst->appendFormat(" Devices %s\n", mDevice->toString(true /*includeSensitiveInfo*/).c_str());
- mEnabledEffects.dump(dst, 1 /*spaces*/, false /*verbose*/);
- dst->append(" AudioRecord Clients:\n");
- ClientMapHandler<RecordClientDescriptor>::dump(dst);
- dst->append("\n");
+ dst->appendFormat("Port ID: %d%s%s\n",
+ getId(), extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+ dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+ audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+ dst->appendFormat("%*sDevices: %s\n", spaces, "",
+ mDevice->toString(true /*includeSensitiveInfo*/).c_str());
+ mEnabledEffects.dump(dst, spaces /*spaces*/, false /*verbose*/);
+ if (getClientCount() != 0) {
+ dst->appendFormat("%*sAudioRecord Clients (%zu):\n", spaces, "", getClientCount());
+ ClientMapHandler<RecordClientDescriptor>::dump(dst, spaces);
+ dst->append("\n");
+ }
}
bool AudioInputCollection::isSourceActive(audio_source_t source) const
@@ -608,10 +611,12 @@
void AudioInputCollection::dump(String8 *dst) const
{
- dst->append("\nInputs dump:\n");
+ dst->appendFormat("\n Inputs (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("- Input %d dump:\n", keyAt(i));
- valueAt(i)->dump(dst);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 6b08f7c..235e4aa 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -17,6 +17,8 @@
#define LOG_TAG "APM::AudioOutputDescriptor"
//#define LOG_NDEBUG 0
+#include <android-base/stringprintf.h>
+
#include <AudioPolicyInterface.h>
#include "AudioOutputDescriptor.h"
#include "AudioPolicyMix.h"
@@ -188,7 +190,6 @@
toAudioPortConfig(&localBackupConfig);
if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
AudioPortConfig::applyAudioPortConfig(config, backupConfig);
- applyPolicyAudioPortConfig(config);
}
if (backupConfig != NULL) {
@@ -207,7 +208,6 @@
dstConfig->config_mask |= srcConfig->config_mask;
}
AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
- toPolicyAudioPortConfig(dstConfig, srcConfig);
dstConfig->role = AUDIO_PORT_ROLE_SOURCE;
dstConfig->type = AUDIO_PORT_TYPE_MIX;
@@ -245,32 +245,45 @@
return client->volumeSource() != volumeSourceToIgnore; }) != end(mActiveClients);
}
-void AudioOutputDescriptor::dump(String8 *dst) const
+void AudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
{
- dst->appendFormat(" ID: %d\n", mId);
- dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
- dst->appendFormat(" Format: %08x\n", mFormat);
- dst->appendFormat(" Channels: %08x\n", mChannelMask);
- dst->appendFormat(" Devices: %s\n", devices().toString(true /*includeSensitiveInfo*/).c_str());
- dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
- for (const auto &iter : mRoutingActivities) {
- dst->appendFormat(" Product Strategy id: %d", iter.first);
- iter.second.dump(dst, 4);
+ dst->appendFormat("Port ID: %d%s%s\n",
+ mId, extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+ dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+ audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+ dst->appendFormat("%*sDevices: %s\n", spaces, "",
+ devices().toString(true /*includeSensitiveInfo*/).c_str());
+ dst->appendFormat("%*sGlobal active count: %u\n", spaces, "", mGlobalActiveCount);
+ if (!mRoutingActivities.empty()) {
+ dst->appendFormat("%*s- Product Strategies (%zu):\n", spaces - 2, "",
+ mRoutingActivities.size());
+ for (const auto &iter : mRoutingActivities) {
+ dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+ iter.second.dump(dst, 0);
+ }
}
- for (const auto &iter : mVolumeActivities) {
- dst->appendFormat(" Volume Activities id: %d", iter.first);
- iter.second.dump(dst, 4);
+ if (!mVolumeActivities.empty()) {
+ dst->appendFormat("%*s- Volume Activities (%zu):\n", spaces - 2, "",
+ mVolumeActivities.size());
+ for (const auto &iter : mVolumeActivities) {
+ dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+ iter.second.dump(dst, 0);
+ }
}
- dst->append(" AudioTrack Clients:\n");
- ClientMapHandler<TrackClientDescriptor>::dump(dst);
- dst->append("\n");
+ if (getClientCount() != 0) {
+ dst->appendFormat("%*s- AudioTrack clients (%zu):\n", spaces - 2, "", getClientCount());
+ ClientMapHandler<TrackClientDescriptor>::dump(dst, spaces);
+ }
if (!mActiveClients.empty()) {
- dst->append(" AudioTrack active (stream) clients:\n");
+ dst->appendFormat("%*s- AudioTrack active (stream) clients (%zu):\n", spaces - 2, "",
+ mActiveClients.size());
size_t index = 0;
for (const auto& client : mActiveClients) {
- client->dump(dst, 2, index++);
+ const std::string prefix = base::StringPrintf(
+ "%*sid %zu: ", spaces + 1, "", ++index);
+ dst->appendFormat("%s", prefix.c_str());
+ client->dump(dst, prefix.size());
}
- dst->append(" \n");
}
}
@@ -294,11 +307,18 @@
}
}
-void SwAudioOutputDescriptor::dump(String8 *dst) const
+void SwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
{
- dst->appendFormat(" Latency: %d\n", mLatency);
- dst->appendFormat(" Flags %08x\n", mFlags);
- AudioOutputDescriptor::dump(dst);
+ String8 allExtraInfo;
+ if (extraInfo != nullptr) {
+ allExtraInfo.appendFormat("%s; ", extraInfo);
+ }
+ std::string flagsLiteral = toString(mFlags);
+ allExtraInfo.appendFormat("Latency: %d; 0x%04x", mLatency, mFlags);
+ if (!flagsLiteral.empty()) {
+ allExtraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
+ }
+ AudioOutputDescriptor::dump(dst, spaces, allExtraInfo.c_str());
}
DeviceVector SwAudioOutputDescriptor::devices() const
@@ -491,7 +511,8 @@
return true;
}
-status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+status_t SwAudioOutputDescriptor::open(const audio_config_t *halConfig,
+ const audio_config_base_t *mixerConfig,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
@@ -504,45 +525,62 @@
"with the requested devices, all device types: %s",
__func__, dumpDeviceTypes(devices.types()).c_str());
- audio_config_t lConfig;
- if (config == nullptr) {
- lConfig = AUDIO_CONFIG_INITIALIZER;
- lConfig.sample_rate = mSamplingRate;
- lConfig.channel_mask = mChannelMask;
- lConfig.format = mFormat;
+ audio_config_t lHalConfig;
+ if (halConfig == nullptr) {
+ lHalConfig = AUDIO_CONFIG_INITIALIZER;
+ lHalConfig.sample_rate = mSamplingRate;
+ lHalConfig.channel_mask = mChannelMask;
+ lHalConfig.format = mFormat;
} else {
- lConfig = *config;
+ lHalConfig = *halConfig;
}
// if the selected profile is offloaded and no offload info was specified,
// create a default one
if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
- lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+ lHalConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- lConfig.offload_info = AUDIO_INFO_INITIALIZER;
- lConfig.offload_info.sample_rate = lConfig.sample_rate;
- lConfig.offload_info.channel_mask = lConfig.channel_mask;
- lConfig.offload_info.format = lConfig.format;
- lConfig.offload_info.stream_type = stream;
- lConfig.offload_info.duration_us = -1;
- lConfig.offload_info.has_video = true; // conservative
- lConfig.offload_info.is_streaming = true; // likely
- lConfig.offload_info.encapsulation_mode = lConfig.offload_info.encapsulation_mode;
- lConfig.offload_info.content_id = lConfig.offload_info.content_id;
- lConfig.offload_info.sync_id = lConfig.offload_info.sync_id;
+ lHalConfig.offload_info = AUDIO_INFO_INITIALIZER;
+ lHalConfig.offload_info.sample_rate = lHalConfig.sample_rate;
+ lHalConfig.offload_info.channel_mask = lHalConfig.channel_mask;
+ lHalConfig.offload_info.format = lHalConfig.format;
+ lHalConfig.offload_info.stream_type = stream;
+ lHalConfig.offload_info.duration_us = -1;
+ lHalConfig.offload_info.has_video = true; // conservative
+ lHalConfig.offload_info.is_streaming = true; // likely
+ lHalConfig.offload_info.encapsulation_mode = lHalConfig.offload_info.encapsulation_mode;
+ lHalConfig.offload_info.content_id = lHalConfig.offload_info.content_id;
+ lHalConfig.offload_info.sync_id = lHalConfig.offload_info.sync_id;
+ }
+
+ audio_config_base_t lMixerConfig;
+ if (mixerConfig == nullptr) {
+ lMixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ lMixerConfig.sample_rate = lHalConfig.sample_rate;
+ lMixerConfig.channel_mask = lHalConfig.channel_mask;
+ lMixerConfig.format = lHalConfig.format;
+ } else {
+ lMixerConfig = *mixerConfig;
}
mFlags = (audio_output_flags_t)(mFlags | flags);
+ //TODO: b/193496180 use spatializer flag at audio HAL when available
+ audio_output_flags_t halFlags = mFlags;
+ if ((mFlags & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0) {
+ halFlags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
+ }
+
ALOGV("opening output for device %s profile %p name %s",
mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
output,
- &lConfig,
+ &lHalConfig,
+ &lMixerConfig,
device,
&mLatency,
- mFlags);
+ halFlags);
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
@@ -550,9 +588,10 @@
"selected device %s for opening",
__FUNCTION__, *output, devices.toString().c_str(),
device->toString().c_str());
- mSamplingRate = lConfig.sample_rate;
- mChannelMask = lConfig.channel_mask;
- mFormat = lConfig.format;
+ mSamplingRate = lHalConfig.sample_rate;
+ mChannelMask = lHalConfig.channel_mask;
+ mFormat = lHalConfig.format;
+ mMixerChannelMask = lMixerConfig.channel_mask;
mId = PolicyAudioPort::getNextUniqueId();
mIoHandle = *output;
mProfile->curOpenCount++;
@@ -651,6 +690,15 @@
return NO_ERROR;
}
+uint32_t SwAudioOutputDescriptor::getRecommendedMuteDurationMs() const
+{
+ if (isDuplicated()) {
+ return std::max(mOutput1->getRecommendedMuteDurationMs(),
+ mOutput2->getRecommendedMuteDurationMs());
+ }
+ return mProfile->recommendedMuteDurationMs;
+}
+
// HwAudioOutputDescriptor implementation
HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
@@ -659,11 +707,11 @@
{
}
-void HwAudioOutputDescriptor::dump(String8 *dst) const
+void HwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
{
- AudioOutputDescriptor::dump(dst);
- dst->append("Source:\n");
- mSource->dump(dst, 0, 0);
+ AudioOutputDescriptor::dump(dst, spaces, extraInfo);
+ dst->appendFormat("%*sSource:\n", spaces, "");
+ mSource->dump(dst, spaces);
}
void HwAudioOutputDescriptor::toAudioPortConfig(
@@ -836,10 +884,12 @@
void SwAudioOutputCollection::dump(String8 *dst) const
{
- dst->append("\nOutputs dump:\n");
+ dst->appendFormat("\n Outputs (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("- Output %d dump:\n", keyAt(i));
- valueAt(i)->dump(dst);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
}
}
@@ -858,10 +908,12 @@
void HwAudioOutputCollection::dump(String8 *dst) const
{
- dst->append("\nOutputs dump:\n");
+ dst->appendFormat("\n Outputs (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("- Output %d dump:\n", keyAt(i));
- valueAt(i)->dump(dst);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index d79110a..4f03db9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -20,7 +20,9 @@
#include "AudioPatch.h"
#include "TypeConverter.h"
+#include <android-base/stringprintf.h>
#include <log/log.h>
+#include <media/AudioDeviceTypeAddr.h>
#include <utils/String8.h>
namespace android {
@@ -37,20 +39,21 @@
{
for (int i = 0; i < count; ++i) {
const audio_port_config &cfg = cfgs[i];
- dst->appendFormat("%*s [%s %d] ", spaces, "", prefix, i + 1);
+ dst->appendFormat("%*s[%s %d] ", spaces, "", prefix, i + 1);
if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
- dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
+ AudioDeviceTypeAddr device(cfg.ext.device.type, cfg.ext.device.address);
+ dst->appendFormat("Device Port ID: %d; {%s}",
+ cfg.id, device.toString(true /*includeSensitiveInfo*/).c_str());
} else {
- dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+ dst->appendFormat("Mix Port ID: %d; I/O handle: %d;", cfg.id, cfg.ext.mix.handle);
}
dst->append("\n");
}
}
-void AudioPatch::dump(String8 *dst, int spaces, int index) const
+void AudioPatch::dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
- spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+ dst->appendFormat("owner uid %4d; handle %2d; af handle %2d\n", mUid, mHandle, mAfPatchHandle);
dumpPatchEndpoints(dst, spaces, "src ", mPatch.num_sources, mPatch.sources);
dumpPatchEndpoints(dst, spaces, "sink", mPatch.num_sinks, mPatch.sinks);
}
@@ -135,9 +138,11 @@
void AudioPatchCollection::dump(String8 *dst) const
{
- dst->append("\nAudio Patches:\n");
+ dst->appendFormat("\n Audio Patches (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- valueAt(i)->dump(dst, 2, i);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index b209a88..546f56b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -675,7 +675,7 @@
void AudioPolicyMixCollection::dump(String8 *dst) const
{
- dst->append("\nAudio Policy Mix:\n");
+ dst->append("\n Audio Policy Mix:\n");
for (size_t i = 0; i < size(); i++) {
itemAt(i)->dump(dst, 2, i);
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 866417e..53cc473 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -25,15 +25,16 @@
void AudioRoute::dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*s- Type: %s\n", spaces, "", mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix");
- dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().c_str());
+ dst->appendFormat("%s; Sink: \"%s\"\n",
+ mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix", mSink->getTagName().c_str());
if (mSources.size() != 0) {
- dst->appendFormat("%*s- Sources: \n", spaces, "");
+ dst->appendFormat("%*sSources: ", spaces, "");
for (size_t i = 0; i < mSources.size(); i++) {
- dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().c_str());
+ dst->appendFormat("\"%s\"", mSources[i]->getTagName().c_str());
+ if (i + 1 < mSources.size()) dst->append(", ");
}
+ dst->append("\n");
}
- dst->append("\n");
}
bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index afc4d01..035bef2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -18,9 +18,12 @@
//#define LOG_NDEBUG 0
#include <sstream>
+
+#include <android-base/stringprintf.h>
+#include <TypeConverter.h>
#include <utils/Log.h>
#include <utils/String8.h>
-#include <TypeConverter.h>
+
#include "AudioOutputDescriptor.h"
#include "AudioPatch.h"
#include "AudioPolicyMix.h"
@@ -39,35 +42,36 @@
return ss.str();
}
-void ClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void ClientDescriptor::dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*sClient %d:\n", spaces, "", index+1);
- dst->appendFormat("%*s- Port Id: %d Session Id: %d UID: %d\n", spaces, "",
- mPortId, mSessionId, mUid);
- dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
- mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
- dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
- dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
- dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+ dst->appendFormat("Port ID: %d; Session ID: %d; uid %d; State: %s\n",
+ mPortId, mSessionId, mUid, mActive ? "Active" : "Inactive");
+ dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+ audio_format_to_string(mConfig.format), mConfig.sample_rate, mConfig.channel_mask);
+ dst->appendFormat("%*sAttributes: %s\n", spaces, "", toString(mAttributes).c_str());
+ if (mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE) {
+ dst->appendFormat("%*sPreferred Device Port ID: %d;\n", spaces, "", mPreferredDeviceId);
+ }
}
-void TrackClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void TrackClientDescriptor::dump(String8 *dst, int spaces) const
{
- ClientDescriptor::dump(dst, spaces, index);
- dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
- dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
- dst->appendFormat("%*s- DAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
- dst->appendFormat("%*s- DAP Secondary Outputs:\n", spaces, "");
- for (auto desc : mSecondaryOutputs) {
- dst->appendFormat("%*s - %d\n", spaces, "",
- desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+ ClientDescriptor::dump(dst, spaces);
+ dst->appendFormat("%*sStream: %d; Flags: %08x; Refcount: %d\n", spaces, "",
+ mStream, mFlags, mActivityCount);
+ dst->appendFormat("%*sDAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
+ if (!mSecondaryOutputs.empty()) {
+ dst->appendFormat("%*sDAP Secondary Outputs: ", spaces - 2, "");
+ for (auto desc : mSecondaryOutputs) {
+ dst->appendFormat("%d, ", desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+ }
+ dst->append("\n");
}
}
std::string TrackClientDescriptor::toShortString() const
{
std::stringstream ss;
-
ss << ClientDescriptor::toShortString() << " Stream: " << mStream;
return ss.str();
}
@@ -81,10 +85,10 @@
}
}
-void RecordClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void RecordClientDescriptor::dump(String8 *dst, int spaces) const
{
- ClientDescriptor::dump(dst, spaces, index);
- dst->appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+ ClientDescriptor::dump(dst, spaces);
+ dst->appendFormat("%*sSource: %d; Flags: %08x\n", spaces, "", mSource, mFlags);
mEnabledEffects.dump(dst, spaces + 2 /*spaces*/, false /*verbose*/);
}
@@ -109,18 +113,21 @@
mHwOutput = hwOutput;
}
-void SourceClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void SourceClientDescriptor::dump(String8 *dst, int spaces) const
{
- TrackClientDescriptor::dump(dst, spaces, index);
- dst->appendFormat("%*s- Device:\n", spaces, "");
- mSrcDevice->dump(dst, 2, 0);
+ TrackClientDescriptor::dump(dst, spaces);
+ const std::string prefix = base::StringPrintf("%*sDevice: ", spaces, "");
+ dst->appendFormat("%s", prefix.c_str());
+ mSrcDevice->dump(dst, prefix.size());
}
void SourceClientCollection::dump(String8 *dst) const
{
- dst->append("\nAudio sources:\n");
+ dst->appendFormat("\n Audio sources (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- valueAt(i)->dump(dst, 2, i);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a92d31e..a909331 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -19,10 +19,11 @@
#include <set>
-#include <AudioPolicyInterface.h>
+#include <android-base/stringprintf.h>
#include <audio_utils/string.h>
#include <media/AudioParameter.h>
#include <media/TypeConverter.h>
+#include <AudioPolicyInterface.h>
#include "DeviceDescriptor.h"
#include "TypeConverter.h"
#include "HwModule.h"
@@ -54,19 +55,10 @@
DeviceDescriptor::DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr,
const std::string &tagName,
const FormatVector &encodedFormats) :
- DeviceDescriptorBase(deviceTypeAddr), mTagName(tagName), mEncodedFormats(encodedFormats),
+ DeviceDescriptorBase(deviceTypeAddr, encodedFormats), mTagName(tagName),
mDeclaredAddress(DeviceDescriptorBase::address())
{
mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
- /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
- * FIXME: APM should know the version of the HAL and don't add the formats for V5.0.
- * For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
- * something like 'encodedFormats="AUDIO_FORMAT_PCM_16_BIT"' on the HDMI devicePort.
- */
- if (mDeviceTypeAddr.mType == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.empty()) {
- mEncodedFormats.push_back(AUDIO_FORMAT_AC3);
- mEncodedFormats.push_back(AUDIO_FORMAT_IEC61937);
- }
}
void DeviceDescriptor::attach(const sp<HwModule>& module)
@@ -118,20 +110,6 @@
return (mCurrentEncodedFormat != AUDIO_FORMAT_DEFAULT);
}
-bool DeviceDescriptor::supportsFormat(audio_format_t format)
-{
- if (mEncodedFormats.empty()) {
- return true;
- }
-
- for (const auto& devFormat : mEncodedFormats) {
- if (devFormat == format) {
- return true;
- }
- }
- return false;
-}
-
status_t DeviceDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
audio_port_config *backupConfig)
{
@@ -141,7 +119,6 @@
toAudioPortConfig(&localBackupConfig);
if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
AudioPortConfig::applyAudioPortConfig(config, backupConfig);
- applyPolicyAudioPortConfig(config);
}
if (backupConfig != NULL) {
@@ -154,8 +131,6 @@
const struct audio_port_config *srcConfig) const
{
DeviceDescriptorBase::toAudioPortConfig(dstConfig, srcConfig);
- toPolicyAudioPortConfig(dstConfig, srcConfig);
-
dstConfig->ext.device.hw_module = getModuleHandle();
}
@@ -202,15 +177,15 @@
}
}
-void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
+void DeviceDescriptor::dump(String8 *dst, int spaces, bool verbose) const
{
String8 extraInfo;
if (!mTagName.empty()) {
- extraInfo.appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.c_str());
+ extraInfo.appendFormat("\"%s\"", mTagName.c_str());
}
std::string descBaseDumpStr;
- DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, index, extraInfo.string(), verbose);
+ DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, extraInfo.string(), verbose);
dst->append(descBaseDumpStr.c_str());
}
@@ -451,6 +426,14 @@
return devices;
}
+AudioDeviceTypeAddrVector DeviceVector::toTypeAddrVector() const {
+ AudioDeviceTypeAddrVector result;
+ for (const auto& device : *this) {
+ result.push_back(AudioDeviceTypeAddr(device->type(), device->address()));
+ }
+ return result;
+}
+
void DeviceVector::replaceDevicesByType(
audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
DeviceVector devicesToRemove = getDevicesFromType(typeToRemove);
@@ -465,9 +448,11 @@
if (isEmpty()) {
return;
}
- dst->appendFormat("%*s- %s devices:\n", spaces, "", tag.string());
+ dst->appendFormat("%*s%s devices (%zu):\n", spaces, "", tag.string(), size());
for (size_t i = 0; i < size(); i++) {
- itemAt(i)->dump(dst, spaces + 2, i, verbose);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ itemAt(i)->dump(dst, prefix.size(), verbose);
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 843f5da..3f9c8b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "APM::EffectDescriptor"
//#define LOG_NDEBUG 0
+#include <android-base/stringprintf.h>
#include "EffectDescriptor.h"
#include <utils/String8.h>
@@ -24,13 +25,11 @@
void EffectDescriptor::dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*sID: %d\n", spaces, "", mId);
- dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
- dst->appendFormat("%*sMusic Effect: %s\n", spaces, "", isMusicEffect()? "yes" : "no");
- dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
- dst->appendFormat("%*sName: %s\n", spaces, "", mDesc.name);
- dst->appendFormat("%*s%s\n", spaces, "", mEnabled ? "Enabled" : "Disabled");
- dst->appendFormat("%*s%s\n", spaces, "", mSuspended ? "Suspended" : "Active");
+ dst->appendFormat("Effect ID: %d; Attached to I/O handle: %d; Session: %d;\n",
+ mId, mIo, mSession);
+ dst->appendFormat("%*sMusic Effect? %s; \"%s\"; %s; %s\n", spaces, "",
+ isMusicEffect()? "yes" : "no", mDesc.name,
+ mEnabled ? "Enabled" : "Disabled", mSuspended ? "Suspended" : "Active");
}
EffectDescriptorCollection::EffectDescriptorCollection() :
@@ -237,10 +236,14 @@
mTotalEffectsMemory,
mTotalEffectsMemoryMaxUsed);
}
- dst->appendFormat("%*sEffects:\n", spaces, "");
- for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("%*s- Effect %d:\n", spaces, "", keyAt(i));
- valueAt(i)->dump(dst, spaces + 2);
+ if (size() > 0) {
+ if (spaces > 1) spaces -= 2;
+ dst->appendFormat("%*s- Effects (%zu):\n", spaces, "", size());
+ for (size_t i = 0; i < size(); i++) {
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size());
+ }
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 3a143b0..418b7eb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -17,11 +17,13 @@
#define LOG_TAG "APM::HwModule"
//#define LOG_NDEBUG 0
-#include "HwModule.h"
-#include "IOProfile.h"
+#include <android-base/stringprintf.h>
#include <policy.h>
#include <system/audio.h>
+#include "HwModule.h"
+#include "IOProfile.h"
+
namespace android {
HwModule::HwModule(const char *name, uint32_t halVersionMajor, uint32_t halVersionMinor)
@@ -247,28 +249,28 @@
return false;
}
-void HwModule::dump(String8 *dst) const
+void HwModule::dump(String8 *dst, int spaces) const
{
- dst->appendFormat(" - name: %s\n", getName());
- dst->appendFormat(" - handle: %d\n", mHandle);
- dst->appendFormat(" - version: %u.%u\n", getHalVersionMajor(), getHalVersionMinor());
+ dst->appendFormat("Handle: %d; \"%s\"\n", mHandle, getName());
if (mOutputProfiles.size()) {
- dst->append(" - outputs:\n");
+ dst->appendFormat("%*s- Output MixPorts (%zu):\n", spaces - 2, "", mOutputProfiles.size());
for (size_t i = 0; i < mOutputProfiles.size(); i++) {
- dst->appendFormat(" output %zu:\n", i);
- mOutputProfiles[i]->dump(dst);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->append(prefix.c_str());
+ mOutputProfiles[i]->dump(dst, prefix.size());
}
}
if (mInputProfiles.size()) {
- dst->append(" - inputs:\n");
+ dst->appendFormat("%*s- Input MixPorts (%zu):\n", spaces - 2, "", mInputProfiles.size());
for (size_t i = 0; i < mInputProfiles.size(); i++) {
- dst->appendFormat(" input %zu:\n", i);
- mInputProfiles[i]->dump(dst);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->append(prefix.c_str());
+ mInputProfiles[i]->dump(dst, prefix.size());
}
}
- mDeclaredDevices.dump(dst, String8("Declared"), 2, true);
- mDynamicDevices.dump(dst, String8("Dynamic"), 2, true);
- dumpAudioRouteVector(mRoutes, dst, 2);
+ mDeclaredDevices.dump(dst, String8("- Declared"), spaces - 2, true);
+ mDynamicDevices.dump(dst, String8("- Dynamic"), spaces - 2, true);
+ dumpAudioRouteVector(mRoutes, dst, spaces);
}
sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
@@ -462,10 +464,11 @@
void HwModuleCollection::dump(String8 *dst) const
{
- dst->append("\nHW Modules dump:\n");
+ dst->appendFormat("\n Hardware modules (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("- HW Module %zu:\n", i + 1);
- itemAt(i)->dump(dst);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ dst->append(prefix.c_str());
+ itemAt(i)->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 09b614d..21f2018 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -116,27 +116,30 @@
return device == deviceDesc && deviceDesc->hasCurrentEncodedFormat(); }) == 1;
}
-void IOProfile::dump(String8 *dst) const
+void IOProfile::dump(String8 *dst, int spaces) const
{
- std::string portStr;
- AudioPort::dump(&portStr, 4);
- dst->append(portStr.c_str());
-
- dst->appendFormat(" - flags: 0x%04x", getFlags());
+ String8 extraInfo;
+ extraInfo.appendFormat("0x%04x", getFlags());
std::string flagsLiteral =
getRole() == AUDIO_PORT_ROLE_SINK ?
toString(static_cast<audio_input_flags_t>(getFlags())) :
getRole() == AUDIO_PORT_ROLE_SOURCE ?
toString(static_cast<audio_output_flags_t>(getFlags())) : "";
if (!flagsLiteral.empty()) {
- dst->appendFormat(" (%s)", flagsLiteral.c_str());
+ extraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
}
- dst->append("\n");
- mSupportedDevices.dump(dst, String8("Supported"), 4, false);
- dst->appendFormat("\n - maxOpenCount: %u - curOpenCount: %u\n",
- maxOpenCount, curOpenCount);
- dst->appendFormat(" - maxActiveCount: %u - curActiveCount: %u\n",
- maxActiveCount, curActiveCount);
+
+ std::string portStr;
+ AudioPort::dump(&portStr, spaces, extraInfo.c_str());
+ dst->append(portStr.c_str());
+
+ mSupportedDevices.dump(dst, String8("- Supported"), spaces - 2, false);
+ dst->appendFormat("%*s- maxOpenCount: %u; curOpenCount: %u\n",
+ spaces - 2, "", maxOpenCount, curOpenCount);
+ dst->appendFormat("%*s- maxActiveCount: %u; curActiveCount: %u\n",
+ spaces - 2, "", maxActiveCount, curActiveCount);
+ dst->appendFormat("%*s- recommendedMuteDurationMs: %u ms\n",
+ spaces - 2, "", recommendedMuteDurationMs);
}
void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
index 8c61b90..ce8178f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -30,9 +30,9 @@
// --- PolicyAudioPort class implementation
void PolicyAudioPort::attach(const sp<HwModule>& module)
{
+ mModule = module;
ALOGV("%s: attaching module %s to port %s",
__FUNCTION__, getModuleName(), asAudioPort()->getName().c_str());
- mModule = module;
}
void PolicyAudioPort::detach()
@@ -87,7 +87,7 @@
// For direct outputs, pick minimum sampling rate: this helps ensuring that the
// channel count / sampling rate combination chosen will be supported by the connected
// sink
- if (isDirectOutput()) {
+ if (asAudioPort()->isDirectOutput()) {
uint32_t samplingRate = UINT_MAX;
for (const auto rate : samplingRates) {
if ((rate < samplingRate) && (rate > 0)) {
@@ -122,7 +122,7 @@
// For direct outputs, pick minimum channel count: this helps ensuring that the
// channel count / sampling rate combination chosen will be supported by the connected
// sink
- if (isDirectOutput()) {
+ if (asAudioPort()->isDirectOutput()) {
uint32_t channelCount = UINT_MAX;
for (const auto channelMask : channelMasks) {
uint32_t cnlCount;
@@ -236,7 +236,7 @@
audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
// For mixed output and inputs, use best mixer output format.
// Do not limit format otherwise
- if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
+ if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || asAudioPort()->isDirectOutput()) {
bestFormat = AUDIO_FORMAT_INVALID;
}
@@ -266,29 +266,4 @@
asAudioPort()->getName().c_str(), samplingRate, channelMask, format);
}
-// --- PolicyAudioPortConfig class implementation
-
-status_t PolicyAudioPortConfig::validationBeforeApplyConfig(
- const struct audio_port_config *config) const
-{
- sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
- return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
-}
-
-void PolicyAudioPortConfig::toPolicyAudioPortConfig(struct audio_port_config *dstConfig,
- const struct audio_port_config *srcConfig) const
-{
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
- if ((srcConfig != nullptr) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS)) {
- dstConfig->flags = srcConfig->flags;
- } else {
- dstConfig->flags = mFlags;
- }
- } else {
- dstConfig->flags = { AUDIO_INPUT_FLAG_NONE };
- }
-}
-
-
-
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 84ed656..4dfef73 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -123,6 +123,7 @@
static constexpr const char *flags = "flags";
static constexpr const char *maxOpenCount = "maxOpenCount";
static constexpr const char *maxActiveCount = "maxActiveCount";
+ static constexpr const char *recommendedMuteDurationMs = "recommendedMuteDurationMs";
};
// Children: GainTraits
@@ -496,6 +497,13 @@
if (!maxActiveCount.empty()) {
convertTo(maxActiveCount, mixPort->maxActiveCount);
}
+
+ std::string recommendedmuteDurationMsLiteral =
+ getXmlAttribute(child, Attributes::recommendedMuteDurationMs);
+ if (!recommendedmuteDurationMsLiteral.empty()) {
+ convertTo(recommendedmuteDurationMsLiteral, mixPort->recommendedMuteDurationMs);
+ }
+
// Deserialize children
AudioGainTraits::Collection gains;
status = deserializeCollection<AudioGainTraits>(child, &gains, NULL);
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
index 98415b7..ce78eb0 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -10,18 +10,6 @@
samplingRates="24000,16000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
- <!-- Le Audio Audio Ports -->
- <mixPort name="le audio output" role="source">
- <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
- samplingRates="8000,16000,24000,32000,44100,48000"
- channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
- <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
- samplingRates="8000,16000,24000,32000,44100,48000"
- channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
- <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
- samplingRates="8000,16000,24000,32000,44100,48000"
- channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
- </mixPort>
</mixPorts>
<devicePorts>
<!-- A2DP Audio Ports -->
@@ -42,13 +30,6 @@
</devicePort>
<!-- Hearing AIDs Audio Ports -->
<devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
- <!-- BLE Audio Ports -->
- <!-- Note that these device types are not valid in HAL versions < 7. Any device
- running pre-V7 HAL and using this file will not pass VTS. Need to use
- bluetooth_audio_policy_configuration_7_0.xml instead.
- -->
- <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
- <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
</devicePorts>
<routes>
<route type="mix" sink="BT A2DP Out"
@@ -59,9 +40,5 @@
sources="a2dp output"/>
<route type="mix" sink="BT Hearing Aid Out"
sources="hearing aid output"/>
- <route type="mix" sink="BLE Headset Out"
- sources="le audio output"/>
- <route type="mix" sink="BLE Speaker Out"
- sources="le audio output"/>
</routes>
</module>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
index fbe7571..2dffe02 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
@@ -10,18 +10,6 @@
samplingRates="24000 16000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
- <!-- Le Audio Audio Ports -->
- <mixPort name="le audio output" role="source">
- <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
- samplingRates="8000 16000 24000 32000 44100 48000"
- channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
- <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
- samplingRates="8000 16000 24000 32000 44100 48000"
- channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
- <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
- samplingRates="8000 16000 24000 32000 44100 48000"
- channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
- </mixPort>
</mixPorts>
<devicePorts>
<!-- A2DP Audio Ports -->
@@ -42,9 +30,6 @@
</devicePort>
<!-- Hearing AIDs Audio Ports -->
<devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
- <!-- BLE Audio Ports -->
- <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
- <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
</devicePorts>
<routes>
<route type="mix" sink="BT A2DP Out"
@@ -55,9 +40,5 @@
sources="a2dp output"/>
<route type="mix" sink="BT Hearing Aid Out"
sources="hearing aid output"/>
- <route type="mix" sink="BLE Headset Out"
- sources="le audio output"/>
- <route type="mix" sink="BLE Speaker Out"
- sources="le audio output"/>
</routes>
</module>
diff --git a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml
new file mode 100644
index 0000000..22ff954
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration.xml
@@ -0,0 +1,81 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+ <mixPorts>
+ <!-- A2DP Audio Ports -->
+ <mixPort name="a2dp output" role="source"/>
+ <!-- Hearing AIDs Audio Ports -->
+ <mixPort name="hearing aid output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000,16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <!-- Le Audio Audio Ports -->
+ <mixPort name="le audio output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="le audio input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- A2DP Audio Ports -->
+ <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <!-- Hearing AIDs Audio Ports -->
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ <!-- BLE Audio Ports -->
+ <!-- Note that these device types are not valid in HAL versions < 7. Any device
+ running pre-V7 HAL and using this file will not pass VTS. Need to use
+ bluetooth_audio_policy_configuration_7_0.xml instead.
+ -->
+ <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
+ <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+ <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT A2DP Out"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Headphones"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Speaker"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT Hearing Aid Out"
+ sources="hearing aid output"/>
+ <route type="mix" sink="BLE Headset Out"
+ sources="le audio output"/>
+ <route type="mix" sink="le audio input"
+ sources="BLE Headset In"/>
+ <route type="mix" sink="BLE Speaker Out"
+ sources="le audio output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..aad00d6
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+ <mixPorts>
+ <!-- A2DP Audio Ports -->
+ <mixPort name="a2dp output" role="source"/>
+ <!-- Hearing AIDs Audio Ports -->
+ <mixPort name="hearing aid output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000 16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <!-- Le Audio Audio Ports -->
+ <mixPort name="le audio output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="le audio input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- A2DP Audio Ports -->
+ <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000 88200 96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000 88200 96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000 88200 96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <!-- Hearing AIDs Audio Ports -->
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ <!-- BLE Audio Ports -->
+ <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
+ <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+ <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT A2DP Out"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Headphones"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Speaker"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT Hearing Aid Out"
+ sources="hearing aid output"/>
+ <route type="mix" sink="BLE Headset Out"
+ sources="le audio output"/>
+ <route type="mix" sink="le audio input"
+ sources="BLE Headset In"/>
+ <route type="mix" sink="BLE Speaker Out"
+ sources="le audio output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/le_audio_policy_configuration.xml b/services/audiopolicy/config/le_audio_policy_configuration.xml
index a3dc72b..dcdd805 100644
--- a/services/audiopolicy/config/le_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/le_audio_policy_configuration.xml
@@ -7,13 +7,20 @@
samplingRates="8000,16000,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
+ <mixPort name="le audio input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT,AUDIO_FORMAT_PCM_24_BIT,AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
</mixPorts>
<devicePorts>
<devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
<devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+ <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
</devicePorts>
<routes>
<route type="mix" sink="BLE Headset Out" sources="le audio output"/>
<route type="mix" sink="BLE Speaker Out" sources="le audio output"/>
+ <route type="mix" sink="le audio input" sources="BLE Headset In"/>
</routes>
</module>
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index d39eff6..665c2dd 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -69,12 +69,6 @@
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT,
AUDIO_FLAG_NONE, ""},
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
- AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
- AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
- AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_EVENT,
AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
}
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index b3d144f..fbfcf72 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -150,12 +150,8 @@
void ProductStrategy::dump(String8 *dst, int spaces) const
{
dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
- std::string deviceLiteral;
- if (!deviceTypesToString(mApplicableDevices, deviceLiteral)) {
- ALOGE("%s: failed to convert device %s",
- __FUNCTION__, dumpDeviceTypes(mApplicableDevices).c_str());
- }
- dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
+ std::string deviceLiteral = deviceTypesToString(mApplicableDevices);
+ dst->appendFormat("%*sSelected Device: {%s, @:%s}\n", spaces + 2, "",
deviceLiteral.c_str(), mDeviceAddress.c_str());
for (const auto &attr : mAttributesVector) {
@@ -333,4 +329,3 @@
dst->appendFormat("\n");
}
}
-
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index a747822..dc8d9cf 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -41,8 +41,9 @@
"libaudiopolicyengineconfigurable_pfwwrapper",
],
- shared_libs: [
+ shared_libs: [
"libaudiofoundation",
+ "libbase",
"liblog",
"libcutils",
"libutils",
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
index bc32416..0ddf66d 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -57,9 +57,6 @@
<ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
<AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
- <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
- <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
- <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 7f9c0ac..4671fe9 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -31,6 +31,7 @@
],
shared_libs: [
"libaudiofoundation",
+ "libbase",
"liblog",
"libcutils",
"libutils",
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
index bc32416..0ddf66d 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -57,9 +57,6 @@
<ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
<AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
- <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
- <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
- <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
index 7000cd9..8584702 100644
--- a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -163,7 +163,9 @@
AUDIO_FLAG_BYPASS_MUTE, AUDIO_FLAG_LOW_LATENCY,
AUDIO_FLAG_DEEP_BUFFER, AUDIO_FLAG_NO_MEDIA_PROJECTION,
AUDIO_FLAG_MUTE_HAPTIC, AUDIO_FLAG_NO_SYSTEM_CAPTURE,
- AUDIO_FLAG_CAPTURE_PRIVATE};
+ AUDIO_FLAG_CAPTURE_PRIVATE, AUDIO_FLAG_CONTENT_SPATIALIZED,
+ AUDIO_FLAG_NEVER_SPATIALIZE,
+ };
std::vector<audio_policy_dev_state_t> kAudioPolicyDeviceStates = {
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index cc2d8e8..00c1f26 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -246,8 +246,8 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
// close unused outputs after device disconnection or direct outputs that have
// been opened by checkOutputsForDevice() to query dynamic parameters
- if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
- (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+ if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE)
+ || (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
(desc->mDirectOpenCount == 0))) {
clearAudioSourcesForOutput(output);
closeOutput(output);
@@ -525,10 +525,10 @@
return NO_ERROR;
}
-status_t AudioPolicyManager::getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<audio_format_t> *formats)
+status_t AudioPolicyManager::getHwOffloadFormatsSupportedForBluetoothMedia(
+ audio_devices_t device, std::vector<audio_format_t> *formats)
{
- ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()");
+ ALOGV("getHwOffloadFormatsSupportedForBluetoothMedia()");
status_t status = NO_ERROR;
std::unordered_set<audio_format_t> formatSet;
sp<HwModule> primaryModule =
@@ -537,8 +537,23 @@
ALOGE("%s() unable to get primary module", __func__);
return NO_INIT;
}
+
+ DeviceTypeSet audioDeviceSet;
+
+ switch(device) {
+ case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
+ audioDeviceSet = getAudioDeviceOutAllA2dpSet();
+ break;
+ case AUDIO_DEVICE_OUT_BLE_HEADSET:
+ audioDeviceSet = getAudioDeviceOutAllBleSet();
+ break;
+ default:
+ ALOGE("%s() device type 0x%08x not supported", __func__, device);
+ return BAD_VALUE;
+ }
+
DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypes(
- getAudioDeviceOutAllA2dpSet());
+ audioDeviceSet);
for (const auto& device : declaredDevices) {
formatSet.insert(device->encodedFormats().begin(), device->encodedFormats().end());
}
@@ -925,6 +940,32 @@
return profile;
}
+sp<IOProfile> AudioPolicyManager::getSpatializerOutputProfile(
+ const audio_config_t *config __unused, const AudioDeviceTypeAddrVector &devices) const
+{
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->getOutputProfiles()) {
+ if (curProfile->getFlags() != AUDIO_OUTPUT_FLAG_SPATIALIZER) {
+ continue;
+ }
+ // reject profiles not corresponding to a device currently available
+ DeviceVector supportedDevices = curProfile->getSupportedDevices();
+ if (!mAvailableOutputDevices.containsAtLeastOne(supportedDevices)) {
+ continue;
+ }
+ if (!devices.empty()) {
+ if (supportedDevices.getDevicesFromDeviceTypeAddrVec(devices).size()
+ != devices.size()) {
+ continue;
+ }
+ }
+ ALOGV("%s found profile %s", __func__, curProfile->getName().c_str());
+ return curProfile;
+ }
+ }
+ return nullptr;
+}
+
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
{
DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/);
@@ -1094,7 +1135,7 @@
*output = AUDIO_IO_HANDLE_NONE;
if (!msdDevices.isEmpty()) {
- *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
+ *output = getOutputForDevices(msdDevices, session, resultAttr, config, flags);
if (*output != AUDIO_IO_HANDLE_NONE && setMsdOutputPatches(&outputDevices) == NO_ERROR) {
ALOGV("%s() Using MSD devices %s instead of devices %s",
__func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
@@ -1103,7 +1144,7 @@
}
}
if (*output == AUDIO_IO_HANDLE_NONE) {
- *output = getOutputForDevices(outputDevices, session, *stream, config,
+ *output = getOutputForDevices(outputDevices, session, resultAttr, config,
flags, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC);
}
if (*output == AUDIO_IO_HANDLE_NONE) {
@@ -1265,7 +1306,8 @@
// all MSD patches to prioritize this request over any active output on MSD.
releaseMsdOutputPatches(devices);
- status_t status = outputDesc->open(config, devices, stream, flags, output);
+ status_t status =
+ outputDesc->open(config, nullptr /* mixerConfig */, devices, stream, flags, output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
@@ -1300,7 +1342,7 @@
audio_io_handle_t AudioPolicyManager::getOutputForDevices(
const DeviceVector &devices,
audio_session_t session,
- audio_stream_type_t stream,
+ const audio_attributes_t *attr,
const audio_config_t *config,
audio_output_flags_t *flags,
bool forceMutingHaptic)
@@ -1322,6 +1364,9 @@
if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
+
+ audio_stream_type_t stream = mEngine->getStreamTypeForAttributes(*attr);
+
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
*flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
@@ -1341,6 +1386,11 @@
ALOGV("Set VoIP and Direct output flags for PCM format");
}
+ if (mSpatializerOutput != nullptr
+ && canBeSpatialized(attr, config, devices.toTypeAddrVector())) {
+ return mSpatializerOutput->mIoHandle;
+ }
+
audio_config_t directConfig = *config;
directConfig.channel_mask = channelMask;
status_t status = openDirectOutput(stream, session, &directConfig, *flags, devices, &output);
@@ -2123,8 +2173,9 @@
audio_port_handle_t *portId)
{
ALOGV("%s() source %d, sampling rate %d, format %#x, channel mask %#x, session %d, "
- "flags %#x attributes=%s", __func__, attr->source, config->sample_rate,
- config->format, config->channel_mask, session, flags, toString(*attr).c_str());
+ "flags %#x attributes=%s requested device ID %d",
+ __func__, attr->source, config->sample_rate, config->format, config->channel_mask,
+ session, flags, toString(*attr).c_str(), *selectedDeviceId);
status_t status = NO_ERROR;
audio_source_t halInputSource;
@@ -2147,7 +2198,7 @@
}
// Explicit routing?
- sp<DeviceDescriptor> explicitRoutingDevice =
+ sp<DeviceDescriptor> explicitRoutingDevice =
mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
// special case for mmap capture: if an input IO handle is specified, we reuse this input if
@@ -2333,7 +2384,7 @@
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
ALOGW("%s could not find profile for device %s, sampling rate %u, format %#x, "
- "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(),
+ "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(),
config->sample_rate, config->format, config->channel_mask, flags);
return input;
}
@@ -3532,7 +3583,7 @@
void AudioPolicyManager::dump(String8 *dst) const
{
dst->appendFormat("\nAudioPolicyManager Dump: %p\n", this);
- dst->appendFormat(" Primary Output: %d\n",
+ dst->appendFormat(" Primary Output I/O handle: %d\n",
hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
std::string stateLiteral;
AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
@@ -3557,12 +3608,14 @@
dst->appendFormat(" Communnication Strategy: %d\n", mCommunnicationStrategy);
dst->appendFormat(" Config source: %s\n", mConfig.getSource().c_str()); // getConfig not const
- mAvailableOutputDevices.dump(dst, String8("Available output"));
- mAvailableInputDevices.dump(dst, String8("Available input"));
+ dst->append("\n");
+ mAvailableOutputDevices.dump(dst, String8("Available output"), 1);
+ dst->append("\n");
+ mAvailableInputDevices.dump(dst, String8("Available input"), 1);
mHwModulesAll.dump(dst);
mOutputs.dump(dst);
mInputs.dump(dst);
- mEffects.dump(dst);
+ mEffects.dump(dst, 1);
mAudioPatches.dump(dst);
mPolicyMixes.dump(dst);
mAudioSources.dump(dst);
@@ -4065,7 +4118,7 @@
}
if (outputDesc != nullptr) {
audio_port_config srcMixPortConfig = {};
- outputDesc->toAudioPortConfig(&srcMixPortConfig, &patch->sources[0]);
+ outputDesc->toAudioPortConfig(&srcMixPortConfig, nullptr);
// for volume control, we may need a valid stream
srcMixPortConfig.ext.mix.usecase.stream = sourceDesc != nullptr ?
sourceDesc->stream() : AUDIO_STREAM_PATCH;
@@ -4802,6 +4855,205 @@
return source;
}
+/* static */
+bool AudioPolicyManager::isChannelMaskSpatialized(audio_channel_mask_t channels) {
+ switch (channels) {
+ case AUDIO_CHANNEL_OUT_5POINT1:
+ case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+ case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+ case AUDIO_CHANNEL_OUT_7POINT1:
+ case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+ case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool AudioPolicyManager::canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const
+{
+ // The caller can have the audio attributes criteria ignored by either passing a null ptr or
+ // the AUDIO_ATTRIBUTES_INITIALIZER value.
+ // If attributes are specified, current policy is to only allow spatialization for media
+ // and game usages.
+ if (attr != nullptr && *attr != AUDIO_ATTRIBUTES_INITIALIZER) {
+ if (attr->usage != AUDIO_USAGE_MEDIA && attr->usage != AUDIO_USAGE_GAME) {
+ return false;
+ }
+ if ((attr->flags & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) != 0) {
+ return false;
+ }
+ }
+
+ // The caller can have the devices criteria ignored by passing and empty vector, and
+ // getSpatializerOutputProfile() will ignore the devices when looking for a match.
+ // Otherwise an output profile supporting a spatializer effect that can be routed
+ // to the specified devices must exist.
+ sp<IOProfile> profile =
+ getSpatializerOutputProfile(config, devices);
+ if (profile == nullptr) {
+ return false;
+ }
+
+ // The caller can have the audio config criteria ignored by either passing a null ptr or
+ // the AUDIO_CONFIG_INITIALIZER value.
+ // If an audio config is specified, current policy is to only allow spatialization for
+ // some positional channel masks.
+ // If the spatializer output is already opened, only channel masks included in the
+ // spatializer output mixer channel mask are allowed.
+
+ if (config != nullptr && *config != AUDIO_CONFIG_INITIALIZER) {
+ if (!isChannelMaskSpatialized(config->channel_mask)) {
+ return false;
+ }
+ if (mSpatializerOutput != nullptr && mSpatializerOutput->mProfile == profile) {
+ if ((config->channel_mask & mSpatializerOutput->mMixerChannelMask)
+ != config->channel_mask) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void AudioPolicyManager::checkVirtualizerClientRoutes() {
+ std::set<audio_stream_type_t> streamsToInvalidate;
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ const sp<SwAudioOutputDescriptor>& desc = mOutputs[i];
+ for (const sp<TrackClientDescriptor>& client : desc->getClientIterable()) {
+ audio_attributes_t attr = client->attributes();
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, false);
+ AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+ audio_config_base_t clientConfig = client->config();
+ audio_config_t config = audio_config_initializer(&clientConfig);
+ if (desc != mSpatializerOutput
+ && canBeSpatialized(&attr, &config, devicesTypeAddress)) {
+ streamsToInvalidate.insert(client->stream());
+ }
+ }
+ }
+
+ for (audio_stream_type_t stream : streamsToInvalidate) {
+ mpClientInterface->invalidateStream(stream);
+ }
+}
+
+status_t AudioPolicyManager::getSpatializerOutput(const audio_config_base_t *mixerConfig,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output) {
+ *output = AUDIO_IO_HANDLE_NONE;
+
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(*attr, nullptr, false);
+ AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+ audio_config_t *configPtr = nullptr;
+ audio_config_t config;
+ if (mixerConfig != nullptr) {
+ config = audio_config_initializer(mixerConfig);
+ configPtr = &config;
+ }
+ if (!canBeSpatialized(attr, configPtr, devicesTypeAddress)) {
+ ALOGW("%s provided attributes or mixer config cannot be spatialized", __func__);
+ return BAD_VALUE;
+ }
+
+ sp<IOProfile> profile =
+ getSpatializerOutputProfile(configPtr, devicesTypeAddress);
+ if (profile == nullptr) {
+ ALOGW("%s no suitable output profile for provided attributes or mixer config", __func__);
+ return BAD_VALUE;
+ }
+
+ if (mSpatializerOutput != nullptr && mSpatializerOutput->mProfile == profile
+ && configPtr != nullptr
+ && configPtr->channel_mask == mSpatializerOutput->mMixerChannelMask) {
+ *output = mSpatializerOutput->mIoHandle;
+ ALOGV("%s returns current spatializer output %d", __func__, *output);
+ return NO_ERROR;
+ }
+ mSpatializerOutput.clear();
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+ if (!desc->isDuplicated() && desc->mProfile == profile) {
+ mSpatializerOutput = desc;
+ break;
+ }
+ }
+ if (mSpatializerOutput == nullptr) {
+ ALOGW("%s no opened spatializer output for profile %s",
+ __func__, profile->getName().c_str());
+ return BAD_VALUE;
+ }
+
+ if (configPtr != nullptr
+ && configPtr->channel_mask != mSpatializerOutput->mMixerChannelMask) {
+ audio_config_base_t savedMixerConfig = {
+ .sample_rate = mSpatializerOutput->getSamplingRate(),
+ .format = mSpatializerOutput->getFormat(),
+ .channel_mask = mSpatializerOutput->mMixerChannelMask,
+ };
+ DeviceVector savedDevices = mSpatializerOutput->devices();
+
+ closeOutput(mSpatializerOutput->mIoHandle);
+ mSpatializerOutput.clear();
+
+ const sp<SwAudioOutputDescriptor> desc =
+ new SwAudioOutputDescriptor(profile, mpClientInterface);
+ status_t status = desc->open(nullptr, mixerConfig, devices,
+ mEngine->getStreamTypeForAttributes(*attr),
+ AUDIO_OUTPUT_FLAG_SPATIALIZER, output);
+ if (status != NO_ERROR) {
+ ALOGW("%s failed opening output: status %d, output %d", __func__, status, *output);
+ if (*output != AUDIO_IO_HANDLE_NONE) {
+ desc->close();
+ }
+ // re open the spatializer output with previous channel mask
+ status_t newStatus = desc->open(nullptr, &savedMixerConfig, savedDevices,
+ mEngine->getStreamTypeForAttributes(*attr),
+ AUDIO_OUTPUT_FLAG_SPATIALIZER, output);
+ if (newStatus != NO_ERROR) {
+ if (*output != AUDIO_IO_HANDLE_NONE) {
+ desc->close();
+ }
+ ALOGE("%s failed to re-open mSpatializerOutput, status %d", __func__, newStatus);
+ } else {
+ mSpatializerOutput = desc;
+ addOutput(*output, desc);
+ }
+ mPreviousOutputs = mOutputs;
+ mpClientInterface->onAudioPortListUpdate();
+ *output = AUDIO_IO_HANDLE_NONE;
+ return status;
+ }
+ mSpatializerOutput = desc;
+ addOutput(*output, desc);
+ mPreviousOutputs = mOutputs;
+ mpClientInterface->onAudioPortListUpdate();
+ }
+
+ checkVirtualizerClientRoutes();
+
+ *output = mSpatializerOutput->mIoHandle;
+ ALOGV("%s returns new spatializer output %d", __func__, *output);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::releaseSpatializerOutput(audio_io_handle_t output) {
+ if (mSpatializerOutput == nullptr) {
+ return INVALID_OPERATION;
+ }
+ if (mSpatializerOutput->mIoHandle != output) {
+ return BAD_VALUE;
+ }
+
+ mSpatializerOutput.clear();
+
+ checkVirtualizerClientRoutes();
+
+ return NO_ERROR;
+}
+
// ----------------------------------------------------------------------------
// AudioPolicyManager
// ----------------------------------------------------------------------------
@@ -4851,6 +5103,8 @@
ALOGE("could not load audio policy configuration file, setting defaults");
getConfig().setDefault();
}
+ // TODO: b/193496180 use spatializer flag at audio HAL when available
+ getConfig().convertSpatializerFlag();
}
status_t AudioPolicyManager::initialize() {
@@ -4953,9 +5207,8 @@
continue;
}
mHwModules.push_back(hwModule);
- // open all output streams needed to access attached devices
- // except for direct output streams that are only opened when they are actually
- // required by an app.
+ // Open all output streams needed to access attached devices.
+ // Direct outputs are closed immediately after checking the availability of attached devices.
// This also validates mAvailableOutputDevices list
for (const auto& outProfile : hwModule->getOutputProfiles()) {
if (!outProfile->canOpenNewIo()) {
@@ -4990,7 +5243,8 @@
sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
+ status_t status = outputDesc->open(nullptr /* halConfig */, nullptr /* mixerConfig */,
+ DeviceVector(supportedDevice),
AUDIO_STREAM_DEFAULT,
AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
@@ -5351,7 +5605,7 @@
} // endif input != 0
if (input == AUDIO_IO_HANDLE_NONE) {
- ALOGW("%s could not open input for device %s", __func__,
+ ALOGW("%s could not open input for device %s", __func__,
device->toString().c_str());
profiles.removeAt(profile_index);
profile_index--;
@@ -5720,14 +5974,20 @@
client->getSecondaryOutputs().begin(),
client->getSecondaryOutputs().end(),
secondaryDescs.begin(), secondaryDescs.end())) {
- std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
- std::vector<audio_io_handle_t> secondaryOutputIds;
- for (const auto& secondaryDesc : secondaryDescs) {
- secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
- weakSecondaryDescs.push_back(secondaryDesc);
+ if (!audio_is_linear_pcm(client->config().format)) {
+ // If the format is not PCM, the tracks should be invalidated to get correct
+ // behavior when the secondary output is changed.
+ streamsToInvalidate.insert(client->stream());
+ } else {
+ std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
+ std::vector<audio_io_handle_t> secondaryOutputIds;
+ for (const auto &secondaryDesc: secondaryDescs) {
+ secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
+ weakSecondaryDescs.push_back(secondaryDesc);
+ }
+ trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
+ client->setSecondaryOutputs(std::move(weakSecondaryDescs));
}
- trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
- client->setSecondaryOutputs(std::move(weakSecondaryDescs));
}
}
}
@@ -5895,11 +6155,11 @@
uid_t uid;
sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
if (topClient != nullptr) {
- attributes = topClient->attributes();
- uid = topClient->uid();
+ attributes = topClient->attributes();
+ uid = topClient->uid();
} else {
- attributes = { .source = AUDIO_SOURCE_DEFAULT };
- uid = 0;
+ attributes = { .source = AUDIO_SOURCE_DEFAULT };
+ uid = 0;
}
if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
@@ -5917,13 +6177,13 @@
return (stream1 == stream2);
}
-audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
// By checking the range of stream before calling getStrategy, we avoid
// getOutputDevicesForStream's behavior for invalid streams.
// engine's getOutputDevicesForStream would fallback on its default behavior (most probably
// device for music stream), but we want to return the empty set.
if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
- return AUDIO_DEVICE_NONE;
+ return DeviceTypeSet{};
}
DeviceVector activeDevices;
DeviceVector devices;
@@ -5954,8 +6214,7 @@
devices.merge(mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
devices.remove(speakerSafeDevices);
}
- // FIXME: use DeviceTypeSet when Java layer is ready for it.
- return deviceTypesToBitMask(devices.types());
+ return devices.types();
}
status_t AudioPolicyManager::getDevicesForAttributes(
@@ -6119,11 +6378,18 @@
// different per device volumes
if (outputDesc->isActive() && (devices != prevDevices)) {
uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
- // temporary mute duration is conservatively set to 4 times the reported latency
- uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
+
if (muteWaitMs < tempMuteWaitMs) {
muteWaitMs = tempMuteWaitMs;
}
+
+ // If a recommended mute duration is defined, use it as the temporary mute duration to avoid
+ // truncated notifications at the beginning; it depends on how long the HAL takes to change
+ // the audio path. Otherwise, conservatively set it to 4 times the reported latency.
+ uint32_t tempRecommendedMuteDuration = outputDesc->getRecommendedMuteDurationMs();
+ uint32_t tempMuteDurationMs = tempRecommendedMuteDuration > 0 ?
+ tempRecommendedMuteDuration : outputDesc->latency() * 4;
+
for (const auto &activeVs : outputDesc->getActiveVolumeSources()) {
// make sure that we do not start the temporary mute period too early in case of
// delayed device change
@@ -6548,7 +6814,7 @@
outputDesc->setVolume(
volumeDb, volumeSource, curves.getStreamTypes(), deviceTypes, delayMs, force);
- if (isVoiceVolSrc || isBtScoVolSrc) {
+ if (outputDesc == mPrimaryOutput && (isVoiceVolSrc || isBtScoVolSrc)) {
float voiceVolume;
// Force voice volume to max or mute for Bluetooth SCO as other attenuations are managed by the headset
if (isVoiceVolSrc) {
@@ -6693,8 +6959,8 @@
{
audio_mode_t mode = mEngine->getPhoneState();
return (mode == AUDIO_MODE_IN_CALL)
- || (mode == AUDIO_MODE_IN_COMMUNICATION)
- || (mode == AUDIO_MODE_CALL_SCREEN);
+ || (mode == AUDIO_MODE_CALL_SCREEN)
+ || (mode == AUDIO_MODE_CALL_REDIRECT);
}
void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
@@ -6995,7 +7261,7 @@
}
sp<SwAudioOutputDescriptor> desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = desc->open(nullptr, devices,
+ status_t status = desc->open(nullptr /* halConfig */, nullptr /* mixerConfig */, devices,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
return nullptr;
@@ -7025,7 +7291,7 @@
config.offload_info.channel_mask = config.channel_mask;
config.offload_info.format = config.format;
- status = desc->open(&config, devices,
+ status = desc->open(&config, nullptr /* mixerConfig */, devices,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
return nullptr;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 98f96d1..8a85b95 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -193,7 +193,7 @@
}
// return the enabled output devices for the given stream type
- virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+ virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
virtual status_t getDevicesForAttributes(
const audio_attributes_t &attributes,
@@ -320,8 +320,8 @@
audio_format_t *surroundFormats);
virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
- virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<audio_format_t> *formats);
+ virtual status_t getHwOffloadFormatsSupportedForBluetoothMedia(
+ audio_devices_t device, std::vector<audio_format_t> *formats);
virtual void setAppState(audio_port_handle_t portId, app_state_t state);
@@ -356,6 +356,16 @@
BAD_VALUE : NO_ERROR;
}
+ virtual bool canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const;
+
+ virtual status_t getSpatializerOutput(const audio_config_base_t *config,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output);
+
+ virtual status_t releaseSpatializerOutput(audio_io_handle_t output);
+
bool isCallScreenModeSupported() override;
void onNewAudioModulesAvailable() override;
@@ -797,6 +807,8 @@
sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor
// list of descriptors for outputs currently opened
+ sp<SwAudioOutputDescriptor> mSpatializerOutput;
+
SwAudioOutputCollection mOutputs;
// copy of mOutputs before setDeviceConnectionState() opens new outputs
// reset to mOutputs when updateDevicesAndOutputs() is called.
@@ -933,7 +945,7 @@
audio_io_handle_t getOutputForDevices(
const DeviceVector &devices,
audio_session_t session,
- audio_stream_type_t stream,
+ const audio_attributes_t *attr,
const audio_config_t *config,
audio_output_flags_t *flags,
bool forceMutingHaptic = false);
@@ -948,6 +960,14 @@
audio_output_flags_t flags,
const DeviceVector &devices,
audio_io_handle_t *output);
+
+ sp<IOProfile> getSpatializerOutputProfile(const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const;
+
+ static bool isChannelMaskSpatialized(audio_channel_mask_t channels);
+
+ void checkVirtualizerClientRoutes();
+
/**
* @brief getInputForDevice selects an input handle for a given input device and
* requester context
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index 454c020..cdad9a6 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -16,6 +16,8 @@
"AudioPolicyInterfaceImpl.cpp",
"AudioPolicyService.cpp",
"CaptureStateNotifier.cpp",
+ "Spatializer.cpp",
+ "SpatializerPoseController.cpp",
],
include_dirs: [
@@ -27,6 +29,7 @@
"libaudioclient",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
+ "libaudiohal",
"libaudiopolicy",
"libaudiopolicymanagerdefault",
"libaudioutils",
@@ -34,19 +37,27 @@
"libcutils",
"libeffectsconfig",
"libhardware_legacy",
+ "libheadtracking",
+ "libheadtracking-binding",
"liblog",
"libmedia_helper",
"libmediametrics",
"libmediautils",
"libpermission",
+ "libsensor",
"libsensorprivacy",
+ "libshmemcompat",
"libutils",
+ "libstagefright_foundation",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
"audiopolicy-types-aidl-cpp",
"capture_state_listener-aidl-cpp",
"framework-permission-aidl-cpp",
+ "packagemanager_aidl-cpp",
+ "spatializer-aidl-cpp",
],
static_libs: [
@@ -55,6 +66,7 @@
],
header_libs: [
+ "libaudiohal_headers",
"libaudiopolicycommon",
"libaudiopolicyengine_interface_headers",
"libaudiopolicymanager_interface_headers",
@@ -70,6 +82,8 @@
export_shared_lib_headers: [
"libactivitymanager_aidl",
+ "libheadtracking",
+ "libheadtracking-binding",
"libsensorprivacy",
"framework-permission-aidl-cpp",
],
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index cd53073..aaf6fba 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -40,7 +40,8 @@
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags)
@@ -55,14 +56,18 @@
media::OpenOutputResponse response;
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+ request.halConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(*halConfig, false /*isInput*/));
+ request.mixerConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(*mixerConfig, false /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
status_t status = af->openOutput(request, &response);
if (status == OK) {
*output = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.output));
- *config = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfig_audio_config_t(response.config));
+ *halConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(response.config, false /*isInput*/));
*latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(response.latencyMs));
}
return status;
@@ -131,9 +136,10 @@
media::OpenInputRequest request;
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+ request.config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
- request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+ request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSource(source));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
media::OpenInputResponse response;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 77223b6..1fbea7d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -32,6 +32,9 @@
if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
std::move(_tmp.value()); })
+#define RETURN_BINDER_STATUS_IF_ERROR(x) \
+ if (status_t _tmp = (x); _tmp != OK) return aidl_utils::binderStatusFromStatusT(_tmp);
+
#define RETURN_IF_BINDER_ERROR(x) \
{ \
binder::Status _tmp = (x); \
@@ -44,6 +47,19 @@
using binder::Status;
using aidl_utils::binderStatusFromStatusT;
using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
const std::vector<audio_usage_t>& SYSTEM_USAGES = {
AUDIO_USAGE_CALL_ASSISTANT,
@@ -63,15 +79,22 @@
!= std::end(mSupportedSystemUsages);
}
-status_t AudioPolicyService::validateUsage(audio_usage_t usage) {
- return validateUsage(usage, getCallingAttributionSource());
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr) {
+ return validateUsage(attr, getCallingAttributionSource());
}
-status_t AudioPolicyService::validateUsage(audio_usage_t usage,
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr,
const AttributionSourceState& attributionSource) {
- if (isSystemUsage(usage)) {
- if (isSupportedSystemUsage(usage)) {
- if (!modifyAudioRoutingAllowed(attributionSource)) {
+ if (isSystemUsage(attr.usage)) {
+ if (isSupportedSystemUsage(attr.usage)) {
+ if (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
+ && ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)) {
+ if (!callAudioInterceptionAllowed(attributionSource)) {
+ ALOGE(("permission denied: modify audio routing not allowed "
+ "for attributionSource %s"), attributionSource.toString().c_str());
+ return PERMISSION_DENIED;
+ }
+ } else if (!modifyAudioRoutingAllowed(attributionSource)) {
ALOGE(("permission denied: modify audio routing not allowed "
"for attributionSource %s"), attributionSource.toString().c_str());
return PERMISSION_DENIED;
@@ -96,16 +119,18 @@
}
Status AudioPolicyService::setDeviceConnectionState(
- const media::AudioDevice& deviceAidl,
+ const AudioDevice& deviceAidl,
media::AudioPolicyDeviceState stateAidl,
const std::string& deviceNameAidl,
- media::audio::common::AudioFormat encodedFormatAidl) {
- audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+ const AudioFormatDescription& encodedFormatAidl) {
+ audio_devices_t device;
+ std::string address;
+ RETURN_BINDER_STATUS_IF_ERROR(
+ aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
audio_policy_dev_state_t state = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioPolicyDeviceState_audio_policy_dev_state_t(stateAidl));
audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+ aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -121,17 +146,20 @@
ALOGV("setDeviceConnectionState()");
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return binderStatusFromStatusT(
- mAudioPolicyManager->setDeviceConnectionState(device, state,
- deviceAidl.address.c_str(),
- deviceNameAidl.c_str(),
- encodedFormat));
+ status_t status = mAudioPolicyManager->setDeviceConnectionState(
+ device, state, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
-Status AudioPolicyService::getDeviceConnectionState(const media::AudioDevice& deviceAidl,
+Status AudioPolicyService::getDeviceConnectionState(const AudioDevice& deviceAidl,
media::AudioPolicyDeviceState* _aidl_return) {
- audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+ audio_devices_t device;
+ std::string address;
+ RETURN_BINDER_STATUS_IF_ERROR(
+ aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
if (mAudioPolicyManager == NULL) {
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
@@ -141,19 +169,21 @@
AutoCallerClear acc;
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
- mAudioPolicyManager->getDeviceConnectionState(device,
- deviceAidl.address.c_str())));
+ mAudioPolicyManager->getDeviceConnectionState(
+ device, address.c_str())));
return Status::ok();
}
Status AudioPolicyService::handleDeviceConfigChange(
- const media::AudioDevice& deviceAidl,
+ const AudioDevice& deviceAidl,
const std::string& deviceNameAidl,
- media::audio::common::AudioFormat encodedFormatAidl) {
- audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+ const AudioFormatDescription& encodedFormatAidl) {
+ audio_devices_t device;
+ std::string address;
+ RETURN_BINDER_STATUS_IF_ERROR(
+ aidl2legacy_AudioDevice_audio_device(deviceAidl, &device, &address));
audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+ aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -165,12 +195,16 @@
ALOGV("handleDeviceConfigChange()");
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return binderStatusFromStatusT(
- mAudioPolicyManager->handleDeviceConfigChange(device, deviceAidl.address.c_str(),
- deviceNameAidl.c_str(), encodedFormat));
+ status_t status = mAudioPolicyManager->handleDeviceConfigChange(
+ device, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
-Status AudioPolicyService::setPhoneState(media::AudioMode stateAidl, int32_t uidAidl)
+Status AudioPolicyService::setPhoneState(AudioMode stateAidl, int32_t uidAidl)
{
audio_mode_t state = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioMode_audio_mode_t(stateAidl));
@@ -192,7 +226,15 @@
// can be interleaved).
Mutex::Autolock _l(mLock);
// TODO: check if it is more appropriate to do it in platform specific policy manager
- AudioSystem::setMode(state);
+
+ // Audio HAL mode conversion for call redirect modes
+ audio_mode_t halMode = state;
+ if (state == AUDIO_MODE_CALL_REDIRECT) {
+ halMode = AUDIO_MODE_CALL_SCREEN;
+ } else if (state == AUDIO_MODE_COMMUNICATION_REDIRECT) {
+ halMode = AUDIO_MODE_NORMAL;
+ }
+ AudioSystem::setMode(halMode);
AutoCallerClear acc;
mAudioPolicyManager->setPhoneState(state);
@@ -202,7 +244,7 @@
return Status::ok();
}
-Status AudioPolicyService::getPhoneState(media::AudioMode* _aidl_return) {
+Status AudioPolicyService::getPhoneState(AudioMode* _aidl_return) {
Mutex::Autolock _l(mLock);
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_mode_t_AudioMode(mPhoneState));
return Status::ok();
@@ -234,6 +276,7 @@
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
mAudioPolicyManager->setForceUse(usage, config);
+ onCheckSpatializer_l();
return Status::ok();
}
@@ -257,7 +300,7 @@
return Status::ok();
}
-Status AudioPolicyService::getOutput(media::AudioStreamType streamAidl, int32_t* _aidl_return)
+Status AudioPolicyService::getOutput(AudioStreamType streamAidl, int32_t* _aidl_return)
{
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -281,7 +324,7 @@
Status AudioPolicyService::getOutputForAttr(const media::AudioAttributesInternal& attrAidl,
int32_t sessionAidl,
const AttributionSourceState& attributionSource,
- const media::AudioConfig& configAidl,
+ const AudioConfig& configAidl,
int32_t flagsAidl,
int32_t selectedDeviceIdAidl,
media::GetOutputForAttrResponse* _aidl_return)
@@ -292,7 +335,7 @@
aidl2legacy_int32_t_audio_session_t(sessionAidl));
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfig_audio_config_t(configAidl));
+ aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
audio_output_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_output_flags_t_mask(flagsAidl));
audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -308,7 +351,7 @@
RETURN_IF_BINDER_ERROR(
binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, attributionSource)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
ALOGV("%s()", __func__);
Mutex::Autolock _l(mLock);
@@ -350,7 +393,12 @@
case AudioPolicyInterface::API_OUTPUT_LEGACY:
break;
case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
- if (!modifyPhoneStateAllowed(adjAttributionSource)) {
+ if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
+ && !callAudioInterceptionAllowed(adjAttributionSource)) {
+ ALOGE("%s() permission denied: call redirection not allowed for uid %d",
+ __func__, adjAttributionSource.uid);
+ result = PERMISSION_DENIED;
+ } else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
__func__, adjAttributionSource.uid);
result = PERMISSION_DENIED;
@@ -510,7 +558,7 @@
int32_t riidAidl,
int32_t sessionAidl,
const AttributionSourceState& attributionSource,
- const media::AudioConfigBase& configAidl,
+ const AudioConfigBase& configAidl,
int32_t flagsAidl,
int32_t selectedDeviceIdAidl,
media::GetInputForAttrResponse* _aidl_return) {
@@ -523,7 +571,7 @@
audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_session_t(sessionAidl));
audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, true /*isInput*/));
audio_input_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_input_flags_t_mask(flagsAidl));
audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -577,32 +625,43 @@
adjAttributionSource.pid = callingPid;
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage,
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
adjAttributionSource)));
// check calling permissions.
- // Capturing from FM_TUNER source is controlled by captureTunerAudioInputAllowed() and
- // captureAudioOutputAllowed() (deprecated) as this does not affect users privacy
- // as does capturing from an actual microphone.
- if (!(recordingAllowed(adjAttributionSource, attr.source)
- || attr.source == AUDIO_SOURCE_FM_TUNER)) {
+ // Capturing from the following sources does not require permission RECORD_AUDIO
+ // as the captured audio does not come from a microphone:
+ // - FM_TUNER source is controlled by captureTunerAudioInputAllowed() or
+ // captureAudioOutputAllowed() (deprecated).
+ // - REMOTE_SUBMIX source is controlled by captureAudioOutputAllowed() if the input
+ // type is API_INPUT_MIX_EXT_POLICY_REROUTE and by AudioService if a media projection
+ // is used and input type is API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK
+ // - ECHO_REFERENCE source is controlled by captureAudioOutputAllowed()
+ if (!(recordingAllowed(adjAttributionSource, inputSource)
+ || inputSource == AUDIO_SOURCE_FM_TUNER
+ || inputSource == AUDIO_SOURCE_REMOTE_SUBMIX
+ || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)) {
ALOGE("%s permission denied: recording not allowed for %s",
__func__, adjAttributionSource.toString().c_str());
return binderStatusFromStatusT(PERMISSION_DENIED);
}
bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
- if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK ||
- inputSource == AUDIO_SOURCE_VOICE_DOWNLINK ||
- inputSource == AUDIO_SOURCE_VOICE_CALL ||
- inputSource == AUDIO_SOURCE_ECHO_REFERENCE)
- && !canCaptureOutput) {
+ bool canInterceptCallAudio = callAudioInterceptionAllowed(adjAttributionSource);
+ bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
+ || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
+ || inputSource == AUDIO_SOURCE_VOICE_CALL;
+
+ if (isCallAudioSource && !canInterceptCallAudio && !canCaptureOutput) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
-
+ if (inputSource == AUDIO_SOURCE_ECHO_REFERENCE
+ && !canCaptureOutput) {
+ return binderStatusFromStatusT(PERMISSION_DENIED);
+ }
if (inputSource == AUDIO_SOURCE_FM_TUNER
- && !captureTunerAudioInputAllowed(adjAttributionSource)
- && !canCaptureOutput) {
+ && !canCaptureOutput
+ && !captureTunerAudioInputAllowed(adjAttributionSource)) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -644,23 +703,30 @@
case AudioPolicyInterface::API_INPUT_LEGACY:
break;
case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
+ if ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+ && canInterceptCallAudio) {
+ break;
+ }
// FIXME: use the same permission as for remote submix for now.
+ FALLTHROUGH_INTENDED;
case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
if (!canCaptureOutput) {
- ALOGE("getInputForAttr() permission denied: capture not allowed");
+ ALOGE("%s permission denied: capture not allowed", __func__);
status = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
- if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
- ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
+ if (!(modifyAudioRoutingAllowed(adjAttributionSource)
+ || ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+ && canInterceptCallAudio))) {
+ ALOGE("%s permission denied for remote submix capture", __func__);
status = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_INPUT_INVALID:
default:
- LOG_ALWAYS_FATAL("getInputForAttr() encountered an invalid input type %d",
- (int)inputType);
+ LOG_ALWAYS_FATAL("%s encountered an invalid input type %d",
+ __func__, (int)inputType);
}
}
@@ -730,8 +796,10 @@
// check calling permissions
if (!(startRecording(client->attributionSource, String16(msg.str().c_str()),
- client->attributes.source)
- || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
+ client->attributes.source)
+ || client->attributes.source == AUDIO_SOURCE_FM_TUNER
+ || client->attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX
+ || client->attributes.source == AUDIO_SOURCE_ECHO_REFERENCE)) {
ALOGE("%s permission denied: recording not allowed for attribution source %s",
__func__, client->attributionSource.toString().c_str());
return binderStatusFromStatusT(PERMISSION_DENIED);
@@ -901,7 +969,7 @@
return Status::ok();
}
-Status AudioPolicyService::initStreamVolume(media::AudioStreamType streamAidl,
+Status AudioPolicyService::initStreamVolume(AudioStreamType streamAidl,
int32_t indexMinAidl,
int32_t indexMaxAidl) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -924,13 +992,14 @@
return binderStatusFromStatusT(NO_ERROR);
}
-Status AudioPolicyService::setStreamVolumeIndex(media::AudioStreamType streamAidl,
- int32_t deviceAidl, int32_t indexAidl) {
+Status AudioPolicyService::setStreamVolumeIndex(AudioStreamType streamAidl,
+ const AudioDeviceDescription& deviceAidl,
+ int32_t indexAidl) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -948,12 +1017,13 @@
device));
}
-Status AudioPolicyService::getStreamVolumeIndex(media::AudioStreamType streamAidl,
- int32_t deviceAidl, int32_t* _aidl_return) {
+Status AudioPolicyService::getStreamVolumeIndex(AudioStreamType streamAidl,
+ const AudioDeviceDescription& deviceAidl,
+ int32_t* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
int index;
if (mAudioPolicyManager == NULL) {
@@ -971,12 +1041,13 @@
}
Status AudioPolicyService::setVolumeIndexForAttributes(
- const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t indexAidl) {
+ const media::AudioAttributesInternal& attrAidl,
+ const AudioDeviceDescription& deviceAidl, int32_t indexAidl) {
audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
AudioValidator::validateAudioAttributes(attributes, "169572641")));
@@ -993,11 +1064,12 @@
}
Status AudioPolicyService::getVolumeIndexForAttributes(
- const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t* _aidl_return) {
+ const media::AudioAttributesInternal& attrAidl,
+ const AudioDeviceDescription& deviceAidl, int32_t* _aidl_return) {
audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
int index;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
AudioValidator::validateAudioAttributes(attributes, "169572641")));
@@ -1051,7 +1123,7 @@
return Status::ok();
}
-Status AudioPolicyService::getStrategyForStream(media::AudioStreamType streamAidl,
+Status AudioPolicyService::getStrategyForStream(AudioStreamType streamAidl,
int32_t* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1075,14 +1147,14 @@
//audio policy: use audio_device_t appropriately
-Status AudioPolicyService::getDevicesForStream(media::AudioStreamType streamAidl,
- int32_t* _aidl_return) {
+Status AudioPolicyService::getDevicesForStream(
+ AudioStreamType streamAidl,
+ std::vector<AudioDeviceDescription>* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- legacy2aidl_audio_devices_t_int32_t(AUDIO_DEVICE_NONE));
+ *_aidl_return = std::vector<AudioDeviceDescription>{};
return Status::ok();
}
if (mAudioPolicyManager == NULL) {
@@ -1091,12 +1163,14 @@
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- legacy2aidl_audio_devices_t_int32_t(mAudioPolicyManager->getDevicesForStream(stream)));
+ convertContainer<std::vector<AudioDeviceDescription>>(
+ mAudioPolicyManager->getDevicesForStream(stream),
+ legacy2aidl_audio_devices_t_AudioDeviceDescription));
return Status::ok();
}
Status AudioPolicyService::getDevicesForAttributes(const media::AudioAttributesEx& attrAidl,
- std::vector<media::AudioDevice>* _aidl_return)
+ std::vector<AudioDevice>* _aidl_return)
{
AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesEx_AudioAttributes(attrAidl));
@@ -1110,8 +1184,8 @@
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
mAudioPolicyManager->getDevicesForAttributes(aa.getAttributes(), &devices)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
return Status::ok();
}
@@ -1197,7 +1271,7 @@
return binderStatusFromStatusT(mAudioPolicyManager->moveEffectsToIo(ids, io));
}
-Status AudioPolicyService::isStreamActive(media::AudioStreamType streamAidl, int32_t inPastMsAidl,
+Status AudioPolicyService::isStreamActive(AudioStreamType streamAidl, int32_t inPastMsAidl,
bool* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
@@ -1216,7 +1290,7 @@
return Status::ok();
}
-Status AudioPolicyService::isStreamActiveRemotely(media::AudioStreamType streamAidl,
+Status AudioPolicyService::isStreamActiveRemotely(AudioStreamType streamAidl,
int32_t inPastMsAidl,
bool* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1236,9 +1310,9 @@
return Status::ok();
}
-Status AudioPolicyService::isSourceActive(media::AudioSourceType sourceAidl, bool* _aidl_return) {
+Status AudioPolicyService::isSourceActive(AudioSource sourceAidl, bool* _aidl_return) {
audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+ aidl2legacy_AudioSource_audio_source_t(sourceAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
@@ -1266,7 +1340,7 @@
Status AudioPolicyService::queryDefaultPreProcessing(
int32_t audioSessionAidl,
- media::Int* countAidl,
+ Int* countAidl,
std::vector<media::EffectDescriptor>* _aidl_return) {
audio_session_t audioSession = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_session_t(audioSessionAidl));
@@ -1290,11 +1364,11 @@
return Status::ok();
}
-Status AudioPolicyService::addSourceDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addSourceDefaultEffect(const AudioUuid& typeAidl,
const std::string& opPackageNameAidl,
- const media::AudioUuid& uuidAidl,
+ const AudioUuid& uuidAidl,
int32_t priority,
- media::AudioSourceType sourceAidl,
+ AudioSource sourceAidl,
int32_t* _aidl_return) {
effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1303,7 +1377,7 @@
effect_uuid_t uuid = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioUuid_audio_uuid_t(uuidAidl));
audio_source_t source = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(sourceAidl));
+ aidl2legacy_AudioSource_audio_source_t(sourceAidl));
audio_unique_id_t id;
sp<AudioPolicyEffects>audioPolicyEffects;
@@ -1317,10 +1391,10 @@
return Status::ok();
}
-Status AudioPolicyService::addStreamDefaultEffect(const media::AudioUuid& typeAidl,
+Status AudioPolicyService::addStreamDefaultEffect(const AudioUuid& typeAidl,
const std::string& opPackageNameAidl,
- const media::AudioUuid& uuidAidl,
- int32_t priority, media::AudioUsage usageAidl,
+ const AudioUuid& uuidAidl,
+ int32_t priority, AudioUsage usageAidl,
int32_t* _aidl_return) {
effect_uuid_t type = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioUuid_audio_uuid_t(typeAidl));
@@ -1368,7 +1442,7 @@
}
Status AudioPolicyService::setSupportedSystemUsages(
- const std::vector<media::AudioUsage>& systemUsagesAidl) {
+ const std::vector<AudioUsage>& systemUsagesAidl) {
size_t size = systemUsagesAidl.size();
if (size > MAX_ITEMS_PER_LIST) {
size = MAX_ITEMS_PER_LIST;
@@ -1407,7 +1481,7 @@
mAudioPolicyManager->setAllowedCapturePolicy(uid, capturePolicy));
}
-Status AudioPolicyService::getOffloadSupport(const media::AudioOffloadInfo& infoAidl,
+Status AudioPolicyService::getOffloadSupport(const AudioOffloadInfo& infoAidl,
media::AudioOffloadMode* _aidl_return) {
audio_offload_info_t info = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioOffloadInfo_audio_offload_info_t(infoAidl));
@@ -1423,11 +1497,11 @@
}
Status AudioPolicyService::isDirectOutputSupported(
- const media::AudioConfigBase& configAidl,
+ const AudioConfigBase& configAidl,
const media::AudioAttributesInternal& attributesAidl,
bool* _aidl_return) {
audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, false /*isInput*/));
audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attributesAidl));
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
@@ -1438,7 +1512,7 @@
return binderStatusFromStatusT(NO_INIT);
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
Mutex::Autolock _l(mLock);
*_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
@@ -1447,7 +1521,7 @@
Status AudioPolicyService::listAudioPorts(media::AudioPortRole roleAidl,
- media::AudioPortType typeAidl, media::Int* count,
+ media::AudioPortType typeAidl, Int* count,
std::vector<media::AudioPort>* portsAidl,
int32_t* _aidl_return) {
audio_port_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1535,7 +1609,7 @@
IPCThreadState::self()->getCallingUid()));
}
-Status AudioPolicyService::listAudioPatches(media::Int* count,
+Status AudioPolicyService::listAudioPatches(Int* count,
std::vector<media::AudioPatch>* patchesAidl,
int32_t* _aidl_return) {
unsigned int num_patches = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1602,7 +1676,7 @@
_aidl_return->ioHandle = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
_aidl_return->device = VALUE_OR_RETURN_BINDER_STATUS(
- legacy2aidl_audio_devices_t_int32_t(device));
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
return Status::ok();
}
@@ -1672,7 +1746,7 @@
Status AudioPolicyService::setUidDeviceAffinities(
int32_t uidAidl,
- const std::vector<media::AudioDevice>& devicesAidl) {
+ const std::vector<AudioDevice>& devicesAidl) {
uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1705,7 +1779,7 @@
Status AudioPolicyService::setUserIdDeviceAffinities(
int32_t userIdAidl,
- const std::vector<media::AudioDevice>& devicesAidl) {
+ const std::vector<AudioDevice>& devicesAidl) {
int userId = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(userIdAidl));
AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
@@ -1754,7 +1828,7 @@
return binderStatusFromStatusT(NO_INIT);
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
// startAudioSource should be created as the calling uid
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
@@ -1802,13 +1876,14 @@
}
-Status AudioPolicyService::getStreamVolumeDB(media::AudioStreamType streamAidl, int32_t indexAidl,
- int32_t deviceAidl, float* _aidl_return) {
+Status AudioPolicyService::getStreamVolumeDB(
+ AudioStreamType streamAidl, int32_t indexAidl,
+ const AudioDeviceDescription& deviceAidl, float* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -1819,8 +1894,8 @@
return Status::ok();
}
-Status AudioPolicyService::getSurroundFormats(media::Int* count,
- std::vector<media::audio::common::AudioFormat>* formats,
+Status AudioPolicyService::getSurroundFormats(Int* count,
+ std::vector<AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
@@ -1842,7 +1917,8 @@
numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
- std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+ std::back_inserter(*formats),
+ legacy2aidl_audio_format_t_AudioFormatDescription)));
formatsEnabled->insert(
formatsEnabled->begin(),
surroundFormatsEnabled.get(),
@@ -1852,7 +1928,7 @@
}
Status AudioPolicyService::getReportedSurroundFormats(
- media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) {
+ Int* count, std::vector<AudioFormatDescription>* formats) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
if (numSurroundFormats > MAX_ITEMS_PER_LIST) {
@@ -1872,13 +1948,15 @@
numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
- std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+ std::back_inserter(*formats),
+ legacy2aidl_audio_format_t_AudioFormatDescription)));
count->value = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<uint32_t>(numSurroundFormats));
return Status::ok();
}
-Status AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::audio::common::AudioFormat>* _aidl_return) {
+Status AudioPolicyService::getHwOffloadFormatsSupportedForBluetoothMedia(
+ const AudioDeviceDescription& deviceAidl,
+ std::vector<AudioFormatDescription>* _aidl_return) {
std::vector<audio_format_t> formats;
if (mAudioPolicyManager == NULL) {
@@ -1886,19 +1964,21 @@
}
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
+ audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
- mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(&formats)));
+ mAudioPolicyManager->getHwOffloadFormatsSupportedForBluetoothMedia(device, &formats)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<media::audio::common::AudioFormat>>(
+ convertContainer<std::vector<AudioFormatDescription>>(
formats,
- legacy2aidl_audio_format_t_AudioFormat));
+ legacy2aidl_audio_format_t_AudioFormatDescription));
return Status::ok();
}
Status AudioPolicyService::setSurroundFormatEnabled(
- media::audio::common::AudioFormat audioFormatAidl, bool enabled) {
+ const AudioFormatDescription& audioFormatAidl, bool enabled) {
audio_format_t audioFormat = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioFormat_audio_format_t(audioFormatAidl));
+ aidl2legacy_AudioFormatDescription_audio_format_t(audioFormatAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
@@ -2049,7 +2129,7 @@
Status AudioPolicyService::setDevicesRoleForStrategy(
int32_t strategyAidl,
media::DeviceRole roleAidl,
- const std::vector<media::AudioDevice>& devicesAidl) {
+ const std::vector<AudioDevice>& devicesAidl) {
product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_product_strategy_t(strategyAidl));
device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2062,8 +2142,11 @@
return binderStatusFromStatusT(NO_INIT);
}
Mutex::Autolock _l(mLock);
- return binderStatusFromStatusT(
- mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices));
+ status_t status = mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices);
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
Status AudioPolicyService::removeDevicesRoleForStrategy(int32_t strategyAidl,
@@ -2076,14 +2159,17 @@
return binderStatusFromStatusT(NO_INIT);
}
Mutex::Autolock _l(mLock);
- return binderStatusFromStatusT(
- mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role));
+ status_t status = mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role);
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
Status AudioPolicyService::getDevicesForRoleAndStrategy(
int32_t strategyAidl,
media::DeviceRole roleAidl,
- std::vector<media::AudioDevice>* _aidl_return) {
+ std::vector<AudioDevice>* _aidl_return) {
product_strategy_t strategy = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_product_strategy_t(strategyAidl));
device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2097,8 +2183,8 @@
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
mAudioPolicyManager->getDevicesForRoleAndStrategy(strategy, role, devices)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
return Status::ok();
}
@@ -2109,11 +2195,11 @@
}
Status AudioPolicyService::setDevicesRoleForCapturePreset(
- media::AudioSourceType audioSourceAidl,
+ AudioSource audioSourceAidl,
media::DeviceRole roleAidl,
- const std::vector<media::AudioDevice>& devicesAidl) {
+ const std::vector<AudioDevice>& devicesAidl) {
audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+ aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_DeviceRole_device_role_t(roleAidl));
AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2129,11 +2215,11 @@
}
Status AudioPolicyService::addDevicesRoleForCapturePreset(
- media::AudioSourceType audioSourceAidl,
+ AudioSource audioSourceAidl,
media::DeviceRole roleAidl,
- const std::vector<media::AudioDevice>& devicesAidl) {
+ const std::vector<AudioDevice>& devicesAidl) {
audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+ aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_DeviceRole_device_role_t(roleAidl));
AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2149,11 +2235,11 @@
}
Status AudioPolicyService::removeDevicesRoleForCapturePreset(
- media::AudioSourceType audioSourceAidl,
+ AudioSource audioSourceAidl,
media::DeviceRole roleAidl,
- const std::vector<media::AudioDevice>& devicesAidl) {
+ const std::vector<AudioDevice>& devicesAidl) {
audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+ aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_DeviceRole_device_role_t(roleAidl));
AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2168,10 +2254,10 @@
mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
}
-Status AudioPolicyService::clearDevicesRoleForCapturePreset(media::AudioSourceType audioSourceAidl,
+Status AudioPolicyService::clearDevicesRoleForCapturePreset(AudioSource audioSourceAidl,
media::DeviceRole roleAidl) {
audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+ aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_DeviceRole_device_role_t(roleAidl));
@@ -2184,11 +2270,11 @@
}
Status AudioPolicyService::getDevicesForRoleAndCapturePreset(
- media::AudioSourceType audioSourceAidl,
+ AudioSource audioSourceAidl,
media::DeviceRole roleAidl,
- std::vector<media::AudioDevice>* _aidl_return) {
+ std::vector<AudioDevice>* _aidl_return) {
audio_source_t audioSource = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioSourceType_audio_source_t(audioSourceAidl));
+ aidl2legacy_AudioSource_audio_source_t(audioSourceAidl));
device_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_DeviceRole_device_role_t(roleAidl));
AudioDeviceTypeAddrVector devices;
@@ -2200,8 +2286,51 @@
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<media::AudioDevice>>(devices,
- legacy2aidl_AudioDeviceTypeAddress));
+ convertContainer<std::vector<AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
+ return Status::ok();
+}
+
+Status AudioPolicyService::getSpatializer(
+ const sp<media::INativeSpatializerCallback>& callback,
+ media::GetSpatializerResponse* _aidl_return) {
+ _aidl_return->spatializer = nullptr;
+ if (callback == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ if (mSpatializer != nullptr) {
+ RETURN_IF_BINDER_ERROR(
+ binderStatusFromStatusT(mSpatializer->registerCallback(callback)));
+ _aidl_return->spatializer = mSpatializer;
+ }
+ return Status::ok();
+}
+
+Status AudioPolicyService::canBeSpatialized(
+ const std::optional<media::AudioAttributesInternal>& attrAidl,
+ const std::optional<AudioConfig>& configAidl,
+ const std::vector<AudioDevice>& devicesAidl,
+ bool* _aidl_return) {
+ if (mAudioPolicyManager == nullptr) {
+ return binderStatusFromStatusT(NO_INIT);
+ }
+ audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
+ if (attrAidl.has_value()) {
+ attr = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl.value()));
+ }
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ if (configAidl.has_value()) {
+ config = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(configAidl.value(),
+ false /*isInput*/));
+ }
+ AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
+ convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
+ aidl2legacy_AudioDeviceTypeAddress));
+
+ Mutex::Autolock _l(mLock);
+ *_aidl_return = mAudioPolicyManager->canBeSpatialized(&attr, &config, devices);
return Status::ok();
}
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 4d0e1f1..ef7a83b 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -127,6 +127,7 @@
loadAudioPolicyManager();
mAudioPolicyManager = mCreateAudioPolicyManager(mAudioPolicyClient);
}
+
// load audio processing modules
sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects();
sp<UidPolicy> uidPolicy = new UidPolicy(this);
@@ -139,6 +140,18 @@
}
uidPolicy->registerSelf();
sensorPrivacyPolicy->registerSelf();
+
+ // Create spatializer if supported
+ if (mAudioPolicyManager != nullptr) {
+ Mutex::Autolock _l(mLock);
+ const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+ AudioDeviceTypeAddrVector devices;
+ bool hasSpatializer = mAudioPolicyManager->canBeSpatialized(&attr, nullptr, devices);
+ if (hasSpatializer) {
+ mSpatializer = Spatializer::create(this);
+ }
+ }
+ AudioSystem::audioPolicyReady();
}
void AudioPolicyService::unloadAudioPolicyManager()
@@ -353,6 +366,60 @@
}
}
+void AudioPolicyService::onCheckSpatializer()
+{
+ Mutex::Autolock _l(mLock);
+ onCheckSpatializer_l();
+}
+
+void AudioPolicyService::onCheckSpatializer_l()
+{
+ if (mSpatializer != nullptr) {
+ mOutputCommandThread->checkSpatializerCommand();
+ }
+}
+
+void AudioPolicyService::doOnCheckSpatializer()
+{
+ Mutex::Autolock _l(mLock);
+
+ if (mSpatializer != nullptr) {
+ // Note: mSpatializer != nullptr => mAudioPolicyManager != nullptr
+ if (mSpatializer->getLevel() != media::SpatializationLevel::NONE) {
+ audio_io_handle_t currentOutput = mSpatializer->getOutput();
+ audio_io_handle_t newOutput;
+ const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+ audio_config_base_t config = mSpatializer->getAudioInConfig();
+ status_t status =
+ mAudioPolicyManager->getSpatializerOutput(&config, &attr, &newOutput);
+
+ if (status == NO_ERROR && currentOutput == newOutput) {
+ return;
+ }
+ mLock.unlock();
+ // It is OK to call detachOutput() if none is already attached.
+ mSpatializer->detachOutput();
+ if (status != NO_ERROR || newOutput == AUDIO_IO_HANDLE_NONE) {
+ mLock.lock();
+ return;
+ }
+ status = mSpatializer->attachOutput(newOutput);
+ mLock.lock();
+ if (status != NO_ERROR) {
+ mAudioPolicyManager->releaseSpatializerOutput(newOutput);
+ }
+ } else if (mSpatializer->getLevel() == media::SpatializationLevel::NONE
+ && mSpatializer->getOutput() != AUDIO_IO_HANDLE_NONE) {
+ mLock.unlock();
+ audio_io_handle_t output = mSpatializer->detachOutput();
+ mLock.lock();
+ if (output != AUDIO_IO_HANDLE_NONE) {
+ mAudioPolicyManager->releaseSpatializerOutput(output);
+ }
+ }
+ }
+}
+
status_t AudioPolicyService::clientCreateAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle,
int delayMs)
@@ -409,7 +476,7 @@
}
}
-void AudioPolicyService::NotificationClient::onAudioVolumeGroupChanged(volume_group_t group,
+void AudioPolicyService::NotificationClient::onAudioVolumeGroupChanged(volume_group_t group,
int flags)
{
if (mAudioPolicyServiceClient != 0 && mAudioVolumeGroupCallbacksEnabled) {
@@ -442,22 +509,24 @@
int32_t eventAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(event));
media::RecordClientInfo clientInfoAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_record_client_info_t_RecordClientInfo(*clientInfo));
- media::AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_base_t_AudioConfigBase(*clientConfig));
+ AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(
+ *clientConfig, true /*isInput*/));
std::vector<media::EffectDescriptor> clientEffectsAidl = VALUE_OR_RETURN_STATUS(
convertContainer<std::vector<media::EffectDescriptor>>(
clientEffects,
legacy2aidl_effect_descriptor_t_EffectDescriptor));
- media::AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_base_t_AudioConfigBase(*deviceConfig));
+ AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(
+ *deviceConfig, true /*isInput*/));
std::vector<media::EffectDescriptor> effectsAidl = VALUE_OR_RETURN_STATUS(
convertContainer<std::vector<media::EffectDescriptor>>(
effects,
legacy2aidl_effect_descriptor_t_EffectDescriptor));
int32_t patchHandleAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_patch_handle_t_int32_t(patchHandle));
- media::AudioSourceType sourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_source_t_AudioSourceType(source));
+ media::audio::common::AudioSource sourceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_source_t_AudioSource(source));
return aidl_utils::statusTFromBinderStatus(
mAudioPolicyServiceClient->onRecordingConfigurationUpdate(eventAidl,
clientInfoAidl,
@@ -660,7 +729,8 @@
if (current->attributes.source != AUDIO_SOURCE_HOTWORD) {
onlyHotwordActive = false;
}
- if (currentUid == mPhoneStateOwnerUid) {
+ if (currentUid == mPhoneStateOwnerUid &&
+ !isVirtualSource(current->attributes.source)) {
isPhoneStateOwnerActive = true;
}
}
@@ -839,6 +909,7 @@
switch (source) {
case AUDIO_SOURCE_FM_TUNER:
case AUDIO_SOURCE_ECHO_REFERENCE:
+ case AUDIO_SOURCE_REMOTE_SUBMIX:
return false;
default:
break;
@@ -970,7 +1041,7 @@
case TRANSACTION_removeUidDeviceAffinities:
case TRANSACTION_setUserIdDeviceAffinities:
case TRANSACTION_removeUserIdDeviceAffinities:
- case TRANSACTION_getHwOffloadEncodingFormatsSupportedForA2DP:
+ case TRANSACTION_getHwOffloadFormatsSupportedForBluetoothMedia:
case TRANSACTION_listAudioVolumeGroups:
case TRANSACTION_getVolumeGroupFromAudioAttributes:
case TRANSACTION_acquireSoundTriggerSession:
@@ -990,7 +1061,8 @@
case TRANSACTION_addDevicesRoleForCapturePreset:
case TRANSACTION_removeDevicesRoleForCapturePreset:
case TRANSACTION_clearDevicesRoleForCapturePreset:
- case TRANSACTION_getDevicesForRoleAndCapturePreset: {
+ case TRANSACTION_getDevicesForRoleAndCapturePreset:
+ case TRANSACTION_getSpatializer: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1764,6 +1836,17 @@
mLock.lock();
} break;
+ case CHECK_SPATIALIZER: {
+ ALOGV("AudioCommandThread() processing updateUID states");
+ svc = mService.promote();
+ if (svc == 0) {
+ break;
+ }
+ mLock.unlock();
+ svc->doOnCheckSpatializer();
+ mLock.lock();
+ } break;
+
default:
ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
}
@@ -2075,6 +2158,14 @@
sendCommand(command);
}
+void AudioPolicyService::AudioCommandThread::checkSpatializerCommand()
+{
+ sp<AudioCommand>command = new AudioCommand();
+ command->mCommand = CHECK_SPATIALIZER;
+ ALOGV("AudioCommandThread() adding check spatializer");
+ sendCommand(command);
+}
+
status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
{
{
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 7ed829c..b3ac21b 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -19,6 +19,7 @@
#define ANDROID_AUDIOPOLICYSERVICE_H
#include <android/media/BnAudioPolicyService.h>
+#include <android/media/GetSpatializerResponse.h>
#include <android-base/thread_annotations.h>
#include <cutils/misc.h>
#include <cutils/config_utils.h>
@@ -38,6 +39,7 @@
#include <mediautils/ServiceUtilities.h>
#include "AudioPolicyEffects.h"
#include "CaptureStateNotifier.h"
+#include "Spatializer.h"
#include <AudioPolicyInterface.h>
#include <android/hardware/BnSensorPrivacyListener.h>
#include <android/content/AttributionSourceState.h>
@@ -47,13 +49,25 @@
namespace android {
using content::AttributionSourceState;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::Int;
// ----------------------------------------------------------------------------
class AudioPolicyService :
public BinderService<AudioPolicyService>,
public media::BnAudioPolicyService,
- public IBinder::DeathRecipient
+ public IBinder::DeathRecipient,
+ public SpatializerPolicyCallback
{
friend class BinderService<AudioPolicyService>;
@@ -68,25 +82,25 @@
//
binder::Status onNewAudioModulesAvailable() override;
binder::Status setDeviceConnectionState(
- const media::AudioDevice& device,
+ const AudioDevice& device,
media::AudioPolicyDeviceState state,
const std::string& deviceName,
- media::audio::common::AudioFormat encodedFormat) override;
- binder::Status getDeviceConnectionState(const media::AudioDevice& device,
+ const AudioFormatDescription& encodedFormat) override;
+ binder::Status getDeviceConnectionState(const AudioDevice& device,
media::AudioPolicyDeviceState* _aidl_return) override;
binder::Status handleDeviceConfigChange(
- const media::AudioDevice& device,
+ const AudioDevice& device,
const std::string& deviceName,
- media::audio::common::AudioFormat encodedFormat) override;
- binder::Status setPhoneState(media::AudioMode state, int32_t uid) override;
+ const AudioFormatDescription& encodedFormat) override;
+ binder::Status setPhoneState(AudioMode state, int32_t uid) override;
binder::Status setForceUse(media::AudioPolicyForceUse usage,
media::AudioPolicyForcedConfig config) override;
binder::Status getForceUse(media::AudioPolicyForceUse usage,
media::AudioPolicyForcedConfig* _aidl_return) override;
- binder::Status getOutput(media::AudioStreamType stream, int32_t* _aidl_return) override;
+ binder::Status getOutput(AudioStreamType stream, int32_t* _aidl_return) override;
binder::Status getOutputForAttr(const media::AudioAttributesInternal& attr, int32_t session,
const AttributionSourceState &attributionSource,
- const media::AudioConfig& config,
+ const AudioConfig& config,
int32_t flags, int32_t selectedDeviceId,
media::GetOutputForAttrResponse* _aidl_return) override;
binder::Status startOutput(int32_t portId) override;
@@ -95,32 +109,37 @@
binder::Status getInputForAttr(const media::AudioAttributesInternal& attr, int32_t input,
int32_t riid, int32_t session,
const AttributionSourceState &attributionSource,
- const media::AudioConfigBase& config, int32_t flags,
+ const AudioConfigBase& config, int32_t flags,
int32_t selectedDeviceId,
media::GetInputForAttrResponse* _aidl_return) override;
binder::Status startInput(int32_t portId) override;
binder::Status stopInput(int32_t portId) override;
binder::Status releaseInput(int32_t portId) override;
- binder::Status initStreamVolume(media::AudioStreamType stream, int32_t indexMin,
+ binder::Status initStreamVolume(AudioStreamType stream, int32_t indexMin,
int32_t indexMax) override;
- binder::Status setStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+ binder::Status setStreamVolumeIndex(AudioStreamType stream,
+ const AudioDeviceDescription& device,
int32_t index) override;
- binder::Status getStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+ binder::Status getStreamVolumeIndex(AudioStreamType stream,
+ const AudioDeviceDescription& device,
int32_t* _aidl_return) override;
binder::Status setVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
- int32_t device, int32_t index) override;
+ const AudioDeviceDescription& device,
+ int32_t index) override;
binder::Status getVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
- int32_t device, int32_t* _aidl_return) override;
+ const AudioDeviceDescription& device,
+ int32_t* _aidl_return) override;
binder::Status getMaxVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
int32_t* _aidl_return) override;
binder::Status getMinVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
int32_t* _aidl_return) override;
- binder::Status getStrategyForStream(media::AudioStreamType stream,
+ binder::Status getStrategyForStream(AudioStreamType stream,
int32_t* _aidl_return) override;
- binder::Status getDevicesForStream(media::AudioStreamType stream,
- int32_t* _aidl_return) override;
+ binder::Status getDevicesForStream(
+ AudioStreamType stream,
+ std::vector<AudioDeviceDescription>* _aidl_return) override;
binder::Status getDevicesForAttributes(const media::AudioAttributesEx& attr,
- std::vector<media::AudioDevice>* _aidl_return) override;
+ std::vector<AudioDevice>* _aidl_return) override;
binder::Status getOutputForEffect(const media::EffectDescriptor& desc,
int32_t* _aidl_return) override;
binder::Status registerEffect(const media::EffectDescriptor& desc, int32_t io, int32_t strategy,
@@ -128,42 +147,42 @@
binder::Status unregisterEffect(int32_t id) override;
binder::Status setEffectEnabled(int32_t id, bool enabled) override;
binder::Status moveEffectsToIo(const std::vector<int32_t>& ids, int32_t io) override;
- binder::Status isStreamActive(media::AudioStreamType stream, int32_t inPastMs,
+ binder::Status isStreamActive(AudioStreamType stream, int32_t inPastMs,
bool* _aidl_return) override;
- binder::Status isStreamActiveRemotely(media::AudioStreamType stream, int32_t inPastMs,
+ binder::Status isStreamActiveRemotely(AudioStreamType stream, int32_t inPastMs,
bool* _aidl_return) override;
- binder::Status isSourceActive(media::AudioSourceType source, bool* _aidl_return) override;
+ binder::Status isSourceActive(AudioSource source, bool* _aidl_return) override;
binder::Status queryDefaultPreProcessing(
- int32_t audioSession, media::Int* count,
+ int32_t audioSession, Int* count,
std::vector<media::EffectDescriptor>* _aidl_return) override;
- binder::Status addSourceDefaultEffect(const media::AudioUuid& type,
+ binder::Status addSourceDefaultEffect(const AudioUuid& type,
const std::string& opPackageName,
- const media::AudioUuid& uuid, int32_t priority,
- media::AudioSourceType source,
+ const AudioUuid& uuid, int32_t priority,
+ AudioSource source,
int32_t* _aidl_return) override;
- binder::Status addStreamDefaultEffect(const media::AudioUuid& type,
+ binder::Status addStreamDefaultEffect(const AudioUuid& type,
const std::string& opPackageName,
- const media::AudioUuid& uuid, int32_t priority,
- media::AudioUsage usage, int32_t* _aidl_return) override;
+ const AudioUuid& uuid, int32_t priority,
+ AudioUsage usage, int32_t* _aidl_return) override;
binder::Status removeSourceDefaultEffect(int32_t id) override;
binder::Status removeStreamDefaultEffect(int32_t id) override;
binder::Status setSupportedSystemUsages(
- const std::vector<media::AudioUsage>& systemUsages) override;
+ const std::vector<AudioUsage>& systemUsages) override;
binder::Status setAllowedCapturePolicy(int32_t uid, int32_t capturePolicy) override;
- binder::Status getOffloadSupport(const media::AudioOffloadInfo& info,
+ binder::Status getOffloadSupport(const media::audio::common::AudioOffloadInfo& info,
media::AudioOffloadMode* _aidl_return) override;
- binder::Status isDirectOutputSupported(const media::AudioConfigBase& config,
+ binder::Status isDirectOutputSupported(const AudioConfigBase& config,
const media::AudioAttributesInternal& attributes,
bool* _aidl_return) override;
binder::Status listAudioPorts(media::AudioPortRole role, media::AudioPortType type,
- media::Int* count, std::vector<media::AudioPort>* ports,
+ Int* count, std::vector<media::AudioPort>* ports,
int32_t* _aidl_return) override;
binder::Status getAudioPort(const media::AudioPort& port,
media::AudioPort* _aidl_return) override;
binder::Status createAudioPatch(const media::AudioPatch& patch, int32_t handle,
int32_t* _aidl_return) override;
binder::Status releaseAudioPatch(int32_t handle) override;
- binder::Status listAudioPatches(media::Int* count, std::vector<media::AudioPatch>* patches,
+ binder::Status listAudioPatches(Int* count, std::vector<media::AudioPatch>* patches,
int32_t* _aidl_return) override;
binder::Status setAudioPortConfig(const media::AudioPortConfig& config) override;
binder::Status registerClient(const sp<media::IAudioPolicyServiceClient>& client) override;
@@ -171,15 +190,15 @@
binder::Status setAudioVolumeGroupCallbacksEnabled(bool enabled) override;
binder::Status acquireSoundTriggerSession(media::SoundTriggerSession* _aidl_return) override;
binder::Status releaseSoundTriggerSession(int32_t session) override;
- binder::Status getPhoneState(media::AudioMode* _aidl_return) override;
+ binder::Status getPhoneState(AudioMode* _aidl_return) override;
binder::Status registerPolicyMixes(const std::vector<media::AudioMix>& mixes,
bool registration) override;
binder::Status setUidDeviceAffinities(int32_t uid,
- const std::vector<media::AudioDevice>& devices) override;
+ const std::vector<AudioDevice>& devices) override;
binder::Status removeUidDeviceAffinities(int32_t uid) override;
binder::Status setUserIdDeviceAffinities(
int32_t userId,
- const std::vector<media::AudioDevice>& devices) override;
+ const std::vector<AudioDevice>& devices) override;
binder::Status removeUserIdDeviceAffinities(int32_t userId) override;
binder::Status startAudioSource(const media::AudioPortConfig& source,
const media::AudioAttributesInternal& attributes,
@@ -187,16 +206,18 @@
binder::Status stopAudioSource(int32_t portId) override;
binder::Status setMasterMono(bool mono) override;
binder::Status getMasterMono(bool* _aidl_return) override;
- binder::Status getStreamVolumeDB(media::AudioStreamType stream, int32_t index, int32_t device,
+ binder::Status getStreamVolumeDB(AudioStreamType stream, int32_t index,
+ const AudioDeviceDescription& device,
float* _aidl_return) override;
- binder::Status getSurroundFormats(media::Int* count,
- std::vector<media::audio::common::AudioFormat>* formats,
+ binder::Status getSurroundFormats(Int* count,
+ std::vector<AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) override;
binder::Status getReportedSurroundFormats(
- media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) override;
- binder::Status getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::audio::common::AudioFormat>* _aidl_return) override;
- binder::Status setSurroundFormatEnabled(media::audio::common::AudioFormat audioFormat,
+ Int* count, std::vector<AudioFormatDescription>* formats) override;
+ binder::Status getHwOffloadFormatsSupportedForBluetoothMedia(
+ const AudioDeviceDescription& device,
+ std::vector<AudioFormatDescription>* _aidl_return) override;
+ binder::Status setSurroundFormatEnabled(const AudioFormatDescription& audioFormat,
bool enabled) override;
binder::Status setAssistantUid(int32_t uid) override;
binder::Status setHotwordDetectionServiceUid(int32_t uid) override;
@@ -217,37 +238,41 @@
binder::Status isCallScreenModeSupported(bool* _aidl_return) override;
binder::Status setDevicesRoleForStrategy(
int32_t strategy, media::DeviceRole role,
- const std::vector<media::AudioDevice>& devices) override;
+ const std::vector<AudioDevice>& devices) override;
binder::Status removeDevicesRoleForStrategy(int32_t strategy, media::DeviceRole role) override;
binder::Status getDevicesForRoleAndStrategy(
int32_t strategy, media::DeviceRole role,
- std::vector<media::AudioDevice>* _aidl_return) override;
+ std::vector<AudioDevice>* _aidl_return) override;
binder::Status setDevicesRoleForCapturePreset(
- media::AudioSourceType audioSource,
+ AudioSource audioSource,
media::DeviceRole role,
- const std::vector<media::AudioDevice>& devices) override;
+ const std::vector<AudioDevice>& devices) override;
binder::Status addDevicesRoleForCapturePreset(
- media::AudioSourceType audioSource,
+ AudioSource audioSource,
media::DeviceRole role,
- const std::vector<media::AudioDevice>& devices) override;
+ const std::vector<AudioDevice>& devices) override;
binder::Status removeDevicesRoleForCapturePreset(
- media::AudioSourceType audioSource,
+ AudioSource audioSource,
media::DeviceRole role,
- const std::vector<media::AudioDevice>& devices) override;
- binder::Status clearDevicesRoleForCapturePreset(media::AudioSourceType audioSource,
+ const std::vector<AudioDevice>& devices) override;
+ binder::Status clearDevicesRoleForCapturePreset(AudioSource audioSource,
media::DeviceRole role) override;
binder::Status getDevicesForRoleAndCapturePreset(
- media::AudioSourceType audioSource,
+ AudioSource audioSource,
media::DeviceRole role,
- std::vector<media::AudioDevice>* _aidl_return) override;
+ std::vector<AudioDevice>* _aidl_return) override;
binder::Status registerSoundTriggerCaptureStateListener(
const sp<media::ICaptureStateListener>& listener, bool* _aidl_return) override;
- virtual status_t onTransact(
- uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags);
+ binder::Status getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+ media::GetSpatializerResponse* _aidl_return) override;
+ binder::Status canBeSpatialized(
+ const std::optional<media::AudioAttributesInternal>& attr,
+ const std::optional<AudioConfig>& config,
+ const std::vector<AudioDevice>& devices,
+ bool* _aidl_return) override;
+
+ status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -313,6 +338,16 @@
void onRoutingUpdated();
void doOnRoutingUpdated();
+ /**
+ * Spatializer SpatializerPolicyCallback implementation.
+ * onCheckSpatializer() sends an event on mOutputCommandThread which executes
+ * doOnCheckSpatializer() to check if a Spatializer output must be opened or closed
+ * by audio policy manager and attach/detach the spatializer effect accordingly.
+ */
+ void onCheckSpatializer() override;
+ void onCheckSpatializer_l();
+ void doOnCheckSpatializer();
+
void setEffectSuspended(int effectId,
audio_session_t sessionId,
bool suspended);
@@ -350,8 +385,9 @@
app_state_t apmStatFromAmState(int amState);
bool isSupportedSystemUsage(audio_usage_t usage);
- status_t validateUsage(audio_usage_t usage);
- status_t validateUsage(audio_usage_t usage, const AttributionSourceState& attributionSource);
+ status_t validateUsage(const audio_attributes_t& attr);
+ status_t validateUsage(const audio_attributes_t& attr,
+ const AttributionSourceState& attributionSource);
void updateUidStates();
void updateUidStates_l() REQUIRES(mLock);
@@ -483,7 +519,8 @@
SET_EFFECT_SUSPENDED,
AUDIO_MODULES_UPDATE,
ROUTING_UPDATED,
- UPDATE_UID_STATES
+ UPDATE_UID_STATES,
+ CHECK_SPATIALIZER
};
AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
@@ -532,6 +569,7 @@
void audioModulesUpdateCommand();
void routingChangedCommand();
void updateUidStatesCommand();
+ void checkSpatializerCommand();
void insertCommand_l(AudioCommand *command, int delayMs = 0);
private:
class AudioCommandData;
@@ -667,7 +705,8 @@
// The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags);
@@ -985,6 +1024,8 @@
CaptureStateNotifier mCaptureStateNotifier;
+ sp<Spatializer> mSpatializer;
+
void *mLibraryHandle = nullptr;
CreateAudioPolicyManagerInstance mCreateAudioPolicyManager;
DestroyAudioPolicyManagerInstance mDestroyAudioPolicyManager;
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
new file mode 100644
index 0000000..0fdbe20
--- /dev/null
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -0,0 +1,746 @@
+/*
+**
+** Copyright 2021, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#define LOG_TAG "Spatializer"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <android/content/AttributionSourceState.h>
+#include <audio_utils/fixedfft.h>
+#include <cutils/bitops.h>
+#include <hardware/sensors.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/ShmemCompat.h>
+#include <mediautils/ServiceUtilities.h>
+#include <utils/Thread.h>
+
+#include "Spatializer.h"
+
+namespace android {
+
+using aidl_utils::statusTFromBinderStatus;
+using aidl_utils::binderStatusFromStatusT;
+using android::content::AttributionSourceState;
+using binder::Status;
+using media::HeadTrackingMode;
+using media::Pose3f;
+using media::SpatializationLevel;
+using media::SpatializationMode;
+using media::SpatializerHeadTrackingMode;
+using media::SensorPoseProvider;
+
+using namespace std::chrono_literals;
+
+#define VALUE_OR_RETURN_BINDER_STATUS(x) \
+ ({ auto _tmp = (x); \
+ if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
+ std::move(_tmp.value()); })
+
+// ---------------------------------------------------------------------------
+
+class Spatializer::EngineCallbackHandler : public AHandler {
+public:
+ EngineCallbackHandler(wp<Spatializer> spatializer)
+ : mSpatializer(spatializer) {
+ }
+
+ enum {
+ // Device state callbacks
+ kWhatOnFramesProcessed, // AudioEffect::EVENT_FRAMES_PROCESSED
+ kWhatOnHeadToStagePose, // SpatializerPoseController::Listener::onHeadToStagePose
+ kWhatOnActualModeChange, // SpatializerPoseController::Listener::onActualModeChange
+ };
+ static constexpr const char *kNumFramesKey = "numFrames";
+ static constexpr const char *kModeKey = "mode";
+ static constexpr const char *kTranslation0Key = "translation0";
+ static constexpr const char *kTranslation1Key = "translation1";
+ static constexpr const char *kTranslation2Key = "translation2";
+ static constexpr const char *kRotation0Key = "rotation0";
+ static constexpr const char *kRotation1Key = "rotation1";
+ static constexpr const char *kRotation2Key = "rotation2";
+
+ void onMessageReceived(const sp<AMessage> &msg) override {
+ switch (msg->what()) {
+ case kWhatOnFramesProcessed: {
+ sp<Spatializer> spatializer = mSpatializer.promote();
+ if (spatializer == nullptr) {
+ ALOGW("%s: Cannot promote spatializer", __func__);
+ return;
+ }
+ int numFrames;
+ if (!msg->findInt32(kNumFramesKey, &numFrames)) {
+ ALOGE("%s: Cannot find num frames!", __func__);
+ return;
+ }
+ if (numFrames > 0) {
+ spatializer->calculateHeadPose();
+ }
+ } break;
+ case kWhatOnHeadToStagePose: {
+ sp<Spatializer> spatializer = mSpatializer.promote();
+ if (spatializer == nullptr) {
+ ALOGW("%s: Cannot promote spatializer", __func__);
+ return;
+ }
+ std::vector<float> headToStage(sHeadPoseKeys.size());
+ for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
+ if (!msg->findFloat(sHeadPoseKeys[i], &headToStage[i])) {
+ ALOGE("%s: Cannot find kTranslation0Key!", __func__);
+ return;
+ }
+ }
+ spatializer->onHeadToStagePoseMsg(headToStage);
+ } break;
+ case kWhatOnActualModeChange: {
+ sp<Spatializer> spatializer = mSpatializer.promote();
+ if (spatializer == nullptr) {
+ ALOGW("%s: Cannot promote spatializer", __func__);
+ return;
+ }
+ int mode;
+ if (!msg->findInt32(EngineCallbackHandler::kModeKey, &mode)) {
+ ALOGE("%s: Cannot find actualMode!", __func__);
+ return;
+ }
+ spatializer->onActualModeChangeMsg(static_cast<HeadTrackingMode>(mode));
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("Invalid callback message %d", msg->what());
+ }
+ }
+private:
+ wp<Spatializer> mSpatializer;
+};
+
+const std::vector<const char *> Spatializer::sHeadPoseKeys = {
+ Spatializer::EngineCallbackHandler::kTranslation0Key,
+ Spatializer::EngineCallbackHandler::kTranslation1Key,
+ Spatializer::EngineCallbackHandler::kTranslation2Key,
+ Spatializer::EngineCallbackHandler::kRotation0Key,
+ Spatializer::EngineCallbackHandler::kRotation1Key,
+ Spatializer::EngineCallbackHandler::kRotation2Key,
+};
+
+// ---------------------------------------------------------------------------
+sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) {
+ sp<Spatializer> spatializer;
+
+ sp<EffectsFactoryHalInterface> effectsFactoryHal = EffectsFactoryHalInterface::create();
+ if (effectsFactoryHal == nullptr) {
+ ALOGW("%s failed to create effect factory interface", __func__);
+ return spatializer;
+ }
+
+ std::vector<effect_descriptor_t> descriptors;
+ status_t status =
+ effectsFactoryHal->getDescriptors(FX_IID_SPATIALIZER, &descriptors);
+ if (status != NO_ERROR) {
+ ALOGW("%s failed to get spatializer descriptor, error %d", __func__, status);
+ return spatializer;
+ }
+ ALOG_ASSERT(!descriptors.empty(),
+ "%s getDescriptors() returned no error but empty list", __func__);
+
+ //TODO: get supported spatialization modes from FX engine or descriptor
+
+ sp<EffectHalInterface> effect;
+ status = effectsFactoryHal->createEffect(&descriptors[0].uuid, AUDIO_SESSION_OUTPUT_STAGE,
+ AUDIO_IO_HANDLE_NONE, AUDIO_PORT_HANDLE_NONE, &effect);
+ ALOGI("%s FX create status %d effect %p", __func__, status, effect.get());
+
+ if (status == NO_ERROR && effect != nullptr) {
+ spatializer = new Spatializer(descriptors[0], callback);
+ if (spatializer->loadEngineConfiguration(effect) != NO_ERROR) {
+ spatializer.clear();
+ }
+ }
+
+ return spatializer;
+}
+
+Spatializer::Spatializer(effect_descriptor_t engineDescriptor, SpatializerPolicyCallback* callback)
+ : mEngineDescriptor(engineDescriptor),
+ mPolicyCallback(callback) {
+ ALOGV("%s", __func__);
+}
+
+void Spatializer::onFirstRef() {
+ mLooper = new ALooper;
+ mLooper->setName("Spatializer-looper");
+ mLooper->start(
+ /*runOnCallingThread*/false,
+ /*canCallJava*/ false,
+ PRIORITY_AUDIO);
+
+ mHandler = new EngineCallbackHandler(this);
+ mLooper->registerHandler(mHandler);
+}
+
+Spatializer::~Spatializer() {
+ ALOGV("%s", __func__);
+ if (mLooper != nullptr) {
+ mLooper->stop();
+ mLooper->unregisterHandler(mHandler->id());
+ }
+ mLooper.clear();
+ mHandler.clear();
+}
+
+status_t Spatializer::loadEngineConfiguration(sp<EffectHalInterface> effect) {
+ ALOGV("%s", __func__);
+
+ std::vector<bool> supportsHeadTracking;
+ status_t status = getHalParameter<false>(effect, SPATIALIZER_PARAM_HEADTRACKING_SUPPORTED,
+ &supportsHeadTracking);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ mSupportsHeadTracking = supportsHeadTracking[0];
+
+ status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_LEVELS, &mLevels);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_SPATIALIZATION_MODES,
+ &mSpatializationModes);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_CHANNEL_MASKS,
+ &mChannelMasks);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return NO_ERROR;
+}
+
+/** Gets the channel mask, sampling rate and format set for the spatializer input. */
+audio_config_base_t Spatializer::getAudioInConfig() const {
+ std::lock_guard lock(mLock);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ // For now use highest supported channel count
+ uint32_t maxCount = 0;
+ for ( auto mask : mChannelMasks) {
+ if (audio_channel_count_from_out_mask(mask) > maxCount) {
+ config.channel_mask = mask;
+ }
+ }
+ return config;
+}
+
+status_t Spatializer::registerCallback(
+ const sp<media::INativeSpatializerCallback>& callback) {
+ std::lock_guard lock(mLock);
+ if (callback == nullptr) {
+ return BAD_VALUE;
+ }
+
+ sp<IBinder> binder = IInterface::asBinder(callback);
+ status_t status = binder->linkToDeath(this);
+ if (status == NO_ERROR) {
+ mSpatializerCallback = callback;
+ }
+ ALOGV("%s status %d", __func__, status);
+ return status;
+}
+
+// IBinder::DeathRecipient
+void Spatializer::binderDied(__unused const wp<IBinder> &who) {
+ {
+ std::lock_guard lock(mLock);
+ mLevel = SpatializationLevel::NONE;
+ mSpatializerCallback.clear();
+ }
+ ALOGV("%s", __func__);
+ mPolicyCallback->onCheckSpatializer();
+}
+
+// ISpatializer
+Status Spatializer::getSupportedLevels(std::vector<SpatializationLevel> *levels) {
+ ALOGV("%s", __func__);
+ if (levels == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ levels->push_back(SpatializationLevel::NONE);
+ levels->insert(levels->end(), mLevels.begin(), mLevels.end());
+ return Status::ok();
+}
+
+Status Spatializer::setLevel(SpatializationLevel level) {
+ ALOGV("%s level %d", __func__, (int)level);
+ if (level != SpatializationLevel::NONE
+ && std::find(mLevels.begin(), mLevels.end(), level) == mLevels.end()) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ sp<media::INativeSpatializerCallback> callback;
+ bool levelChanged = false;
+ {
+ std::lock_guard lock(mLock);
+ levelChanged = mLevel != level;
+ mLevel = level;
+ callback = mSpatializerCallback;
+
+ if (levelChanged && mEngine != nullptr) {
+ setEffectParameter_l(SPATIALIZER_PARAM_LEVEL, std::vector<SpatializationLevel>{level});
+ }
+ }
+
+ if (levelChanged) {
+ mPolicyCallback->onCheckSpatializer();
+ if (callback != nullptr) {
+ callback->onLevelChanged(level);
+ }
+ }
+ return Status::ok();
+}
+
+Status Spatializer::getLevel(SpatializationLevel *level) {
+ if (level == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *level = mLevel;
+ ALOGV("%s level %d", __func__, (int)*level);
+ return Status::ok();
+}
+
+Status Spatializer::isHeadTrackingSupported(bool *supports) {
+ ALOGV("%s mSupportsHeadTracking %d", __func__, mSupportsHeadTracking);
+ if (supports == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *supports = mSupportsHeadTracking;
+ return Status::ok();
+}
+
+Status Spatializer::getSupportedHeadTrackingModes(
+ std::vector<SpatializerHeadTrackingMode>* modes) {
+ std::lock_guard lock(mLock);
+ ALOGV("%s", __func__);
+ if (modes == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+
+ modes->push_back(SpatializerHeadTrackingMode::DISABLED);
+ if (mSupportsHeadTracking) {
+ if (mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
+ modes->push_back(SpatializerHeadTrackingMode::RELATIVE_WORLD);
+ if (mScreenSensor != SpatializerPoseController::INVALID_SENSOR) {
+ modes->push_back(SpatializerHeadTrackingMode::RELATIVE_SCREEN);
+ }
+ }
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode) {
+ ALOGV("%s mode %d", __func__, (int)mode);
+
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ switch (mode) {
+ case SpatializerHeadTrackingMode::OTHER:
+ return binderStatusFromStatusT(BAD_VALUE);
+ case SpatializerHeadTrackingMode::DISABLED:
+ mDesiredHeadTrackingMode = HeadTrackingMode::STATIC;
+ break;
+ case SpatializerHeadTrackingMode::RELATIVE_WORLD:
+ mDesiredHeadTrackingMode = HeadTrackingMode::WORLD_RELATIVE;
+ break;
+ case SpatializerHeadTrackingMode::RELATIVE_SCREEN:
+ mDesiredHeadTrackingMode = HeadTrackingMode::SCREEN_RELATIVE;
+ break;
+ }
+
+ if (mPoseController != nullptr) {
+ mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+ }
+
+ return Status::ok();
+}
+
+Status Spatializer::getActualHeadTrackingMode(SpatializerHeadTrackingMode *mode) {
+ if (mode == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *mode = mActualHeadTrackingMode;
+ ALOGV("%s mode %d", __func__, (int)*mode);
+ return Status::ok();
+}
+
+Status Spatializer::recenterHeadTracker() {
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ if (mPoseController != nullptr) {
+ mPoseController->recenter();
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setGlobalTransform(const std::vector<float>& screenToStage) {
+ ALOGV("%s", __func__);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::optional<Pose3f> maybePose = Pose3f::fromVector(screenToStage);
+ if (!maybePose.has_value()) {
+ ALOGW("Invalid screenToStage vector.");
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ if (mPoseController != nullptr) {
+ mPoseController->setScreenToStagePose(maybePose.value());
+ }
+ return Status::ok();
+}
+
+Status Spatializer::release() {
+ ALOGV("%s", __func__);
+ bool levelChanged = false;
+ {
+ std::lock_guard lock(mLock);
+ if (mSpatializerCallback == nullptr) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+
+ sp<IBinder> binder = IInterface::asBinder(mSpatializerCallback);
+ binder->unlinkToDeath(this);
+ mSpatializerCallback.clear();
+
+ levelChanged = mLevel != SpatializationLevel::NONE;
+ mLevel = SpatializationLevel::NONE;
+ }
+
+ if (levelChanged) {
+ mPolicyCallback->onCheckSpatializer();
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setHeadSensor(int sensorHandle) {
+ ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ mHeadSensor = sensorHandle;
+ if (mPoseController != nullptr) {
+ mPoseController->setHeadSensor(mHeadSensor);
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setScreenSensor(int sensorHandle) {
+ ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ mScreenSensor = sensorHandle;
+ if (mPoseController != nullptr) {
+ mPoseController->setScreenSensor(mScreenSensor);
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setDisplayOrientation(float physicalToLogicalAngle) {
+ ALOGV("%s physicalToLogicalAngle %f", __func__, physicalToLogicalAngle);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ mDisplayOrientation = physicalToLogicalAngle;
+ if (mPoseController != nullptr) {
+ mPoseController->setDisplayOrientation(mDisplayOrientation);
+ }
+ if (mEngine != nullptr) {
+ setEffectParameter_l(
+ SPATIALIZER_PARAM_DISPLAY_ORIENTATION, std::vector<float>{physicalToLogicalAngle});
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setHingeAngle(float hingeAngle) {
+ std::lock_guard lock(mLock);
+ ALOGV("%s hingeAngle %f", __func__, hingeAngle);
+ if (mEngine != nullptr) {
+ setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{hingeAngle});
+ }
+ return Status::ok();
+}
+
+Status Spatializer::getSupportedModes(std::vector<SpatializationMode> *modes) {
+ ALOGV("%s", __func__);
+ if (modes == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ *modes = mSpatializationModes;
+ return Status::ok();
+}
+
+Status Spatializer::registerHeadTrackingCallback(
+ const sp<media::ISpatializerHeadTrackingCallback>& callback) {
+ ALOGV("%s callback %p", __func__, callback.get());
+ std::lock_guard lock(mLock);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ mHeadTrackingCallback = callback;
+ return Status::ok();
+}
+
+Status Spatializer::setParameter(int key, const std::vector<unsigned char>& value) {
+ ALOGV("%s key %d", __func__, key);
+ std::lock_guard lock(mLock);
+ status_t status = INVALID_OPERATION;
+ if (mEngine != nullptr) {
+ status = setEffectParameter_l(key, value);
+ }
+ return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getParameter(int key, std::vector<unsigned char> *value) {
+ ALOGV("%s key %d value size %d", __func__, key,
+ (value != nullptr ? (int)value->size() : -1));
+ if (value == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ status_t status = INVALID_OPERATION;
+ if (mEngine != nullptr) {
+ ALOGV("%s key %d mEngine %p", __func__, key, mEngine.get());
+ status = getEffectParameter_l(key, value);
+ }
+ return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getOutput(int *output) {
+ ALOGV("%s", __func__);
+ if (output == nullptr) {
+ binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *output = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_io_handle_t_int32_t(mOutput));
+ ALOGV("%s got output %d", __func__, *output);
+ return Status::ok();
+}
+
+// SpatializerPoseController::Listener
+void Spatializer::onHeadToStagePose(const Pose3f& headToStage) {
+ ALOGV("%s", __func__);
+ LOG_ALWAYS_FATAL_IF(!mSupportsHeadTracking,
+ "onHeadToStagePose() called with no head tracking support!");
+
+ auto vec = headToStage.toVector();
+ LOG_ALWAYS_FATAL_IF(vec.size() != sHeadPoseKeys.size(),
+ "%s invalid head to stage vector size %zu", __func__, vec.size());
+
+ sp<AMessage> msg =
+ new AMessage(EngineCallbackHandler::kWhatOnHeadToStagePose, mHandler);
+ for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
+ msg->setFloat(sHeadPoseKeys[i], vec[i]);
+ }
+ msg->post();
+}
+
+void Spatializer::onHeadToStagePoseMsg(const std::vector<float>& headToStage) {
+ ALOGV("%s", __func__);
+ sp<media::ISpatializerHeadTrackingCallback> callback;
+ {
+ std::lock_guard lock(mLock);
+ callback = mHeadTrackingCallback;
+ if (mEngine != nullptr) {
+ setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
+ }
+ }
+
+ if (callback != nullptr) {
+ callback->onHeadToSoundStagePoseUpdated(headToStage);
+ }
+}
+
+void Spatializer::onActualModeChange(HeadTrackingMode mode) {
+ ALOGV("%s(%d)", __func__, (int)mode);
+ sp<AMessage> msg =
+ new AMessage(EngineCallbackHandler::kWhatOnActualModeChange, mHandler);
+ msg->setInt32(EngineCallbackHandler::kModeKey, static_cast<int>(mode));
+ msg->post();
+}
+
+/**
+ * Looper-thread handler for kWhatOnActualModeChange: maps the internal
+ * HeadTrackingMode to the AIDL SpatializerHeadTrackingMode, records it, and
+ * notifies the registered head-tracking callback outside of mLock.
+ */
+void Spatializer::onActualModeChangeMsg(HeadTrackingMode mode) {
+    ALOGV("%s(%d)", __func__, (int) mode);
+    sp<media::ISpatializerHeadTrackingCallback> callback;
+    SpatializerHeadTrackingMode spatializerMode;
+    {
+        std::lock_guard lock(mLock);
+        if (!mSupportsHeadTracking) {
+            // Without head-tracking support the only reportable mode is DISABLED.
+            spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+        } else {
+            switch (mode) {
+                case HeadTrackingMode::STATIC:
+                    spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+                    break;
+                case HeadTrackingMode::WORLD_RELATIVE:
+                    spatializerMode = SpatializerHeadTrackingMode::RELATIVE_WORLD;
+                    break;
+                case HeadTrackingMode::SCREEN_RELATIVE:
+                    spatializerMode = SpatializerHeadTrackingMode::RELATIVE_SCREEN;
+                    break;
+                default:
+                    // Cast added for the varargs %d, consistent with the ALOGV
+                    // above: passing a scoped enum through "..." is not portable.
+                    LOG_ALWAYS_FATAL("Unknown mode: %d", (int) mode);
+            }
+        }
+        mActualHeadTrackingMode = spatializerMode;
+        // Copy the callback under the lock; invoke it after releasing mLock.
+        callback = mHeadTrackingCallback;
+    }
+    if (callback != nullptr) {
+        callback->onHeadTrackingModeChanged(spatializerMode);
+    }
+}
+
+// Called by audio policy service when the dedicated spatializer output mixer is
+// opened: (re)creates the spatializer effect engine on that output, applies the
+// stored level and head-tracking parameters, and starts the pose controller
+// when head tracking is supported. Client callbacks are invoked outside mLock.
+status_t Spatializer::attachOutput(audio_io_handle_t output) {
+ std::shared_ptr<SpatializerPoseController> poseController;
+ bool outputChanged = false;
+ sp<media::INativeSpatializerCallback> callback;
+
+ {
+ std::lock_guard lock(mLock);
+ ALOGV("%s output %d mOutput %d", __func__, (int)output, (int)mOutput);
+ if (mOutput != AUDIO_IO_HANDLE_NONE) {
+ LOG_ALWAYS_FATAL_IF(mEngine == nullptr, "%s output set without FX engine", __func__);
+ // remove FX instance
+ mEngine->setEnabled(false);
+ mEngine.clear();
+ }
+ // create FX instance on output
+ AttributionSourceState attributionSource = AttributionSourceState();
+ mEngine = new AudioEffect(attributionSource);
+ mEngine->set(nullptr, &mEngineDescriptor.uuid, 0, Spatializer::engineCallback /* cbf */,
+ this /* user */, AUDIO_SESSION_OUTPUT_STAGE, output, {} /* device */,
+ false /* probe */, true /* notifyFramesProcessed */);
+ status_t status = mEngine->initCheck();
+ ALOGV("%s mEngine create status %d", __func__, (int)status);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ // Re-apply the state accumulated while no engine was attached.
+ setEffectParameter_l(SPATIALIZER_PARAM_LEVEL,
+ std::vector<SpatializationLevel>{mLevel});
+ setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
+ std::vector<SpatializerHeadTrackingMode>{mActualHeadTrackingMode});
+
+ mEngine->setEnabled(true);
+ outputChanged = mOutput != output;
+ mOutput = output;
+
+ if (mSupportsHeadTracking) {
+ // 10ms sensor period, 50ms max update period for the pose controller.
+ mPoseController = std::make_shared<SpatializerPoseController>(
+ static_cast<SpatializerPoseController::Listener*>(this), 10ms, 50ms);
+ LOG_ALWAYS_FATAL_IF(mPoseController == nullptr,
+ "%s could not allocate pose controller", __func__);
+
+ mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+ mPoseController->setHeadSensor(mHeadSensor);
+ mPoseController->setScreenSensor(mScreenSensor);
+ mPoseController->setDisplayOrientation(mDisplayOrientation);
+ poseController = mPoseController;
+ }
+ callback = mSpatializerCallback;
+ }
+ // Block until the first pose has been computed, outside of mLock since the
+ // pose controller calls back into this object.
+ if (poseController != nullptr) {
+ poseController->waitUntilCalculated();
+ }
+
+ if (outputChanged && callback != nullptr) {
+ callback->onOutputChanged(output);
+ }
+
+ return NO_ERROR;
+}
+
+// Called by audio policy service when the spatializer output mixer is closed:
+// tears down the effect engine and pose controller. Returns the previously
+// attached output, or AUDIO_IO_HANDLE_NONE if nothing was attached.
+audio_io_handle_t Spatializer::detachOutput() {
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+ sp<media::INativeSpatializerCallback> callback;
+
+ {
+ std::lock_guard lock(mLock);
+ ALOGV("%s mOutput %d", __func__, (int)mOutput);
+ if (mOutput == AUDIO_IO_HANDLE_NONE) {
+ return output;
+ }
+ // remove FX instance
+ mEngine->setEnabled(false);
+ mEngine.clear();
+ output = mOutput;
+ mOutput = AUDIO_IO_HANDLE_NONE;
+ mPoseController.reset();
+
+ callback = mSpatializerCallback;
+ }
+
+ // Notify the client outside of mLock to avoid re-entrancy deadlocks.
+ if (callback != nullptr) {
+ callback->onOutputChanged(AUDIO_IO_HANDLE_NONE);
+ }
+ return output;
+}
+
+// Requests an asynchronous head-pose recalculation from the pose controller,
+// if one is currently attached. Safe to call when no output is attached.
+void Spatializer::calculateHeadPose() {
+ ALOGV("%s", __func__);
+ std::lock_guard lock(mLock);
+ if (mPoseController != nullptr) {
+ mPoseController->calculateAsync();
+ }
+}
+
+// Static AudioEffect callback registered in attachOutput(); `user` is the
+// Spatializer instance. Frames-processed events are forwarded to the looper
+// thread; all other events are only logged.
+void Spatializer::engineCallback(int32_t event, void *user, void *info) {
+ if (user == nullptr) {
+ return;
+ }
+ Spatializer* const me = reinterpret_cast<Spatializer *>(user);
+ switch (event) {
+ case AudioEffect::EVENT_FRAMES_PROCESSED: {
+ // `info` carries the frame count when present; default to 0 otherwise.
+ int frames = info == nullptr ? 0 : *(int*)info;
+ ALOGD("%s frames processed %d for me %p", __func__, frames, me);
+ me->postFramesProcessedMsg(frames);
+ } break;
+ default:
+ ALOGD("%s event %d", __func__, event);
+ break;
+ }
+}
+
+// Re-posts a frames-processed notification to the looper thread so it is
+// handled off the effect callback thread.
+void Spatializer::postFramesProcessedMsg(int frames) {
+ sp<AMessage> msg =
+ new AMessage(EngineCallbackHandler::kWhatOnFramesProcessed, mHandler);
+ msg->setInt32(EngineCallbackHandler::kNumFramesKey, frames);
+ msg->post();
+}
+
+} // namespace android
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
new file mode 100644
index 0000000..4d77b78
--- /dev/null
+++ b/services/audiopolicy/service/Spatializer.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SPATIALIZER_H
+#define ANDROID_MEDIA_SPATIALIZER_H
+
+#include <android/media/BnEffect.h>
+#include <android/media/BnSpatializer.h>
+#include <android/media/SpatializationLevel.h>
+#include <android/media/SpatializationMode.h>
+#include <android/media/SpatializerHeadTrackingMode.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/AudioEffect.h>
+#include <system/audio_effects/effect_spatializer.h>
+
+#include "SpatializerPoseController.h"
+
+namespace android {
+
+
+// ----------------------------------------------------------------------------
+
+/**
+ * A callback interface from the Spatializer object or its parent AudioPolicyService.
+ * This is implemented by the audio policy service hosting the Spatializer to perform
+ * actions needed when a state change inside the Spatializer requires some audio system
+ * changes that cannot be performed by the Spatializer. For instance opening or closing a
+ * spatializer output stream when the spatializer is enabled or disabled
+ */
+class SpatializerPolicyCallback {
+public:
+ /** Called when a state change occurs that requires the parent audio policy service to take
+ * some action.
+ */
+ virtual void onCheckSpatializer() = 0;
+
+ virtual ~SpatializerPolicyCallback() = default;
+};
+/**
+ * The Spatializer class implements all functional controlling the multichannel spatializer
+ * with head tracking implementation in the native audio service: audio policy and audio flinger.
+ * It presents an AIDL interface available to the java audio service to discover the availability
+ * of the feature and options, control its state and register an active head tracking sensor.
+ * It maintains the current state of the platform spatializer and applies the stored parameters
+ * when the spatializer engine is created and enabled.
+ * Based on the requested spatializer level, it will request the creation of a specialized output
+ * mixer to the audio policy service which will in turn notify the Spatializer of the output
+ * stream on which a spatializer engine should be created, configured and enabled.
+ * The spatializer also hosts the head tracking management logic. This logic receives the
+ * desired head tracking mode and selected head tracking sensor, registers a sensor event listener
+ * and derives the compounded head pose information to the spatializer engine.
+ *
+ * Workflow:
+ * - Initialization: when the audio policy service starts, it checks if a spatializer effect
+ * engine exists and if the audio policy manager reports a dedicated spatializer output profile.
+ * If both conditions are met, a Spatializer object is created
+ * - Capabilities discovery: AudioService will call AudioSystem::canBeSpatialized() and if true,
+ * acquire an ISpatializer interface with AudioSystem::getSpatializer(). This interface
+ * will be used to query the implementation capabilities and configure the spatializer.
+ * - Enabling: when ISpatializer::setLevel() sets a level different from NONE the spatializer
+ * is considered enabled. The audio policy callback onCheckSpatializer() is called. This
+ * triggers a request to audio policy manager to open a spatialization output stream and a
+ * spatializer mixer is created in audio flinger. When an output is returned by audio policy
+ * manager, Spatializer::attachOutput() is called which creates and enables the spatializer
+ * stage engine on the specified output.
+ * - Disabling: when the spatialization level is set to NONE, the spatializer is considered
+ * disabled. The audio policy callback onCheckSpatializer() is called. This triggers a call
+ * to Spatializer::detachOutput() and the spatializer engine is released. Then a request is
+ * made to audio policy manager to release and close the spatializer output stream and the
+ * spatializer mixer thread is destroyed.
+ */
+class Spatializer : public media::BnSpatializer,
+ public IBinder::DeathRecipient,
+ private SpatializerPoseController::Listener {
+ public:
+ static sp<Spatializer> create(SpatializerPolicyCallback *callback);
+
+ ~Spatializer() override;
+
+ /** RefBase */
+ void onFirstRef();
+
+ /** ISpatializer, see ISpatializer.aidl */
+ binder::Status release() override;
+ binder::Status getSupportedLevels(std::vector<media::SpatializationLevel>* levels) override;
+ binder::Status setLevel(media::SpatializationLevel level) override;
+ binder::Status getLevel(media::SpatializationLevel *level) override;
+ binder::Status isHeadTrackingSupported(bool *supports);
+ binder::Status getSupportedHeadTrackingModes(
+ std::vector<media::SpatializerHeadTrackingMode>* modes) override;
+ binder::Status setDesiredHeadTrackingMode(
+ media::SpatializerHeadTrackingMode mode) override;
+ binder::Status getActualHeadTrackingMode(
+ media::SpatializerHeadTrackingMode* mode) override;
+ binder::Status recenterHeadTracker() override;
+ binder::Status setGlobalTransform(const std::vector<float>& screenToStage) override;
+ binder::Status setHeadSensor(int sensorHandle) override;
+ binder::Status setScreenSensor(int sensorHandle) override;
+ binder::Status setDisplayOrientation(float physicalToLogicalAngle) override;
+ binder::Status setHingeAngle(float hingeAngle) override;
+ binder::Status getSupportedModes(std::vector<media::SpatializationMode>* modes) override;
+ binder::Status registerHeadTrackingCallback(
+ const sp<media::ISpatializerHeadTrackingCallback>& callback) override;
+ binder::Status setParameter(int key, const std::vector<unsigned char>& value) override;
+ binder::Status getParameter(int key, std::vector<unsigned char> *value) override;
+ binder::Status getOutput(int *output);
+
+ /** IBinder::DeathRecipient. Listen to the death of the INativeSpatializerCallback. */
+ virtual void binderDied(const wp<IBinder>& who);
+
+ /** Registers a INativeSpatializerCallback when a client is attached to this Spatializer
+ * by audio policy service.
+ */
+ status_t registerCallback(const sp<media::INativeSpatializerCallback>& callback);
+
+ status_t loadEngineConfiguration(sp<EffectHalInterface> effect);
+
+ /** Level getter for use by local classes. */
+ media::SpatializationLevel getLevel() const { std::lock_guard lock(mLock); return mLevel; }
+
+ /** Called by audio policy service when the special output mixer dedicated to spatialization
+ * is opened and the spatializer engine must be created.
+ */
+ status_t attachOutput(audio_io_handle_t output);
+ /** Called by audio policy service when the special output mixer dedicated to spatialization
+ * is closed and the spatializer engine must be release.
+ */
+ audio_io_handle_t detachOutput();
+ /** Returns the output stream the spatializer is attached to. */
+ audio_io_handle_t getOutput() const { std::lock_guard lock(mLock); return mOutput; }
+
+ /** Gets the channel mask, sampling rate and format set for the spatializer input. */
+ audio_config_base_t getAudioInConfig() const;
+
+ void calculateHeadPose();
+
+private:
+ Spatializer(effect_descriptor_t engineDescriptor,
+ SpatializerPolicyCallback *callback);
+
+ static void engineCallback(int32_t event, void* user, void *info);
+
+ // From VirtualizerStageController::Listener
+ void onHeadToStagePose(const media::Pose3f& headToStage) override;
+ void onActualModeChange(media::HeadTrackingMode mode) override;
+
+ void onHeadToStagePoseMsg(const std::vector<float>& headToStage);
+ void onActualModeChangeMsg(media::HeadTrackingMode mode);
+
+ static constexpr int kMaxEffectParamValues = 10;
+ /**
+ * Get a parameter from spatializer engine by calling the effect HAL command method directly.
+ * To be used when the engine instance mEngine is not yet created in the effect framework.
+ * When MULTI_VALUES is false, the expected reply is only one value of type T.
+ * When MULTI_VALUES is true, the expected reply is made of a number (of type T) indicating
+ * how many values are returned, followed by this number for values of type T.
+ */
+    template<bool MULTI_VALUES, typename T>
+    status_t getHalParameter(sp<EffectHalInterface> effect, uint32_t type,
+            std::vector<T> *values) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must less than 32 bits");
+
+        // Command buffer: effect_param_t header + one uint32_t parameter key.
+        // Reply buffer: header + key + (count +) up to kMaxEffectParamValues values.
+        uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1];
+        uint32_t reply[sizeof(effect_param_t) / sizeof(uint32_t) + 2 + kMaxEffectParamValues];
+
+        effect_param_t *p = (effect_param_t *)cmd;
+        p->psize = sizeof(uint32_t);
+        if (MULTI_VALUES) {
+            // Reserve one extra slot for the leading value count.
+            p->vsize = (kMaxEffectParamValues + 1) * sizeof(T);
+        } else {
+            p->vsize = sizeof(T);
+        }
+        *(uint32_t *)p->data = type;
+        uint32_t replySize = sizeof(effect_param_t) + p->psize + p->vsize;
+
+        status_t status = effect->command(EFFECT_CMD_GET_PARAM,
+                sizeof(effect_param_t) + sizeof(uint32_t), cmd,
+                &replySize, reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        // NOTE(review): this reads the status field from the command buffer, not
+        // the reply buffer — confirm this matches the HAL contract.
+        if (p->status != NO_ERROR) {
+            return p->status;
+        }
+        if (replySize <
+                sizeof(effect_param_t) + sizeof(uint32_t) + (MULTI_VALUES ? 2 : 1) * sizeof(T)) {
+            return BAD_VALUE;
+        }
+
+        T *params = (T *)((uint8_t *)reply + sizeof(effect_param_t) + sizeof(uint32_t));
+        int numParams = 1;
+        if (MULTI_VALUES) {
+            numParams = (int)*params++;
+        }
+        if (numParams > kMaxEffectParamValues) {
+            return BAD_VALUE;
+        }
+        (*values).clear();
+        // Fix: restore "&params[...]" — the source had been corrupted by HTML
+        // entity mangling ("&para" rendered as the pilcrow character).
+        std::copy(&params[0], &params[numParams], back_inserter(*values));
+        return NO_ERROR;
+    }
+
+ /**
+ * Set a parameter to spatializer engine by calling setParameter on mEngine AudioEffect object.
+ * It is possible to pass more than one value of type T according to the parameter type
+ * according to values vector size.
+ */
+ // Sends `values.size()` values of type T as parameter `type` to the engine.
+ // Must be called with mLock held and mEngine non-null.
+ // NOTE(review): `cmd` is a runtime-sized array (a VLA, non-standard C++) —
+ // relies on a compiler extension; confirm this is intentional for this build.
+ template<typename T>
+ status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mLock) {
+ static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must less than 32 bits");
+
+ uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values.size()];
+ effect_param_t *p = (effect_param_t *)cmd;
+ p->psize = sizeof(uint32_t);
+ p->vsize = sizeof(T) * values.size();
+ *(uint32_t *)p->data = type;
+ // Value payload starts one uint32_t (the parameter key) into p->data.
+ memcpy((uint32_t *)p->data + 1, values.data(), sizeof(T) * values.size());
+
+ status_t status = mEngine->setParameter(p);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ // The per-parameter status is reported in the struct, separate from the call status.
+ if (p->status != NO_ERROR) {
+ return p->status;
+ }
+ return NO_ERROR;
+ }
+
+ /**
+ * Get a parameter from spatializer engine by calling getParameter on AudioEffect object.
+ * It is possible to read more than one value of type T according to the parameter type
+ * by specifying values vector size.
+ */
+ // Reads up to `values->size()` values of type T for parameter `type` from the
+ // engine; replaces the contents of *values with what was actually returned.
+ // Must be called with mLock held and mEngine non-null.
+ // NOTE(review): `cmd` is a runtime-sized array (a VLA, non-standard C++) —
+ // relies on a compiler extension; confirm this is intentional for this build.
+ template<typename T>
+ status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mLock) {
+ static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must less than 32 bits");
+
+ uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values->size()];
+ effect_param_t *p = (effect_param_t *)cmd;
+ p->psize = sizeof(uint32_t);
+ p->vsize = sizeof(T) * values->size();
+ *(uint32_t *)p->data = type;
+
+ status_t status = mEngine->getParameter(p);
+
+ if (status != NO_ERROR) {
+ return status;
+ }
+ if (p->status != NO_ERROR) {
+ return p->status;
+ }
+
+ // Clamp to the caller-provided capacity in case the engine returned more.
+ int numValues = std::min(p->vsize / sizeof(T), values->size());
+ (*values).clear();
+ T *retValues = (T *)((uint8_t *)p->data + sizeof(uint32_t));
+ std::copy(&retValues[0], &retValues[numValues], back_inserter(*values));
+
+ return NO_ERROR;
+ }
+
+ void postFramesProcessedMsg(int frames);
+
+ /** Effect engine descriptor */
+ const effect_descriptor_t mEngineDescriptor;
+ /** Callback interface to parent audio policy service */
+ SpatializerPolicyCallback* mPolicyCallback;
+
+ /** Mutex protecting internal state */
+ mutable std::mutex mLock;
+
+ /** Client AudioEffect for the engine */
+ sp<AudioEffect> mEngine GUARDED_BY(mLock);
+ /** Output stream the spatializer mixer thread is attached to */
+ audio_io_handle_t mOutput GUARDED_BY(mLock) = AUDIO_IO_HANDLE_NONE;
+
+ /** Callback interface to the client (AudioService) controlling this`Spatializer */
+ sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mLock);
+
+ /** Callback interface for head tracking */
+ sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mLock);
+
+ /** Requested spatialization level */
+ media::SpatializationLevel mLevel GUARDED_BY(mLock) = media::SpatializationLevel::NONE;
+
+ /** Control logic for head-tracking, etc. */
+ std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mLock);
+
+ /** Last requested head tracking mode */
+ media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mLock)
+ = media::HeadTrackingMode::STATIC;
+
+ /** Last-reported actual head-tracking mode. */
+ media::SpatializerHeadTrackingMode mActualHeadTrackingMode GUARDED_BY(mLock)
+ = media::SpatializerHeadTrackingMode::DISABLED;
+
+ /** Selected Head pose sensor */
+ int32_t mHeadSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+ /** Selected Screen pose sensor */
+ int32_t mScreenSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+ /** Last display orientation received */
+ static constexpr float kDisplayOrientationInvalid = 1000;
+ float mDisplayOrientation GUARDED_BY(mLock) = kDisplayOrientationInvalid;
+
+ std::vector<media::SpatializationLevel> mLevels;
+ std::vector<media::SpatializationMode> mSpatializationModes;
+ std::vector<audio_channel_mask_t> mChannelMasks;
+ bool mSupportsHeadTracking;
+
+ // Looper thread for mEngine callbacks
+ class EngineCallbackHandler;
+
+ sp<ALooper> mLooper;
+ sp<EngineCallbackHandler> mHandler;
+
+ static const std::vector<const char *> sHeadPoseKeys;
+};
+
+
+}; // namespace android
+
+#endif // ANDROID_MEDIA_SPATIALIZER_H
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
new file mode 100644
index 0000000..ffedf63
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "SpatializerPoseController.h"
+
+#define LOG_TAG "SpatializerPoseController"
+//#define LOG_NDEBUG 0
+#include <sensor/Sensor.h>
+#include <utils/Log.h>
+#include <utils/SystemClock.h>
+
+namespace android {
+
+using media::createHeadTrackingProcessor;
+using media::HeadTrackingMode;
+using media::HeadTrackingProcessor;
+using media::Pose3f;
+using media::SensorPoseProvider;
+using media::Twist3f;
+
+using namespace std::chrono_literals;
+
+namespace {
+
+// This is how fast, in m/s, we allow position to shift during rate-limiting.
+constexpr auto kMaxTranslationalVelocity = 2;
+
+// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
+constexpr auto kMaxRotationalVelocity = 4 * M_PI;
+
+// This should be set to the typical time scale that the translation sensors used drift in. This
+// means, loosely, for how long we can trust the reading to be "accurate enough". This would
+// determine the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kTranslationalDriftTimeConstant = 20s;
+
+// This should be set to the typical time scale that the rotation sensors used drift in. This
+// means, loosely, for how long we can trust the reading to be "accurate enough". This would
+// determine the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kRotationalDriftTimeConstant = 20s;
+
+// This is how far into the future we predict the head pose, using linear extrapolation based on
+// twist (velocity). It should be set to a value that matches the characteristic durations of moving
+// one's head. The higher we set this, the more latency we are able to reduce, but setting this too
+// high will result in high prediction errors whenever the head accelerates (changes velocity).
+constexpr auto kPredictionDuration = 10ms;
+
+// After losing this many consecutive samples from either sensor, we would treat the measurement as
+// stale;
+constexpr auto kMaxLostSamples = 4;
+
+// Time units for system clock ticks. This is what the Sensor Framework timestamps represent and
+// what we use for pose filtering.
+using Ticks = std::chrono::nanoseconds;
+
+// How many ticks in a second.
+constexpr auto kTicksPerSecond = Ticks::period::den;
+
+} // namespace
+
+// Ctor: builds the head-tracking processor (converting the time-based tuning
+// constants to tick units), creates the sensor pose provider, and starts the
+// worker thread that computes poses either on demand (calculateAsync) or at
+// least every maxUpdatePeriod. Member init order matters: mProcessor and
+// mPoseProvider must exist before mThread starts using them.
+SpatializerPoseController::SpatializerPoseController(Listener* listener,
+ std::chrono::microseconds sensorPeriod,
+ std::chrono::microseconds maxUpdatePeriod)
+ : mListener(listener),
+ mSensorPeriod(sensorPeriod),
+ mProcessor(createHeadTrackingProcessor(HeadTrackingProcessor::Options{
+ .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
+ .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
+ .translationalDriftTimeConstant = Ticks(kTranslationalDriftTimeConstant).count(),
+ .rotationalDriftTimeConstant = Ticks(kRotationalDriftTimeConstant).count(),
+ .freshnessTimeout = Ticks(sensorPeriod * kMaxLostSamples).count(),
+ .predictionDuration = Ticks(kPredictionDuration).count(),
+ })),
+ mPoseProvider(SensorPoseProvider::create("headtracker", this)),
+ mThread([this, maxUpdatePeriod] {
+ while (true) {
+ Pose3f headToStage;
+ std::optional<HeadTrackingMode> modeIfChanged;
+ {
+ std::unique_lock lock(mMutex);
+ // Wake on exit/calculate requests, or after maxUpdatePeriod elapses.
+ mCondVar.wait_for(lock, maxUpdatePeriod,
+ [this] { return mShouldExit || mShouldCalculate; });
+ if (mShouldExit) {
+ ALOGV("Exiting thread");
+ return;
+ }
+
+ // Calculate.
+ std::tie(headToStage, modeIfChanged) = calculate_l();
+ }
+
+ // Invoke the callbacks outside the lock.
+ mListener->onHeadToStagePose(headToStage);
+ if (modeIfChanged) {
+ mListener->onActualModeChange(modeIfChanged.value());
+ }
+
+ {
+ std::lock_guard lock(mMutex);
+ if (!mCalculated) {
+ // First completed calculation: release waitUntilCalculated().
+ mCalculated = true;
+ mCondVar.notify_all();
+ }
+ mShouldCalculate = false;
+ }
+ }
+ }) {}
+
+// Dtor: signals the worker thread to exit under the lock, then joins it so no
+// callback can run after destruction.
+SpatializerPoseController::~SpatializerPoseController() {
+ {
+ std::unique_lock lock(mMutex);
+ mShouldExit = true;
+ mCondVar.notify_all();
+ }
+ mThread.join();
+}
+
+// Selects (or disables, with INVALID_SENSOR) the head-tracking sensor. A sensor
+// shared with the screen role is not stopped/restarted. Recenters the head pose
+// after the switch since the new sensor's reference frame is unrelated.
+void SpatializerPoseController::setHeadSensor(int32_t sensor) {
+ std::lock_guard lock(mMutex);
+ // Stop current sensor, if valid and different from the other sensor.
+ if (mHeadSensor != INVALID_SENSOR && mHeadSensor != mScreenSensor) {
+ mPoseProvider->stopSensor(mHeadSensor);
+ }
+
+ if (sensor != INVALID_SENSOR) {
+ if (sensor != mScreenSensor) {
+ // Start new sensor.
+ mHeadSensor =
+ mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+ } else {
+ // Sensor is already enabled.
+ mHeadSensor = mScreenSensor;
+ }
+ } else {
+ mHeadSensor = INVALID_SENSOR;
+ }
+
+ // Recenter head only (first flag); the screen reference is unaffected.
+ mProcessor->recenter(true, false);
+}
+
+// Selects (or disables, with INVALID_SENSOR) the screen-tracking sensor.
+// Mirror image of setHeadSensor() above.
+void SpatializerPoseController::setScreenSensor(int32_t sensor) {
+ std::lock_guard lock(mMutex);
+ // Stop current sensor, if valid and different from the other sensor.
+ if (mScreenSensor != INVALID_SENSOR && mScreenSensor != mHeadSensor) {
+ mPoseProvider->stopSensor(mScreenSensor);
+ }
+
+ if (sensor != INVALID_SENSOR) {
+ if (sensor != mHeadSensor) {
+ // Start new sensor.
+ mScreenSensor =
+ mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+ } else {
+ // Sensor is already enabled.
+ mScreenSensor = mHeadSensor;
+ }
+ } else {
+ mScreenSensor = INVALID_SENSOR;
+ }
+
+ // Recenter screen only (second flag); the head reference is unaffected.
+ mProcessor->recenter(false, true);
+}
+
+// Thin, lock-guarded forwarder to the head-tracking processor.
+void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setDesiredMode(mode);
+}
+
+// Thin, lock-guarded forwarder to the head-tracking processor.
+void SpatializerPoseController::setScreenToStagePose(const Pose3f& screenToStage) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setScreenToStagePose(screenToStage);
+}
+
+// Thin, lock-guarded forwarder to the head-tracking processor.
+void SpatializerPoseController::setDisplayOrientation(float physicalToLogicalAngle) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setDisplayOrientation(physicalToLogicalAngle);
+}
+
+// Wakes the worker thread to perform one calculation; returns immediately.
+void SpatializerPoseController::calculateAsync() {
+ std::lock_guard lock(mMutex);
+ mShouldCalculate = true;
+ mCondVar.notify_all();
+}
+
+// Blocks until the worker thread has completed at least one calculation
+// (mCalculated is set once and never cleared). Do not call from callbacks.
+void SpatializerPoseController::waitUntilCalculated() {
+ std::unique_lock lock(mMutex);
+ mCondVar.wait(lock, [this] { return mCalculated; });
+}
+
+// Runs one processor calculation (timestamped with the system clock) and
+// returns the resulting head-to-stage pose, plus the actual mode only when it
+// differs from the last value reported. Caller must hold mMutex.
+std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>>
+SpatializerPoseController::calculate_l() {
+ Pose3f headToStage;
+ HeadTrackingMode mode;
+ std::optional<media::HeadTrackingMode> modeIfChanged;
+
+ mProcessor->calculate(elapsedRealtimeNano());
+ headToStage = mProcessor->getHeadToStagePose();
+ mode = mProcessor->getActualMode();
+ // Only report a mode on first calculation or when it actually changed.
+ if (!mActualMode.has_value() || mActualMode.value() != mode) {
+ mActualMode = mode;
+ modeIfChanged = mode;
+ }
+ return std::make_tuple(headToStage, modeIfChanged);
+}
+
+// Makes the current head and screen poses the new "center" reference.
+void SpatializerPoseController::recenter() {
+ std::lock_guard lock(mMutex);
+ mProcessor->recenter();
+}
+
+// SensorPoseProvider::Listener: routes an incoming sensor pose to the head
+// and/or screen input of the processor (both branches can apply when one
+// sensor serves both roles). A new reference frame triggers a recenter of the
+// corresponding side only.
+void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
+ const std::optional<Twist3f>& twist, bool isNewReference) {
+ std::lock_guard lock(mMutex);
+ if (sensor == mHeadSensor) {
+ // Missing twist (velocity) is treated as zero.
+ mProcessor->setWorldToHeadPose(timestamp, pose, twist.value_or(Twist3f()));
+ if (isNewReference) {
+ mProcessor->recenter(true, false);
+ }
+ }
+ if (sensor == mScreenSensor) {
+ mProcessor->setWorldToScreenPose(timestamp, pose);
+ if (isNewReference) {
+ mProcessor->recenter(false, true);
+ }
+ }
+}
+
+} // namespace android
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
new file mode 100644
index 0000000..2b5c189
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <condition_variable>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include <media/HeadTrackingProcessor.h>
+#include <media/SensorPoseProvider.h>
+
+namespace android {
+
+/**
+ * This class encapsulates the logic for pose processing, intended for driving a spatializer effect.
+ * This includes integration with the Sensor sub-system for retrieving sensor data, doing all the
+ * necessary processing, etc.
+ *
+ * Calculations happen on a dedicated thread and published to the client via the Listener interface.
+ * A calculation may be triggered in one of two ways:
+ * - By calling calculateAsync() - calculation will be kicked off in the background.
+ * - By setting a timeout in the ctor, a calculation will be triggered after the timeout elapsed
+ * from the last calculateAsync() call.
+ *
+ * This class is thread-safe.
+ */
+class SpatializerPoseController : private media::SensorPoseProvider::Listener {
+ public:
+ static constexpr int32_t INVALID_SENSOR = media::SensorPoseProvider::INVALID_HANDLE;
+
+ /**
+ * Listener interface for getting pose and mode updates.
+ * Methods will always be invoked from a designated thread.
+ */
+ class Listener {
+ public:
+ virtual ~Listener() = default;
+
+ virtual void onHeadToStagePose(const media::Pose3f&) = 0;
+ virtual void onActualModeChange(media::HeadTrackingMode) = 0;
+ };
+
+ /**
+ * Ctor.
+ * sensorPeriod determines how often to receive updates from the sensors (input rate).
+ * maxUpdatePeriod determines how often to produce an output when calculateAsync() isn't
+ * invoked.
+ */
+ SpatializerPoseController(Listener* listener, std::chrono::microseconds sensorPeriod,
+ std::chrono::microseconds maxUpdatePeriod);
+
+ /** Dtor. */
+ ~SpatializerPoseController();
+
+ /**
+ * Set the sensor that is to be used for head-tracking.
+ * INVALID_SENSOR can be used to disable head-tracking.
+ */
+ void setHeadSensor(int32_t sensor);
+
+ /**
+ * Set the sensor that is to be used for screen-tracking.
+ * INVALID_SENSOR can be used to disable screen-tracking.
+ */
+ void setScreenSensor(int32_t sensor);
+
+ /** Sets the desired head-tracking mode. */
+ void setDesiredMode(media::HeadTrackingMode mode);
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ void setScreenToStagePose(const media::Pose3f& screenToStage);
+
+ /**
+ * Sets the display orientation.
+ * Orientation is expressed in the angle of rotation from the physical "up" side of the screen
+ * to the logical "up" side of the content displayed the screen. Counterclockwise angles, as
+ * viewed while facing the screen are positive.
+ */
+ void setDisplayOrientation(float physicalToLogicalAngle);
+
+ /**
+ * This causes the current poses for both the head and screen to be considered "center".
+ */
+ void recenter();
+
+ /**
+ * This call triggers the recalculation of the output and the invocation of the relevant
+ * callbacks. This call is async and the callbacks will be triggered shortly after.
+ */
+ void calculateAsync();
+
+ /**
+ * Blocks until calculation and invocation of the respective callbacks has happened at least
+ * once. Do not call from within callbacks.
+ */
+ void waitUntilCalculated();
+
+ private:
+ mutable std::mutex mMutex;
+ Listener* const mListener;
+ const std::chrono::microseconds mSensorPeriod;
+ // Order matters for the following two members to ensure correct destruction.
+ std::unique_ptr<media::HeadTrackingProcessor> mProcessor;
+ std::unique_ptr<media::SensorPoseProvider> mPoseProvider;
+ int32_t mHeadSensor = media::SensorPoseProvider::INVALID_HANDLE;
+ int32_t mScreenSensor = media::SensorPoseProvider::INVALID_HANDLE;
+ std::optional<media::HeadTrackingMode> mActualMode;
+ std::thread mThread;
+ std::condition_variable mCondVar;
+ bool mShouldCalculate = true;
+ bool mShouldExit = false;
+ bool mCalculated = false;
+
+ void onPose(int64_t timestamp, int32_t sensor, const media::Pose3f& pose,
+ const std::optional<media::Twist3f>& twist, bool isNewReference) override;
+
+ /**
+ * Calculates the new outputs and updates internal state. Must be called with the lock held.
+ * Returns values that should be passed to the respective callbacks.
+ */
+ std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>> calculate_l();
+};
+
+} // namespace android
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index b296fb0..8fbe8b2 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -25,7 +25,7 @@
"libmedia_helper",
"libutils",
"libxml2",
- "libpermission",
+ "framework-permission-aidl-cpp",
"libbinder",
],
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
index f7b0565..84b40d2 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -37,7 +37,8 @@
status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t * /*config*/,
+ audio_config_t * /*halConfig*/,
+ audio_config_base_t * /*mixerConfig*/,
const sp<DeviceDescriptorBase>& /*device*/,
uint32_t * /*latencyMs*/,
audio_output_flags_t /*flags*/) override {
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
index a5ad9b1..7343b9b 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
@@ -28,19 +28,26 @@
class AudioPolicyManagerTestClientForHdmi : public AudioPolicyManagerTestClient {
public:
String8 getParameters(audio_io_handle_t /* ioHandle */, const String8& /* keys*/ ) override {
+ AudioParameter mAudioParameters;
+ std::string formats;
+ for (const auto& f : mSupportedFormats) {
+ if (!formats.empty()) formats += AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
+ formats += audio_format_to_string(f);
+ }
+ mAudioParameters.add(
+ String8(AudioParameter::keyStreamSupportedFormats),
+ String8(formats.c_str()));
+ mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
+ mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
return mAudioParameters.toString();
}
void addSupportedFormat(audio_format_t format) override {
- mAudioParameters.add(
- String8(AudioParameter::keyStreamSupportedFormats),
- String8(audio_format_to_string(format)));
- mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
- mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
+ mSupportedFormats.insert(format);
}
private:
- AudioParameter mAudioParameters;
+ std::set<audio_format_t> mSupportedFormats;
};
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 1384864..4e0735b 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -30,7 +30,8 @@
}
status_t openOutput(audio_module_handle_t /*module*/,
audio_io_handle_t* /*output*/,
- audio_config_t* /*config*/,
+ audio_config_t* /*halConfig*/,
+ audio_config_base_t* /*mixerConfig*/,
const sp<DeviceDescriptorBase>& /*device*/,
uint32_t* /*latencyMs*/,
audio_output_flags_t /*flags*/) override { return NO_INIT; }
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a16ab7d..9c1adc6 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -151,7 +151,7 @@
void AudioPolicyManagerTest::SetUp() {
mClient.reset(getClient());
mManager.reset(new AudioPolicyTestManager(mClient.get()));
- SetUpManagerConfig(); // Subclasses may want to customize the config.
+ ASSERT_NO_FATAL_FAILURE(SetUpManagerConfig()); // Subclasses may want to customize the config.
ASSERT_EQ(NO_ERROR, mManager->initialize());
ASSERT_EQ(NO_ERROR, mManager->initCheck());
}
@@ -401,7 +401,7 @@
void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
// TODO: Consider using Serializer to load part of the config from a string.
- AudioPolicyManagerTest::SetUpManagerConfig();
+ ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUpManagerConfig());
AudioPolicyConfig& config = mManager->getConfig();
mMsdOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_BUS);
sp<AudioProfile> pcmOutputProfile = new AudioProfile(
@@ -660,6 +660,7 @@
void AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig() {
status_t status = deserializeAudioPolicyFile(getConfigFile().c_str(), &mManager->getConfig());
ASSERT_EQ(NO_ERROR, status);
+ mManager->getConfig().setSource(getConfigFile());
}
TEST_F(AudioPolicyManagerTestWithConfigurationFile, InitSuccess) {
@@ -803,7 +804,8 @@
}
class AudioPolicyManagerTestForHdmi
- : public AudioPolicyManagerTestWithConfigurationFile {
+ : public AudioPolicyManagerTestWithConfigurationFile,
+ public testing::WithParamInterface<audio_format_t> {
protected:
void SetUp() override;
std::string getConfigFile() override { return sTvConfig; }
@@ -824,7 +826,8 @@
"test_settop_box_surround_configuration.xml";
void AudioPolicyManagerTestForHdmi::SetUp() {
- AudioPolicyManagerTest::SetUp();
+ ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTest::SetUp());
+ mClient->addSupportedFormat(AUDIO_FORMAT_AC3);
mClient->addSupportedFormat(AUDIO_FORMAT_E_AC3);
mManager->setDeviceConnectionState(
AUDIO_DEVICE_OUT_HDMI, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
@@ -914,76 +917,90 @@
return formats;
}
-TEST_F(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
mManager->setForceUse(
AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
auto surroundFormats = getSurroundFormatsHelper();
- ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+ ASSERT_EQ(1, surroundFormats.count(GetParam()));
}
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
GetSurroundFormatsReturnsManipulatedFormats) {
mManager->setForceUse(
AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
status_t ret =
- mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+ mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
ASSERT_EQ(NO_ERROR, ret);
auto surroundFormats = getSurroundFormatsHelper();
- ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
- ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+ ASSERT_EQ(1, surroundFormats.count(GetParam()));
+ ASSERT_FALSE(surroundFormats[GetParam()]);
- ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+ ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
ASSERT_EQ(NO_ERROR, ret);
surroundFormats = getSurroundFormatsHelper();
- ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
- ASSERT_TRUE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+ ASSERT_EQ(1, surroundFormats.count(GetParam()));
+ ASSERT_TRUE(surroundFormats[GetParam()]);
- ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+ ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
ASSERT_EQ(NO_ERROR, ret);
surroundFormats = getSurroundFormatsHelper();
- ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
- ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+ ASSERT_EQ(1, surroundFormats.count(GetParam()));
+ ASSERT_FALSE(surroundFormats[GetParam()]);
}
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
ListAudioPortsReturnManipulatedHdmiFormats) {
mManager->setForceUse(
AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
- ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/));
+ ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/));
auto formats = getFormatsFromPorts();
- ASSERT_EQ(0, formats.count(AUDIO_FORMAT_E_AC3));
+ ASSERT_EQ(0, formats.count(GetParam()));
- ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/));
+ ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/));
formats = getFormatsFromPorts();
- ASSERT_EQ(1, formats.count(AUDIO_FORMAT_E_AC3));
+ ASSERT_EQ(1, formats.count(GetParam()));
}
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
GetReportedSurroundFormatsReturnsHdmiReportedFormats) {
mManager->setForceUse(
AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
auto surroundFormats = getReportedSurroundFormatsHelper();
- ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+ ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
}
-TEST_F(AudioPolicyManagerTestForHdmi,
+TEST_P(AudioPolicyManagerTestForHdmi,
GetReportedSurroundFormatsReturnsNonManipulatedHdmiReportedFormats) {
mManager->setForceUse(
AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
- status_t ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+ status_t ret = mManager->setSurroundFormatEnabled(GetParam(), false /*enabled*/);
ASSERT_EQ(NO_ERROR, ret);
auto surroundFormats = getReportedSurroundFormatsHelper();
- ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+ ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
- ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+ ret = mManager->setSurroundFormatEnabled(GetParam(), true /*enabled*/);
ASSERT_EQ(NO_ERROR, ret);
surroundFormats = getReportedSurroundFormatsHelper();
- ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), AUDIO_FORMAT_E_AC3));
+ ASSERT_EQ(1, std::count(surroundFormats.begin(), surroundFormats.end(), GetParam()));
}
+TEST_P(AudioPolicyManagerTestForHdmi, GetSurroundFormatsIgnoresSupportedFormats) {
+ mManager->setForceUse(
+ AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER);
+ auto surroundFormats = getSurroundFormatsHelper();
+ ASSERT_EQ(1, surroundFormats.count(GetParam()));
+ ASSERT_FALSE(surroundFormats[GetParam()]);
+}
+
+INSTANTIATE_TEST_SUITE_P(SurroundFormatSupport, AudioPolicyManagerTestForHdmi,
+ testing::Values(AUDIO_FORMAT_AC3, AUDIO_FORMAT_E_AC3),
+ [](const ::testing::TestParamInfo<AudioPolicyManagerTestForHdmi::ParamType>& info) {
+ return audio_format_to_string(info.param);
+ });
+
class AudioPolicyManagerTestDPNoRemoteSubmixModule : public AudioPolicyManagerTestDynamicPolicy {
protected:
std::string getConfigFile() override { return sPrimaryOnlyConfig; }
@@ -1035,7 +1052,7 @@
};
void AudioPolicyManagerTestDPPlaybackReRouting::SetUp() {
- AudioPolicyManagerTestDynamicPolicy::SetUp();
+ ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
mTracker.reset(new RecordingActivityTracker());
@@ -1221,7 +1238,7 @@
};
void AudioPolicyManagerTestDPMixRecordInjection::SetUp() {
- AudioPolicyManagerTestDynamicPolicy::SetUp();
+ ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestDynamicPolicy::SetUp());
mTracker.reset(new RecordingActivityTracker());
@@ -1375,7 +1392,8 @@
if (type == AUDIO_DEVICE_OUT_HDMI) {
// Set device connection state failed due to no device descriptor found
// For HDMI case, it is easier to simulate device descriptor not found error
- // by using a undeclared encoded format.
+ // by using an encoded format which isn't listed in the 'encodedFormats'
+ // attribute for this devicePort.
ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address.c_str(), name.c_str(), AUDIO_FORMAT_MAT_2_1));
@@ -1519,7 +1537,7 @@
};
void AudioPolicyManagerDynamicHwModulesTest::SetUpManagerConfig() {
- AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig();
+ ASSERT_NO_FATAL_FAILURE(AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig());
// Only allow successful opening of "primary" hw module during APM initialization.
mClient->swapAllowedModuleNames({"primary"});
}
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
index 87f0ab9..41ed70c 100644
--- a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
+++ b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
@@ -50,7 +50,8 @@
</devicePort>
<devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
</devicePort>
- <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink">
+ <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink"
+ encodedFormats="AUDIO_FORMAT_AC3">
</devicePort>
<devicePort tagName="Hdmi-In Mic" type="AUDIO_DEVICE_IN_HDMI" role="source">
</devicePort>
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 26562e0..96da0ab 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -84,6 +84,7 @@
"device3/Camera3OutputUtils.cpp",
"device3/Camera3DeviceInjectionMethods.cpp",
"device3/UHRCropAndMeteringRegionMapper.cpp",
+ "device3/PreviewFrameScheduler.cpp",
"gui/RingBufferConsumer.cpp",
"hidl/AidlCameraDeviceCallbacks.cpp",
"hidl/AidlCameraServiceListener.cpp",
@@ -107,6 +108,7 @@
],
shared_libs: [
+ "libandroid",
"libbase",
"libdl",
"libexif",
@@ -154,6 +156,7 @@
"android.hardware.camera.device@3.5",
"android.hardware.camera.device@3.6",
"android.hardware.camera.device@3.7",
+ "android.hardware.camera.device@3.8",
"media_permission-aidl-cpp",
],
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
deleted file mode 100644
index 4cfecfd..0000000
--- a/services/camera/libcameraservice/Android.mk
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2010 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-# Build tests
-
-include $(LOCAL_PATH)/tests/Android.mk
-
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index d0d3a9d..569ab63 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -28,6 +28,7 @@
#include <sys/types.h>
#include <inttypes.h>
#include <pthread.h>
+#include <poll.h>
#include <android/hardware/ICamera.h>
#include <android/hardware/ICameraClient.h>
@@ -137,6 +138,7 @@
static constexpr int32_t kVendorClientState = ActivityManager::PROCESS_STATE_PERSISTENT_UI;
const String8 CameraService::kOfflineDevice("offline-");
+const String16 CameraService::kWatchAllClientsFlag("all");
// Set to keep track of logged service error events.
static std::set<String8> sServiceErrorEventSet;
@@ -560,6 +562,13 @@
onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
}
+
+void CameraService::onTorchStatusChanged(const String8& cameraId,
+ TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
+ Mutex::Autolock al(mTorchStatusMutex);
+ onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
+}
+
void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
@@ -613,8 +622,10 @@
broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
}
-static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
- return checkPermission(sSystemCameraPermission, callingPid, callingUid) &&
+static bool hasPermissionsForSystemCamera(int callingPid, int callingUid,
+ bool logPermissionFailure = false) {
+ return checkPermission(sSystemCameraPermission, callingPid, callingUid,
+ logPermissionFailure) &&
checkPermission(sCameraPermission, callingPid, callingUid);
}
@@ -693,8 +704,8 @@
const std::vector<std::string> *deviceIds = &mNormalDeviceIdsWithoutSystemCamera;
auto callingPid = CameraThreadState::getCallingPid();
auto callingUid = CameraThreadState::getCallingUid();
- if (checkPermission(sSystemCameraPermission, callingPid, callingUid) ||
- getpid() == callingPid) {
+ if (checkPermission(sSystemCameraPermission, callingPid, callingUid,
+ /*logPermissionFailure*/false) || getpid() == callingPid) {
deviceIds = &mNormalDeviceIds;
}
if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(deviceIds->size())) {
@@ -900,6 +911,7 @@
case CAMERA_DEVICE_API_VERSION_3_5:
case CAMERA_DEVICE_API_VERSION_3_6:
case CAMERA_DEVICE_API_VERSION_3_7:
+ case CAMERA_DEVICE_API_VERSION_3_8:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
*client = new Camera2Client(cameraService, tmp, packageName, featureId,
@@ -1334,7 +1346,7 @@
auto clientSp = current->getValue();
if (clientSp.get() != nullptr) { // should never be needed
if (!clientSp->canCastToApiClient(effectiveApiLevel)) {
- ALOGW("CameraService connect called from same client, but with a different"
+ ALOGW("CameraService connect called with a different"
" API level, evicting prior client...");
} else if (clientSp->getRemote() == remoteCallback) {
ALOGI("CameraService::connect X (PID %d) (second call from same"
@@ -1597,7 +1609,7 @@
// same behavior for system camera devices.
if (getCurrentServingCall() != BinderCallType::HWBINDER &&
systemCameraKind == SystemCameraKind::SYSTEM_ONLY_CAMERA &&
- !hasPermissionsForSystemCamera(cPid, cUid)) {
+ !hasPermissionsForSystemCamera(cPid, cUid, /*logPermissionFailure*/true)) {
ALOGW("Rejecting access to system only camera %s, inadequete permissions",
cameraId.c_str());
return true;
@@ -1786,7 +1798,8 @@
LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
__FUNCTION__);
- err = client->initialize(mCameraProviderManager, mMonitorTags);
+ String8 monitorTags = isClientWatched(client.get()) ? mMonitorTags : String8("");
+ err = client->initialize(mCameraProviderManager, monitorTags);
if (err != OK) {
ALOGE("%s: Could not initialize client from HAL.", __FUNCTION__);
// Errors could be from the HAL module open call or from AppOpsManager
@@ -1834,9 +1847,9 @@
// Set rotate-and-crop override behavior
if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
- } else if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(clientPackageName,
- orientation, facing)) {
- client->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
+ } else if (effectiveApiLevel == API_2) {
+ client->setRotateAndCropOverride(CameraServiceProxyWrapper::getRotateAndCropOverride(
+ clientPackageName, facing));
}
// Set camera muting behavior
@@ -1888,6 +1901,33 @@
CameraServiceProxyWrapper::logOpen(cameraId, facing, clientPackageName,
effectiveApiLevel, isNdk, openLatencyMs);
+ {
+ Mutex::Autolock lock(mInjectionParametersLock);
+ if (cameraId == mInjectionInternalCamId && mInjectionInitPending) {
+ mInjectionInitPending = false;
+ status_t res = NO_ERROR;
+ auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+ if (clientDescriptor != nullptr) {
+ sp<BasicClient> clientSp = clientDescriptor->getValue();
+ res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+ if(res != OK) {
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available",
+ mInjectionExternalCamId.string());
+ }
+ res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+ if (res != OK) {
+ mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+ }
+ } else {
+ ALOGE("%s: Internal camera ID = %s 's client does not exist!",
+ __FUNCTION__, mInjectionInternalCamId.string());
+ res = NO_INIT;
+ mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+ }
+ }
+ }
+
return ret;
}
@@ -1931,7 +1971,8 @@
return BAD_VALUE;
}
- auto err = offlineClient->initialize(mCameraProviderManager, mMonitorTags);
+ String8 monitorTags = isClientWatched(offlineClient.get()) ? mMonitorTags : String8("");
+ auto err = offlineClient->initialize(mCameraProviderManager, monitorTags);
if (err != OK) {
ALOGE("%s: Could not initialize offline client.", __FUNCTION__);
return err;
@@ -2054,6 +2095,11 @@
id.string());
errorCode = ERROR_ILLEGAL_ARGUMENT;
break;
+ case -EBUSY:
+ msg = String8::format("Camera \"%s\" is in use",
+ id.string());
+ errorCode = ERROR_CAMERA_IN_USE;
+ break;
default:
msg = String8::format(
"Setting torch mode of camera \"%s\" to %d failed: %s (%d)",
@@ -2178,7 +2224,6 @@
newDeviceState |= vendorBits;
ALOGV("%s: New device state 0x%" PRIx64, __FUNCTION__, newDeviceState);
- Mutex::Autolock l(mServiceLock);
mCameraProviderManager->notifyDeviceStateChange(newDeviceState);
return Status::ok();
@@ -2212,14 +2257,10 @@
for (auto& current : clients) {
if (current != nullptr) {
const auto basicClient = current->getValue();
- if (basicClient.get() != nullptr) {
- if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
- basicClient->getPackageName(), basicClient->getCameraOrientation(),
- basicClient->getCameraFacing())) {
- basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
- } else {
- basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE);
- }
+ if (basicClient.get() != nullptr && basicClient->canCastToApiClient(API_2)) {
+ basicClient->setRotateAndCropOverride(
+ CameraServiceProxyWrapper::getRotateAndCropOverride(
+ basicClient->getPackageName(), basicClient->getCameraFacing()));
}
}
}
@@ -2336,7 +2377,7 @@
auto clientUid = CameraThreadState::getCallingUid();
auto clientPid = CameraThreadState::getCallingPid();
bool openCloseCallbackAllowed = checkPermission(sCameraOpenCloseListenerPermission,
- clientPid, clientUid);
+ clientPid, clientUid, /*logPermissionFailure*/false);
Mutex::Autolock lock(mServiceLock);
@@ -2373,7 +2414,8 @@
Mutex::Autolock lock(mCameraStatesLock);
for (auto& i : mCameraStates) {
cameraStatuses->emplace_back(i.first,
- mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds());
+ mapToInterface(i.second->getStatus()), i.second->getUnavailablePhysicalIds(),
+ openCloseCallbackAllowed ? i.second->getClientPackage() : String8::empty());
}
}
// Remove the camera statuses that should be hidden from the client, we do
@@ -2511,6 +2553,7 @@
case CAMERA_DEVICE_API_VERSION_3_5:
case CAMERA_DEVICE_API_VERSION_3_6:
case CAMERA_DEVICE_API_VERSION_3_7:
+ case CAMERA_DEVICE_API_VERSION_3_8:
ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
__FUNCTION__, id.string());
*isSupported = true;
@@ -2548,7 +2591,7 @@
const String16& externalCamId,
const sp<ICameraInjectionCallback>& callback,
/*out*/
- sp<hardware::camera2::ICameraInjectionSession>* cameraInjectionSession) {
+ sp<ICameraInjectionSession>* cameraInjectionSession) {
ATRACE_CALL();
if (!checkCallingPermission(sCameraInjectExternalCameraPermission)) {
@@ -2565,18 +2608,36 @@
__FUNCTION__, String8(packageName).string(),
String8(internalCamId).string(), String8(externalCamId).string());
- binder::Status ret = binder::Status::ok();
- // TODO: Implement the injection camera function.
- // ret = internalInjectCamera(...);
- // if(!ret.isOk()) {
- // mInjectionStatusListener->notifyInjectionError(...);
- // return ret;
- // }
+ {
+ Mutex::Autolock lock(mInjectionParametersLock);
+ mInjectionInternalCamId = String8(internalCamId);
+ mInjectionExternalCamId = String8(externalCamId);
+ mInjectionStatusListener->addListener(callback);
+ *cameraInjectionSession = new CameraInjectionSession(this);
+ status_t res = NO_ERROR;
+ auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+ // If the client already exists, we can directly connect to the camera device through the
+ // client's injectCamera(), otherwise we need to wait until the client is established
+ // (execute connectHelper()) before injecting the camera to the camera device.
+ if (clientDescriptor != nullptr) {
+ mInjectionInitPending = false;
+ sp<BasicClient> clientSp = clientDescriptor->getValue();
+ res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+ if(res != OK) {
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available",
+ mInjectionExternalCamId.string());
+ }
+ res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+ if(res != OK) {
+ mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+ }
+ } else {
+ mInjectionInitPending = true;
+ }
+ }
- mInjectionStatusListener->addListener(callback);
- *cameraInjectionSession = new CameraInjectionSession(this);
-
- return ret;
+ return binder::Status::ok();
}
void CameraService::removeByClient(const BasicClient* client) {
@@ -2584,6 +2645,7 @@
for (auto& i : mActiveClientManager.getAll()) {
auto clientSp = i->getValue();
if (clientSp.get() == client) {
+ cacheClientTagDumpIfNeeded(client->mCameraIdStr, clientSp.get());
mActiveClientManager.remove(i);
}
}
@@ -2660,7 +2722,11 @@
return sp<BasicClient>{nullptr};
}
- return clientDescriptorPtr->getValue();
+ sp<BasicClient> client = clientDescriptorPtr->getValue();
+ if (client.get() != nullptr) {
+ cacheClientTagDumpIfNeeded(clientDescriptorPtr->getKey(), client.get());
+ }
+ return client;
}
void CameraService::doUserSwitch(const std::vector<int32_t>& newUserIds) {
@@ -3079,6 +3145,21 @@
return OK;
}
+status_t CameraService::BasicClient::startWatchingTags(const String8&, int) {
+ // Can't watch tags directly, must go through CameraService::startWatchingTags
+ return OK;
+}
+
+status_t CameraService::BasicClient::stopWatchingTags(int) {
+ // Can't watch tags directly, must go through CameraService::stopWatchingTags
+ return OK;
+}
+
+status_t CameraService::BasicClient::dumpWatchedEventsToVector(std::vector<std::string> &) {
+ // Can't watch tags directly, must go through CameraService::dumpWatchedEventsToVector
+ return OK;
+}
+
String16 CameraService::BasicClient::getPackageName() const {
return mClientPackageName;
}
@@ -3731,6 +3812,16 @@
return count > 0;
}
+void CameraService::CameraState::setClientPackage(const String8& clientPackage) {
+ Mutex::Autolock lock(mStatusLock);
+ mClientPackage = clientPackage;
+}
+
+String8 CameraService::CameraState::getClientPackage() const {
+ Mutex::Autolock lock(mStatusLock);
+ return mClientPackage;
+}
+
// ----------------------------------------------------------------------------
// ClientEventListener
// ----------------------------------------------------------------------------
@@ -3865,22 +3956,62 @@
}
void CameraService::InjectionStatusListener::notifyInjectionError(
- int errorCode) {
- Mutex::Autolock lock(mListenerLock);
+ String8 injectedCamId, status_t err) {
if (mCameraInjectionCallback == nullptr) {
ALOGW("InjectionStatusListener: mCameraInjectionCallback == nullptr");
return;
}
- mCameraInjectionCallback->onInjectionError(errorCode);
+
+ switch (err) {
+ case -ENODEV:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("No camera device with ID \"%s\" currently available!",
+ injectedCamId.string());
+ break;
+ case -EBUSY:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("Higher-priority client using camera, ID \"%s\" currently unavailable!",
+ injectedCamId.string());
+ break;
+ case DEAD_OBJECT:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("Camera ID \"%s\" object is dead!",
+ injectedCamId.string());
+ break;
+ case INVALID_OPERATION:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("Camera ID \"%s\" encountered an operating or internal error!",
+ injectedCamId.string());
+ break;
+ case UNKNOWN_TRANSACTION:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_UNSUPPORTED);
+ ALOGE("Camera ID \"%s\" method doesn't support!",
+ injectedCamId.string());
+ break;
+ default:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_INVALID_ERROR);
+ ALOGE("Unexpected error %s (%d) opening camera \"%s\"!",
+ strerror(-err), err, injectedCamId.string());
+ }
}
void CameraService::InjectionStatusListener::binderDied(
const wp<IBinder>& /*who*/) {
- Mutex::Autolock lock(mListenerLock);
ALOGV("InjectionStatusListener: ICameraInjectionCallback has died");
auto parent = mParent.promote();
if (parent != nullptr) {
- parent->stopInjectionImpl();
+ auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+ if (clientDescriptor != nullptr) {
+ BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+ baseClientPtr->stopInjection();
+ }
+ parent->clearInjectionParameters();
}
}
@@ -3896,7 +4027,20 @@
return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SERVICE,
"Camera service encountered error");
}
- parent->stopInjectionImpl();
+
+ status_t res = NO_ERROR;
+ auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+ if (clientDescriptor != nullptr) {
+ BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+ res = baseClientPtr->stopInjection();
+ if (res != OK) {
+ ALOGE("CameraInjectionSession: Failed to stop the injection camera!"
+ " ret != NO_ERROR: %d", res);
+ return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SESSION,
+ "Camera session encountered error");
+ }
+ }
+ parent->clearInjectionParameters();
return binder::Status::ok();
}
@@ -4037,7 +4181,7 @@
// Dump camera traces if there were any
dprintf(fd, "\n");
- camera3::CameraTraces::dump(fd, args);
+ camera3::CameraTraces::dump(fd);
// Process dump arguments, if any
int n = args.size();
@@ -4131,6 +4275,37 @@
dprintf(fd, "\n");
}
+void CameraService::cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient* client) {
+ Mutex::Autolock lock(mLogLock);
+ if (!isClientWatchedLocked(client)) { return; }
+
+ std::vector<std::string> dumpVector;
+ client->dumpWatchedEventsToVector(dumpVector);
+
+ if (dumpVector.empty()) { return; }
+
+ const String16 &packageName = client->getPackageName();
+
+ String8 packageName8 = String8(packageName);
+ const char *printablePackageName = packageName8.lockBuffer(packageName.size());
+
+ std::string dumpString;
+ size_t i = dumpVector.size();
+
+ // Store the string in reverse order (latest last)
+ while (i > 0) {
+ i--;
+ dumpString += cameraId;
+ dumpString += ":";
+ dumpString += printablePackageName;
+ dumpString += " ";
+ dumpString += dumpVector[i]; // implicitly ends with '\n'
+ }
+
+ packageName8.unlockBuffer();
+ mWatchedClientsDumpCache[packageName] = dumpString;
+}
+
void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
Mutex::Autolock al(mTorchClientMapMutex);
for (size_t i = 0; i < mTorchClientMap.size(); i++) {
@@ -4241,6 +4416,18 @@
void CameraService::updateOpenCloseStatus(const String8& cameraId, bool open,
const String16& clientPackageName) {
+ auto state = getCameraState(cameraId);
+ if (state == nullptr) {
+ ALOGW("%s: Could not update the status for %s, no such device exists", __FUNCTION__,
+ cameraId.string());
+ return;
+ }
+ if (open) {
+ state->setClientPackage(String8(clientPackageName));
+ } else {
+ state->setClientPackage(String8::empty());
+ }
+
Mutex::Autolock lock(mStatusListenerLock);
for (const auto& it : mListenerList) {
@@ -4423,9 +4610,11 @@
return handleGetImageDumpMask(out);
} else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
return handleSetCameraMute(args);
+ } else if (args.size() >= 2 && args[0] == String16("watch")) {
+ return handleWatchCommand(args, in, out);
} else if (args.size() == 1 && args[0] == String16("help")) {
printHelp(out);
- return NO_ERROR;
+ return OK;
}
printHelp(err);
return BAD_VALUE;
@@ -4569,6 +4758,348 @@
return OK;
}
+status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) {
+ if (args.size() >= 3 && args[1] == String16("start")) {
+ return startWatchingTags(args, outFd);
+ } else if (args.size() == 2 && args[1] == String16("stop")) {
+ return stopWatchingTags(outFd);
+ } else if (args.size() == 2 && args[1] == String16("dump")) {
+ return printWatchedTags(outFd);
+ } else if (args.size() >= 2 && args[1] == String16("live")) {
+ return printWatchedTagsUntilInterrupt(args, inFd, outFd);
+ } else if (args.size() == 2 && args[1] == String16("clear")) {
+ return clearCachedMonitoredTagDumps(outFd);
+ }
+ dprintf(outFd, "Camera service watch commands:\n"
+ " start -m <comma_separated_tag_list> [-c <comma_separated_client_list>]\n"
+ " starts watching the provided tags for clients with provided package\n"
+ " recognizes tag shorthands like '3a'\n"
+ " watches all clients if no client is passed, or if 'all' is listed\n"
+ " dump dumps the monitoring information and exits\n"
+ " stop stops watching all tags\n"
+ " live [-n <refresh_interval_ms>]\n"
+ " prints the monitored information in real time\n"
+ " Hit return to exit\n"
+ " clear clears all buffers storing information for watch command");
+ return BAD_VALUE;
+}
+
+status_t CameraService::startWatchingTags(const Vector<String16> &args, int outFd) {
+ Mutex::Autolock lock(mLogLock);
+ size_t tagsIdx; // index of '-m'
+ String16 tags("");
+ for (tagsIdx = 2; tagsIdx < args.size() && args[tagsIdx] != String16("-m"); tagsIdx++);
+ if (tagsIdx < args.size() - 1) {
+ tags = args[tagsIdx + 1];
+ } else {
+ dprintf(outFd, "No tags provided.\n");
+ return BAD_VALUE;
+ }
+
+ size_t clientsIdx; // index of '-c'
+ String16 clients = kWatchAllClientsFlag; // watch all clients if no clients are provided
+ for (clientsIdx = 2; clientsIdx < args.size() && args[clientsIdx] != String16("-c");
+ clientsIdx++);
+ if (clientsIdx < args.size() - 1) {
+ clients = args[clientsIdx + 1];
+ }
+ parseClientsToWatchLocked(String8(clients));
+
+ // track tags to initialize future clients with the monitoring information
+ mMonitorTags = String8(tags);
+
+ bool serviceLock = tryLock(mServiceLock);
+ int numWatchedClients = 0;
+ auto cameraClients = mActiveClientManager.getAll();
+ for (const auto &clientDescriptor: cameraClients) {
+ if (clientDescriptor == nullptr) { continue; }
+ sp<BasicClient> client = clientDescriptor->getValue();
+ if (client.get() == nullptr) { continue; }
+
+ if (isClientWatchedLocked(client.get())) {
+ client->startWatchingTags(mMonitorTags, outFd);
+ numWatchedClients++;
+ }
+ }
+ dprintf(outFd, "Started watching %d active clients\n", numWatchedClients);
+
+ if (serviceLock) { mServiceLock.unlock(); }
+ return OK;
+}
+
+status_t CameraService::stopWatchingTags(int outFd) {
+ // clear mMonitorTags to prevent new clients from monitoring tags at initialization
+ Mutex::Autolock lock(mLogLock);
+ mMonitorTags = String8::empty();
+
+ mWatchedClientPackages.clear();
+ mWatchedClientsDumpCache.clear();
+
+ bool serviceLock = tryLock(mServiceLock);
+ auto cameraClients = mActiveClientManager.getAll();
+ for (const auto &clientDescriptor : cameraClients) {
+ if (clientDescriptor == nullptr) { continue; }
+ sp<BasicClient> client = clientDescriptor->getValue();
+ if (client.get() == nullptr) { continue; }
+ client->stopWatchingTags(outFd);
+ }
+ dprintf(outFd, "Stopped watching all clients.\n");
+ if (serviceLock) { mServiceLock.unlock(); }
+ return OK;
+}
+
+status_t CameraService::clearCachedMonitoredTagDumps(int outFd) {
+ Mutex::Autolock lock(mLogLock);
+ size_t clearedSize = mWatchedClientsDumpCache.size();
+ mWatchedClientsDumpCache.clear();
+ dprintf(outFd, "Cleared tag information of %zu cached clients.\n", clearedSize);
+ return OK;
+}
+
+status_t CameraService::printWatchedTags(int outFd) {
+ Mutex::Autolock logLock(mLogLock);
+ std::set<String16> connectedMonitoredClients;
+
+ bool printedSomething = false; // tracks if any monitoring information was printed
+ // (from either cached or active clients)
+
+ bool serviceLock = tryLock(mServiceLock);
+ // get all watched clients that are currently connected
+ for (const auto &clientDescriptor: mActiveClientManager.getAll()) {
+ if (clientDescriptor == nullptr) { continue; }
+
+ sp<BasicClient> client = clientDescriptor->getValue();
+ if (client.get() == nullptr) { continue; }
+ if (!isClientWatchedLocked(client.get())) { continue; }
+
+ std::vector<std::string> dumpVector;
+ client->dumpWatchedEventsToVector(dumpVector);
+
+ size_t printIdx = dumpVector.size();
+ if (printIdx == 0) {
+ continue;
+ }
+
+ // Print tag dumps for active client
+ const String8 &cameraId = clientDescriptor->getKey();
+ String8 packageName8 = String8(client->getPackageName());
+ const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+ dprintf(outFd, "Client: %s (active)\n", printablePackageName);
+ while(printIdx > 0) {
+ printIdx--;
+ dprintf(outFd, "%s:%s %s", cameraId.string(), printablePackageName,
+ dumpVector[printIdx].c_str());
+ }
+ dprintf(outFd, "\n");
+ packageName8.unlockBuffer();
+ printedSomething = true;
+
+ connectedMonitoredClients.emplace(client->getPackageName());
+ }
+ if (serviceLock) { mServiceLock.unlock(); }
+
+ // Print entries in mWatchedClientsDumpCache for clients that are not connected
+ for (const auto &kv: mWatchedClientsDumpCache) {
+ const String16 &package = kv.first;
+ if (connectedMonitoredClients.find(package) != connectedMonitoredClients.end()) {
+ continue;
+ }
+
+ dprintf(outFd, "Client: %s (cached)\n", String8(package).string());
+ dprintf(outFd, "%s\n", kv.second.c_str());
+ printedSomething = true;
+ }
+
+ if (!printedSomething) {
+ dprintf(outFd, "No monitoring information to print.\n");
+ }
+
+ return OK;
+}
+
+// Print all events in vector `events' that came after lastPrintedEvent
+void printNewWatchedEvents(int outFd,
+ const char *cameraId,
+ const String16 &packageName,
+ const std::vector<std::string> &events,
+ const std::string &lastPrintedEvent) {
+ if (events.empty()) { return; }
+
+ // index of lastPrintedEvent in events.
+ // lastPrintedIdx = events.size() if lastPrintedEvent is not in events
+ size_t lastPrintedIdx;
+ for (lastPrintedIdx = 0;
+ lastPrintedIdx < events.size() && lastPrintedEvent != events[lastPrintedIdx];
+ lastPrintedIdx++);
+
+ if (lastPrintedIdx == 0) { return; } // early exit if no new event in `events`
+
+ String8 packageName8(packageName);
+ const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+
+ // print events in chronological order (latest event last)
+ size_t idxToPrint = lastPrintedIdx;
+ do {
+ idxToPrint--;
+ dprintf(outFd, "%s:%s %s", cameraId, printablePackageName, events[idxToPrint].c_str());
+ } while (idxToPrint != 0);
+
+ packageName8.unlockBuffer();
+}
+
+// Returns true if adb shell cmd watch should be interrupted based on data in inFd. The watch
+// command should be interrupted if the user presses the return key, or if user loses any way to
+// signal interrupt.
+// If timeoutMs == 0, this function will always return false
+bool shouldInterruptWatchCommand(int inFd, int outFd, long timeoutMs) {
+ struct timeval startTime;
+ int startTimeError = gettimeofday(&startTime, nullptr);
+ if (startTimeError) {
+ dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+ return true;
+ }
+
+ const nfds_t numFds = 1;
+ struct pollfd pollFd = { .fd = inFd, .events = POLLIN, .revents = 0 };
+
+ struct timeval currTime;
+ char buffer[2];
+ while(true) {
+ int currTimeError = gettimeofday(&currTime, nullptr);
+ if (currTimeError) {
+ dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+ return true;
+ }
+
+ long elapsedTimeMs = ((currTime.tv_sec - startTime.tv_sec) * 1000L)
+ + ((currTime.tv_usec - startTime.tv_usec) / 1000L);
+ int remainingTimeMs = (int) (timeoutMs - elapsedTimeMs);
+
+ if (remainingTimeMs <= 0) {
+ // No user interrupt within timeoutMs, don't interrupt watch command
+ return false;
+ }
+
+ int numFdsUpdated = poll(&pollFd, numFds, remainingTimeMs);
+ if (numFdsUpdated < 0) {
+ dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+ return true;
+ }
+
+ if (numFdsUpdated == 0) {
+ // No user input within timeoutMs, don't interrupt watch command
+ return false;
+ }
+
+ if (!(pollFd.revents & POLLIN)) {
+ dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+ return true;
+ }
+
+ ssize_t sizeRead = read(inFd, buffer, sizeof(buffer) - 1);
+ if (sizeRead < 0) {
+ dprintf(outFd, "Error reading user input. Exiting.\n");
+ return true;
+ }
+
+ if (sizeRead == 0) {
+ // Reached end of input fd (can happen if input is piped)
+ // User has no way to signal an interrupt, so interrupt here
+ return true;
+ }
+
+ if (buffer[0] == '\n') {
+ // User pressed return, interrupt watch command.
+ return true;
+ }
+ }
+}
+
+status_t CameraService::printWatchedTagsUntilInterrupt(const Vector<String16> &args,
+ int inFd, int outFd) {
+ // Figure out refresh interval, if present in args
+ long refreshTimeoutMs = 1000L; // refresh every 1s by default
+ if (args.size() > 2) {
+ size_t intervalIdx; // index of '-n'
+ for (intervalIdx = 2; intervalIdx < args.size() && String16("-n") != args[intervalIdx];
+ intervalIdx++);
+
+ size_t intervalValIdx = intervalIdx + 1;
+ if (intervalValIdx < args.size()) {
+ refreshTimeoutMs = strtol(String8(args[intervalValIdx].string()), nullptr, 10);
+ if (errno) { return BAD_VALUE; }
+ }
+ }
+
+ // Set min timeout of 10ms. This prevents edge cases in polling when timeout of 0 is passed.
+ refreshTimeoutMs = refreshTimeoutMs < 10 ? 10 : refreshTimeoutMs;
+
+ dprintf(outFd, "Press return to exit...\n\n");
+ std::map<String16, std::string> packageNameToLastEvent;
+
+ while (true) {
+ bool serviceLock = tryLock(mServiceLock);
+ auto cameraClients = mActiveClientManager.getAll();
+ if (serviceLock) { mServiceLock.unlock(); }
+
+ for (const auto& clientDescriptor : cameraClients) {
+ Mutex::Autolock lock(mLogLock);
+ if (clientDescriptor == nullptr) { continue; }
+
+ sp<BasicClient> client = clientDescriptor->getValue();
+ if (client.get() == nullptr) { continue; }
+ if (!isClientWatchedLocked(client.get())) { continue; }
+
+ const String16 &packageName = client->getPackageName();
+ // This also initializes the map entries with an empty string
+ const std::string& lastPrintedEvent = packageNameToLastEvent[packageName];
+
+ std::vector<std::string> latestEvents;
+ client->dumpWatchedEventsToVector(latestEvents);
+
+ if (!latestEvents.empty()) {
+ String8 cameraId = clientDescriptor->getKey();
+ const char *printableCameraId = cameraId.lockBuffer(cameraId.size());
+ printNewWatchedEvents(outFd,
+ printableCameraId,
+ packageName,
+ latestEvents,
+ lastPrintedEvent);
+ packageNameToLastEvent[packageName] = latestEvents[0];
+ cameraId.unlockBuffer();
+ }
+ }
+ if (shouldInterruptWatchCommand(inFd, outFd, refreshTimeoutMs)) {
+ break;
+ }
+ }
+ return OK;
+}
+
+void CameraService::parseClientsToWatchLocked(String8 clients) {
+ mWatchedClientPackages.clear();
+
+ const char *allSentinel = String8(kWatchAllClientsFlag).string();
+
+ char *tokenized = clients.lockBuffer(clients.size());
+ char *savePtr;
+ char *nextClient = strtok_r(tokenized, ",", &savePtr);
+
+ while (nextClient != nullptr) {
+ if (strcmp(nextClient, allSentinel) == 0) {
+ // Don't need to track any other package if 'all' is present
+ mWatchedClientPackages.clear();
+ mWatchedClientPackages.emplace(kWatchAllClientsFlag);
+ break;
+ }
+
+ // track package names
+ mWatchedClientPackages.emplace(nextClient);
+ nextClient = strtok_r(nullptr, ",", &savePtr);
+ }
+ clients.unlockBuffer();
+}
+
status_t CameraService::printHelp(int out) {
return dprintf(out, "Camera service commands:\n"
" get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
@@ -4581,9 +5112,20 @@
" Valid values 0=OFF, 1=ON for JPEG\n"
" get-image-dump-mask returns the current image-dump-mask value\n"
" set-camera-mute <0/1> enable or disable camera muting\n"
+ " watch <start|stop|dump|live|clear> manages tag monitoring in connected clients\n"
" help print this message\n");
}
+bool CameraService::isClientWatched(const BasicClient *client) {
+ Mutex::Autolock lock(mLogLock);
+ return isClientWatchedLocked(client);
+}
+
+bool CameraService::isClientWatchedLocked(const BasicClient *client) {
+ return mWatchedClientPackages.find(kWatchAllClientsFlag) != mWatchedClientPackages.end() ||
+ mWatchedClientPackages.find(client->getPackageName()) != mWatchedClientPackages.end();
+}
+
int32_t CameraService::updateAudioRestriction() {
Mutex::Autolock lock(mServiceLock);
return updateAudioRestrictionLocked();
@@ -4605,10 +5147,43 @@
return mode;
}
-void CameraService::stopInjectionImpl() {
- mInjectionStatusListener->removeListener();
+status_t CameraService::checkIfInjectionCameraIsPresent(const String8& externalCamId,
+ sp<BasicClient> clientSp) {
+ std::unique_ptr<AutoConditionLock> lock =
+ AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
+ status_t res = NO_ERROR;
+ if ((res = checkIfDeviceIsUsable(externalCamId)) != NO_ERROR) {
+ ALOGW("Device %s is not usable!", externalCamId.string());
+ mInjectionStatusListener->notifyInjectionError(
+ externalCamId, UNKNOWN_TRANSACTION);
+ clientSp->notifyError(
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+ CaptureResultExtras());
- // TODO: Implement the stop injection function.
+ // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+ // other clients from connecting in mServiceLockWrapper if held
+ mServiceLock.unlock();
+
+ // Clear caller identity temporarily so client disconnect PID checks work correctly
+ int64_t token = CameraThreadState::clearCallingIdentity();
+ clientSp->disconnect();
+ CameraThreadState::restoreCallingIdentity(token);
+
+ // Reacquire mServiceLock
+ mServiceLock.lock();
+ }
+
+ return res;
+}
+
+void CameraService::clearInjectionParameters() {
+ {
+ Mutex::Autolock lock(mInjectionParametersLock);
+ mInjectionInitPending = false;
+ mInjectionInternalCamId = "";
+ }
+ mInjectionExternalCamId = "";
+ mInjectionStatusListener->removeListener();
}
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index bc2e347..51c734f 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -110,8 +110,16 @@
virtual void onDeviceStatusChanged(const String8 &cameraId,
const String8 &physicalCameraId,
hardware::camera::common::V1_0::CameraDeviceStatus newHalStatus) override;
+ // This method may hold CameraProviderManager::mInterfaceMutex as a part
+ // of calling getSystemCameraKind() internally. Care should be taken not to
+ // directly / indirectly call this from callers who also hold
+ // mInterfaceMutex.
virtual void onTorchStatusChanged(const String8& cameraId,
hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
+ // Does not hold CameraProviderManager::mInterfaceMutex.
+ virtual void onTorchStatusChanged(const String8& cameraId,
+ hardware::camera::common::V1_0::TorchModeStatus newStatus,
+ SystemCameraKind kind) override;
virtual void onNewProviderRegistered() override;
/////////////////////////////////////////////////////////////////////
@@ -272,6 +280,10 @@
// Internal dump method to be called by CameraService
virtual status_t dumpClient(int fd, const Vector<String16>& args) = 0;
+ virtual status_t startWatchingTags(const String8 &tags, int outFd);
+ virtual status_t stopWatchingTags(int outFd);
+ virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
+
// Return the package name for this client
virtual String16 getPackageName() const;
@@ -320,6 +332,14 @@
// Set/reset camera mute
virtual status_t setCameraMute(bool enabled) = 0;
+ // The injection camera session to replace the internal camera
+ // session.
+ virtual status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) = 0;
+
+ // Stop the injection camera and restore to internal camera session.
+ virtual status_t stopInjection() = 0;
+
protected:
BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
@@ -627,6 +647,12 @@
bool removeUnavailablePhysicalId(const String8& physicalId);
/**
+ * Set and get client package name.
+ */
+ void setClientPackage(const String8& clientPackage);
+ String8 getClientPackage() const;
+
+ /**
* Return the unavailable physical ids for this device.
*
* This method acquires mStatusLock.
@@ -638,6 +664,7 @@
const int mCost;
std::set<String8> mConflicting;
std::set<String8> mUnavailablePhysicalIds;
+ String8 mClientPackage;
mutable Mutex mStatusLock;
CameraParameters mShimParams;
const SystemCameraKind mSystemCameraKind;
@@ -810,6 +837,14 @@
RingBuffer<String8> mEventLog;
Mutex mLogLock;
+ // set of client package names to watch. if this set contains 'all', then all clients will
+ // be watched. Access should be guarded by mLogLock
+ std::set<String16> mWatchedClientPackages;
+ // cache of last monitored tags dump immediately before the client disconnects. If a client
+ // re-connects, its entry is not updated until it disconnects again. Access should be guarded
+ // by mLogLock
+ std::map<String16, std::string> mWatchedClientsDumpCache;
+
// The last monitored tags set by client
String8 mMonitorTags;
@@ -942,6 +977,8 @@
*/
void dumpEventLog(int fd);
+ void cacheClientTagDumpIfNeeded(const char *cameraId, BasicClient *client);
+
/**
* This method will acquire mServiceLock
*/
@@ -1134,9 +1171,43 @@
// Set the camera mute state
status_t handleSetCameraMute(const Vector<String16>& args);
+ // Handle 'watch' command as passed through 'cmd'
+ status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
+
+ // Enable tag monitoring of the given tags in provided clients
+ status_t startWatchingTags(const Vector<String16> &args, int outFd);
+
+ // Disable tag monitoring
+ status_t stopWatchingTags(int outFd);
+
+ // Clears mWatchedClientsDumpCache
+ status_t clearCachedMonitoredTagDumps(int outFd);
+
+ // Print events of monitored tags in all cached and attached clients
+ status_t printWatchedTags(int outFd);
+
+ // Print events of monitored tags in all attached clients as they are captured. New events are
+ // fetched every `refreshMillis` ms
+ // NOTE: This function does not terminate until user passes '\n' to inFd.
+ status_t printWatchedTagsUntilInterrupt(const Vector<String16> &args, int inFd, int outFd);
+
+ // Parses comma separated clients list and adds them to mWatchedClientPackages.
+ // Does not acquire mLogLock before modifying mWatchedClientPackages. It is the caller's
+ // responsibility to acquire mLogLock before calling this function.
+ void parseClientsToWatchLocked(String8 clients);
+
// Prints the shell command help
status_t printHelp(int out);
+ // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+ // Acquires mLogLock before querying mWatchedClientPackages.
+ bool isClientWatched(const BasicClient *client);
+
+ // Returns true if client should monitor tags based on the contents of mWatchedClientPackages.
+ // Does not acquire mLogLock before querying mWatchedClientPackages. It is the caller's
+ // responsibility to acquire mLogLock before calling this function.
+ bool isClientWatchedLocked(const BasicClient *client);
+
/**
* Get the current system time as a formatted string.
*/
@@ -1167,6 +1238,10 @@
// Use separate keys for offline devices.
static const String8 kOfflineDevice;
+ // Sentinel value to be stored in `mWatchedClientPackages` to indicate that all clients should
+ // be watched.
+ static const String16 kWatchAllClientsFlag;
+
// TODO: right now each BasicClient holds one AppOpsManager instance.
// We can refactor the code so all of clients share this instance
AppOpsManager mAppOps;
@@ -1194,7 +1269,7 @@
void addListener(const sp<hardware::camera2::ICameraInjectionCallback>& callback);
void removeListener();
- void notifyInjectionError(int errorCode);
+ void notifyInjectionError(String8 injectedCamId, status_t err);
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder>& who);
@@ -1221,7 +1296,20 @@
wp<CameraService> mParent;
};
- void stopInjectionImpl();
+ // When injecting the camera, it will check whether the injecting camera status is unavailable.
+ // If it is, the disconnect function will be called to prevent camera access on the device.
+ status_t checkIfInjectionCameraIsPresent(const String8& externalCamId,
+ sp<BasicClient> clientSp);
+
+ void clearInjectionParameters();
+
+ // This is the existing camera id being replaced.
+ String8 mInjectionInternalCamId;
+ // This is the external camera Id replacing the internalId.
+ String8 mInjectionExternalCamId;
+ bool mInjectionInitPending = false;
+ // Guard mInjectionInternalCamId and mInjectionInitPending.
+ Mutex mInjectionParametersLock;
};
} // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 944b8ab..8c72bd7 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -62,7 +62,7 @@
bool overrideForPerfClass):
Camera2ClientBase(cameraService, cameraClient, clientPackageName, clientFeatureId,
cameraDeviceId, api1CameraId, cameraFacing, sensorOrientation,
- clientPid, clientUid, servicePid, overrideForPerfClass),
+ clientPid, clientUid, servicePid, overrideForPerfClass, /*legacyClient*/ true),
mParameters(api1CameraId, cameraFacing)
{
ATRACE_CALL();
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 80508e4..a406e62 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -864,7 +864,6 @@
if (fabs(maxDigitalZoom.data.f[0] - 1.f) > 0.00001f) {
params.set(CameraParameters::KEY_ZOOM, zoom);
- params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
{
String8 zoomRatios;
@@ -872,18 +871,34 @@
float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
(NUM_ZOOM_STEPS-1);
bool addComma = false;
- for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+ int previousZoom = -1;
+ size_t zoomSteps = 0;
+ for (size_t i = 0; i < NUM_ZOOM_STEPS; i++) {
+ int currentZoom = static_cast<int>(zoom * 100);
+ if (previousZoom == currentZoom) {
+ zoom += zoomIncrement;
+ continue;
+ }
if (addComma) zoomRatios += ",";
addComma = true;
- zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+ zoomRatios += String8::format("%d", currentZoom);
zoom += zoomIncrement;
+ previousZoom = currentZoom;
+ zoomSteps++;
}
- params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+
+ if (zoomSteps > 0) {
+ params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+ params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+ CameraParameters::TRUE);
+ params.set(CameraParameters::KEY_MAX_ZOOM, zoomSteps - 1);
+ zoomAvailable = true;
+ } else {
+ params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+ CameraParameters::FALSE);
+ }
}
- params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
- CameraParameters::TRUE);
- zoomAvailable = true;
} else {
params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
CameraParameters::FALSE);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index e2f8d011..1b2ceda 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -205,7 +205,7 @@
static const int MAX_INITIAL_PREVIEW_WIDTH = 1920;
static const int MAX_INITIAL_PREVIEW_HEIGHT = 1080;
// Aspect ratio tolerance
- static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.001;
+ static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.01;
// Threshold for slow jpeg mode
static const int64_t kSlowJpegModeThreshold = 33400000LL; // 33.4 ms
// Margin for checking FPS
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 1f3d478..1c26081 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -379,6 +379,12 @@
}
String8 physicalId(it.id.c_str());
+ bool hasTestPatternModePhysicalKey = std::find(mSupportedPhysicalRequestKeys.begin(),
+ mSupportedPhysicalRequestKeys.end(), ANDROID_SENSOR_TEST_PATTERN_MODE) !=
+ mSupportedPhysicalRequestKeys.end();
+ bool hasTestPatternDataPhysicalKey = std::find(mSupportedPhysicalRequestKeys.begin(),
+ mSupportedPhysicalRequestKeys.end(), ANDROID_SENSOR_TEST_PATTERN_DATA) !=
+ mSupportedPhysicalRequestKeys.end();
if (physicalId != mDevice->getId()) {
auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(),
it.id);
@@ -404,7 +410,8 @@
}
}
- physicalSettingsList.push_back({it.id, filteredParams});
+ physicalSettingsList.push_back({it.id, filteredParams,
+ hasTestPatternModePhysicalKey, hasTestPatternDataPhysicalKey});
}
} else {
physicalSettingsList.push_back({it.id, it.settings});
@@ -1797,6 +1804,35 @@
return dumpDevice(fd, args);
}
+status_t CameraDeviceClient::startWatchingTags(const String8 &tags, int out) {
+ sp<CameraDeviceBase> device = mDevice;
+ if (!device) {
+ dprintf(out, " Device is detached.");
+ return OK;
+ }
+ device->startWatchingTags(tags);
+ return OK;
+}
+
+status_t CameraDeviceClient::stopWatchingTags(int out) {
+ sp<CameraDeviceBase> device = mDevice;
+ if (!device) {
+ dprintf(out, " Device is detached.");
+ return OK;
+ }
+ device->stopWatchingTags();
+ return OK;
+}
+
+status_t CameraDeviceClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+ sp<CameraDeviceBase> device = mDevice;
+ if (!device) {
+ return OK;
+ }
+ device->dumpWatchedEventsToVector(out);
+ return OK;
+}
+
void CameraDeviceClient::notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) {
// Thread safe. Don't bother locking.
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 76b3f53..288f2d7 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -199,6 +199,10 @@
virtual status_t dumpClient(int fd, const Vector<String16>& args);
+ virtual status_t startWatchingTags(const String8 &tags, int out);
+ virtual status_t stopWatchingTags(int out);
+ virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
+
/**
* Device listener interface
*/
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index ef15f2d..10fa33f 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -110,6 +110,18 @@
return OK;
}
+status_t CameraOfflineSessionClient::startWatchingTags(const String8 &tags, int outFd) {
+ return BasicClient::startWatchingTags(tags, outFd);
+}
+
+status_t CameraOfflineSessionClient::stopWatchingTags(int outFd) {
+ return BasicClient::stopWatchingTags(outFd);
+}
+
+status_t CameraOfflineSessionClient::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+ return BasicClient::dumpWatchedEventsToVector(out);
+}
+
binder::Status CameraOfflineSessionClient::disconnect() {
Mutex::Autolock icl(mBinderSerializationLock);
@@ -330,5 +342,19 @@
CaptureResultExtras());
}
+status_t CameraOfflineSessionClient::injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) {
+ ALOGV("%s: This client doesn't support the injection camera. injectedCamId: %s providerPtr: %p",
+ __FUNCTION__, injectedCamId.string(), manager.get());
+
+ return OK;
+}
+
+status_t CameraOfflineSessionClient::stopInjection() {
+ ALOGV("%s: This client doesn't support the injection camera.", __FUNCTION__);
+
+ return OK;
+}
+
// ----------------------------------------------------------------------------
}; // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index b219a4c..920a176 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -71,6 +71,10 @@
status_t dumpClient(int /*fd*/, const Vector<String16>& /*args*/) override;
+ status_t startWatchingTags(const String8 &tags, int outFd) override;
+ status_t stopWatchingTags(int outFd) override;
+ status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
+
status_t initialize(sp<CameraProviderManager> /*manager*/,
const String8& /*monitorTags*/) override;
@@ -98,6 +102,9 @@
void notifyPrepared(int streamId) override;
void notifyRequestQueueEmpty() override;
void notifyRepeatingRequestError(long lastFrameNumber) override;
+ status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) override;
+ status_t stopInjection() override;
private:
mutable Mutex mBinderSerializationLock;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 13d044a..5d17c11 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -55,13 +55,14 @@
int clientPid,
uid_t clientUid,
int servicePid,
- bool overrideForPerfClass):
+ bool overrideForPerfClass,
+ bool legacyClient):
TClientBase(cameraService, remoteCallback, clientPackageName, clientFeatureId,
cameraId, api1CameraId, cameraFacing, sensorOrientation, clientPid, clientUid,
servicePid),
mSharedCameraCallbacks(remoteCallback),
mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
- mDevice(new Camera3Device(cameraId, overrideForPerfClass)),
+ mDevice(new Camera3Device(cameraId, overrideForPerfClass, legacyClient)),
mDeviceActive(false), mApi1CameraId(api1CameraId)
{
ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
@@ -155,6 +156,38 @@
}
template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::startWatchingTags(const String8 &tags, int out) {
+ sp<CameraDeviceBase> device = mDevice;
+ if (!device) {
+ dprintf(out, " Device is detached");
+ return OK;
+ }
+
+ return device->startWatchingTags(tags);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopWatchingTags(int out) {
+ sp<CameraDeviceBase> device = mDevice;
+ if (!device) {
+ dprintf(out, " Device is detached");
+ return OK;
+ }
+
+ return device->stopWatchingTags();
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+ sp<CameraDeviceBase> device = mDevice;
+ if (!device) {
+ // Nothing to dump if the device is detached
+ return OK;
+ }
+ return device->dumpWatchedEventsToVector(out);
+}
+
+template <typename TClientBase>
status_t Camera2ClientBase<TClientBase>::dumpDevice(
int fd,
const Vector<String16>& args) {
@@ -413,6 +446,17 @@
mRemoteCallback.clear();
}
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) {
+ return mDevice->injectCamera(injectedCamId, manager);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopInjection() {
+ return mDevice->stopInjection();
+}
+
template class Camera2ClientBase<CameraService::Client>;
template class Camera2ClientBase<CameraDeviceClientBase>;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 6246f7b..4688502 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -56,11 +56,15 @@
int clientPid,
uid_t clientUid,
int servicePid,
- bool overrideForPerfClass);
+ bool overrideForPerfClass,
+ bool legacyClient = false);
virtual ~Camera2ClientBase();
virtual status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags);
virtual status_t dumpClient(int fd, const Vector<String16>& args);
+ virtual status_t startWatchingTags(const String8 &tags, int out);
+ virtual status_t stopWatchingTags(int out);
+ virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
/**
* NotificationListener implementation
@@ -114,6 +118,10 @@
mutable Mutex mRemoteCallbackLock;
} mSharedCameraCallbacks;
+ status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) override;
+ status_t stopInjection() override;
+
protected:
// The PID provided in the constructor call
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 85b0cc2..06a3d36 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -97,6 +97,9 @@
virtual status_t disconnect() = 0;
virtual status_t dump(int fd, const Vector<String16> &args) = 0;
+ virtual status_t startWatchingTags(const String8 &tags) = 0;
+ virtual status_t stopWatchingTags() = 0;
+ virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out) = 0;
/**
* The physical camera device's static characteristics metadata buffer
@@ -106,6 +109,16 @@
struct PhysicalCameraSettings {
std::string cameraId;
CameraMetadata metadata;
+
+ // Whether the physical camera supports testPatternMode/testPatternData
+ bool mHasTestPatternModeTag = true;
+ bool mHasTestPatternDataTag = true;
+
+ // Original value of TEST_PATTERN_MODE and DATA so that they can be
+ // restored when sensor muting is turned off
+ int32_t mOriginalTestPatternMode = 0;
+ int32_t mOriginalTestPatternData[4] = {};
+
};
typedef List<PhysicalCameraSettings> PhysicalCameraSettingsList;
@@ -427,6 +440,18 @@
*/
void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+ /**
+ * The injection camera session to replace the internal camera
+ * session.
+ */
+ virtual status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) = 0;
+
+ /**
+ * Stop the injection camera and restore to internal camera session.
+ */
+ virtual status_t stopInjection() = 0;
+
protected:
bool mImageDumpMask = 0;
};
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 4f2b878..0cce2ca 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -359,7 +359,13 @@
for (auto& provider : mProviders) {
ALOGV("%s: Notifying %s for new state 0x%" PRIx64,
__FUNCTION__, provider->mProviderName.c_str(), newState);
+ // b/199240726 Camera providers can for example try to add/remove
+ // camera devices as part of the state change notification. Holding
+ // 'mInterfaceMutex' while calling 'notifyDeviceStateChange' can
+ // result in a recursive deadlock.
+ mInterfaceMutex.unlock();
status_t singleRes = provider->notifyDeviceStateChange(mDeviceState);
+ mInterfaceMutex.lock();
if (singleRes != OK) {
ALOGE("%s: Unable to notify provider %s about device state change",
__FUNCTION__,
@@ -367,6 +373,7 @@
res = singleRes;
// continue to do the rest of the providers instead of returning now
}
+ provider->notifyDeviceInfoStateChangeLocked(mDeviceState);
}
return res;
}
@@ -1185,10 +1192,12 @@
}
bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
return isHiddenPhysicalCameraInternal(cameraId).first;
}
status_t CameraProviderManager::filterSmallJpegSizes(const std::string& cameraId) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
for (auto& provider : mProviders) {
for (auto& deviceInfo : provider->mDevices) {
if (deviceInfo->mId == cameraId) {
@@ -1629,6 +1638,7 @@
return BAD_VALUE;
}
if (deviceInfo == nullptr) return BAD_VALUE;
+ deviceInfo->notifyDeviceStateChange(mDeviceState);
deviceInfo->mStatus = initialStatus;
bool isAPI1Compatible = deviceInfo->isAPI1Compatible();
@@ -1950,16 +1960,19 @@
const hardware::hidl_string& cameraDeviceName,
TorchModeStatus newStatus) {
sp<StatusListener> listener;
+ SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
std::string id;
+ bool known = false;
{
- std::lock_guard<std::mutex> lock(mManager->mStatusListenerMutex);
- bool known = false;
+ // Hold mLock for accessing mDevices
+ std::lock_guard<std::mutex> lock(mLock);
for (auto& deviceInfo : mDevices) {
if (deviceInfo->mName == cameraDeviceName) {
ALOGI("Camera device %s torch status is now %s", cameraDeviceName.c_str(),
torchStatusToString(newStatus));
id = deviceInfo->mId;
known = true;
+ systemCameraKind = deviceInfo->mSystemCameraKind;
if (TorchModeStatus::AVAILABLE_ON != newStatus) {
mManager->removeRef(DeviceMode::TORCH, id);
}
@@ -1971,11 +1984,19 @@
mProviderName.c_str(), cameraDeviceName.c_str(), newStatus);
return hardware::Void();
}
+ // no lock needed since listener is set up only once during
+ // CameraProviderManager initialization and then never changed till it is
+ // destructed.
listener = mManager->getStatusListener();
- }
+ }
// Call without lock held to allow reentrancy into provider manager
+ // The problem with holding mLock here is that we
+ // might be limiting re-entrancy : CameraService::onTorchStatusChanged calls
+ // back into CameraProviderManager which might try to hold mLock again (eg:
+ // findDeviceInfo, which should be holding mLock while iterating through
+ // each provider's devices).
if (listener != nullptr) {
- listener->onTorchStatusChanged(String8(id.c_str()), newStatus);
+ listener->onTorchStatusChanged(String8(id.c_str()), newStatus, systemCameraKind);
}
return hardware::Void();
}
@@ -2031,6 +2052,14 @@
return OK;
}
+void CameraProviderManager::ProviderInfo::notifyDeviceInfoStateChangeLocked(
+ hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
+ std::lock_guard<std::mutex> lock(mLock);
+ for (auto it = mDevices.begin(); it != mDevices.end(); it++) {
+ (*it)->notifyDeviceStateChange(newDeviceState);
+ }
+}
+
status_t CameraProviderManager::ProviderInfo::notifyDeviceStateChange(
hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
mDeviceState = newDeviceState;
@@ -2285,6 +2314,18 @@
return;
}
+ if (mCameraCharacteristics.exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) {
+ const auto &stateMap = mCameraCharacteristics.find(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS);
+ if ((stateMap.count > 0) && ((stateMap.count % 2) == 0)) {
+ for (size_t i = 0; i < stateMap.count; i += 2) {
+ mDeviceStateOrientationMap.emplace(stateMap.data.i64[i], stateMap.data.i64[i+1]);
+ }
+ } else {
+ ALOGW("%s: Invalid ANDROID_INFO_DEVICE_STATE_ORIENTATIONS map size: %zu", __FUNCTION__,
+ stateMap.count);
+ }
+ }
+
mSystemCameraKind = getSystemCameraKind();
status_t res = fixupMonochromeTags();
@@ -2413,6 +2454,16 @@
CameraProviderManager::ProviderInfo::DeviceInfo3::~DeviceInfo3() {}
+void CameraProviderManager::ProviderInfo::DeviceInfo3::notifyDeviceStateChange(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> newState) {
+
+ if (!mDeviceStateOrientationMap.empty() &&
+ (mDeviceStateOrientationMap.find(newState) != mDeviceStateOrientationMap.end())) {
+ mCameraCharacteristics.update(ANDROID_SENSOR_ORIENTATION,
+ &mDeviceStateOrientationMap[newState], 1);
+ }
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::setTorchMode(bool enabled) {
return setTorchModeForDevice<InterfaceT>(enabled);
}
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 1bdbb44..f28d128 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -40,7 +40,6 @@
#include <camera/VendorTagDescriptor.h>
namespace android {
-
/**
* The vendor tag descriptor class that takes HIDL vendor tag information as
* input. Not part of VendorTagDescriptor class because that class is used
@@ -89,6 +88,7 @@
#define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
#define CAMERA_DEVICE_API_VERSION_3_6 HARDWARE_DEVICE_API_VERSION(3, 6)
#define CAMERA_DEVICE_API_VERSION_3_7 HARDWARE_DEVICE_API_VERSION(3, 7)
+#define CAMERA_DEVICE_API_VERSION_3_8 HARDWARE_DEVICE_API_VERSION(3, 8)
/**
* A manager for all camera providers available on an Android device.
@@ -156,6 +156,9 @@
const String8 &physicalCameraId,
hardware::camera::common::V1_0::CameraDeviceStatus newStatus) = 0;
virtual void onTorchStatusChanged(const String8 &cameraId,
+ hardware::camera::common::V1_0::TorchModeStatus newStatus,
+ SystemCameraKind kind) = 0;
+ virtual void onTorchStatusChanged(const String8 &cameraId,
hardware::camera::common::V1_0::TorchModeStatus newStatus) = 0;
virtual void onNewProviderRegistered() = 0;
};
@@ -284,12 +287,6 @@
sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session);
/**
- * Save the ICameraProvider while it is being used by a camera or torch client
- */
- void saveRef(DeviceMode usageType, const std::string &cameraId,
- sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
-
- /**
* Notify that the camera or torch is no longer being used by a camera client
*/
void removeRef(DeviceMode usageType, const std::string &cameraId);
@@ -336,8 +333,6 @@
// All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
mutable std::mutex mInterfaceMutex;
- // the status listener update callbacks will lock mStatusMutex
- mutable std::mutex mStatusListenerMutex;
wp<StatusListener> mListener;
ServiceInteractionProxy* mServiceProxy;
@@ -434,6 +429,10 @@
/**
* Notify provider about top-level device physical state changes
+ *
+ * Note that 'mInterfaceMutex' should not be held when calling this method.
+ * It is possible for camera providers to add/remove devices and try to
+ * acquire it.
*/
status_t notifyDeviceStateChange(
hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
@@ -442,6 +441,15 @@
std::vector<std::unordered_set<std::string>> getConcurrentCameraIdCombinations();
/**
+ * Notify 'DeviceInfo' instances about top-level device physical state changes
+ *
+ * Note that 'mInterfaceMutex' should be held when calling this method.
+ */
+ void notifyDeviceInfoStateChangeLocked(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ newDeviceState);
+
+ /**
* Query the camera provider for concurrent stream configuration support
*/
status_t isConcurrentSessionConfigurationSupported(
@@ -493,6 +501,9 @@
return INVALID_OPERATION;
}
virtual status_t filterSmallJpegSizes() = 0;
+ virtual void notifyDeviceStateChange(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ /*newState*/) {}
template<class InterfaceT>
sp<InterfaceT> startDeviceInterface();
@@ -553,6 +564,9 @@
bool *status /*out*/)
override;
virtual status_t filterSmallJpegSizes() override;
+ virtual void notifyDeviceStateChange(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ newState) override;
DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
const std::string &id, uint16_t minorVersion,
@@ -562,6 +576,8 @@
virtual ~DeviceInfo3();
private:
CameraMetadata mCameraCharacteristics;
+ // Map device states to sensor orientations
+ std::unordered_map<int64_t, int32_t> mDeviceStateOrientationMap;
// A copy of mCameraCharacteristics without performance class
// override
std::unique_ptr<CameraMetadata> mCameraCharNoPCOverride;
@@ -662,6 +678,12 @@
sp<hardware::camera::provider::V2_6::ICameraProvider> &interface2_6);
};
+ /**
+ * Save the ICameraProvider while it is being used by a camera or torch client
+ */
+ void saveRef(DeviceMode usageType, const std::string &cameraId,
+ sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
+
// Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
// and the calling code doesn't mutate the list of providers or their lists of devices.
// Finds the first device of the given ID that falls within the requested version range
diff --git a/services/camera/libcameraservice/device3/BufferUtils.h b/services/camera/libcameraservice/device3/BufferUtils.h
index 1e1cd60..03112ec 100644
--- a/services/camera/libcameraservice/device3/BufferUtils.h
+++ b/services/camera/libcameraservice/device3/BufferUtils.h
@@ -104,7 +104,7 @@
// Return the removed buffer ID if input cache is found.
// Otherwise return BUFFER_ID_NO_BUFFER
- uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle);
+ uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) override;
// Clear all caches for input stream, but do not remove the stream
// Removed buffers' ID are returned
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index fd645c7..3742a17 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -75,8 +75,9 @@
namespace android {
-Camera3Device::Camera3Device(const String8 &id, bool overrideForPerfClass):
+Camera3Device::Camera3Device(const String8 &id, bool overrideForPerfClass, bool legacyClient):
mId(id),
+ mLegacyClient(legacyClient),
mOperatingMode(NO_MODE),
mIsConstrainedHighSpeedConfiguration(false),
mStatus(STATUS_UNINITIALIZED),
@@ -848,6 +849,21 @@
return OK;
}
+status_t Camera3Device::startWatchingTags(const String8 &tags) {
+ mTagMonitor.parseTagsToMonitor(tags);
+ return OK;
+}
+
+status_t Camera3Device::stopWatchingTags() {
+ mTagMonitor.disableMonitoring();
+ return OK;
+}
+
+status_t Camera3Device::dumpWatchedEventsToVector(std::vector<std::string> &out) {
+ mTagMonitor.getLatestMonitoredTagEvents(out);
+ return OK;
+}
+
const CameraMetadata& Camera3Device::infoPhysical(const String8& physicalId) const {
ALOGVV("%s: E", __FUNCTION__);
if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
@@ -1089,7 +1105,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface
+ *mInterface, mLegacyClient
};
for (const auto& result : results) {
@@ -1148,7 +1164,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface
+ *mInterface, mLegacyClient
};
for (const auto& result : results) {
@@ -1160,6 +1176,16 @@
hardware::Return<void> Camera3Device::notify(
const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
+ return notifyHelper<hardware::camera::device::V3_2::NotifyMsg>(msgs);
+}
+
+hardware::Return<void> Camera3Device::notify_3_8(
+ const hardware::hidl_vec<hardware::camera::device::V3_8::NotifyMsg>& msgs) {
+ return notifyHelper<hardware::camera::device::V3_8::NotifyMsg>(msgs);
+}
+
+template<typename NotifyMsgType>
+hardware::Return<void> Camera3Device::notifyHelper(const hardware::hidl_vec<NotifyMsgType>& msgs) {
// Ideally we should grab mLock, but that can lead to deadlock, and
// it's not super important to get up to date value of mStatus for this
// warning print, hence skipping the lock here
@@ -1189,7 +1215,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface
+ *mInterface, mLegacyClient
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
@@ -2455,22 +2481,24 @@
}
if (mSupportCameraMute) {
- auto testPatternModeEntry =
- newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
- newRequest->mOriginalTestPatternMode = testPatternModeEntry.count > 0 ?
- testPatternModeEntry.data.i32[0] :
- ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+ for (auto& settings : newRequest->mSettingsList) {
+ auto testPatternModeEntry =
+ settings.metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+ settings.mOriginalTestPatternMode = testPatternModeEntry.count > 0 ?
+ testPatternModeEntry.data.i32[0] :
+ ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
- auto testPatternDataEntry =
- newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
- if (testPatternDataEntry.count >= 4) {
- memcpy(newRequest->mOriginalTestPatternData, testPatternDataEntry.data.i32,
- sizeof(CaptureRequest::mOriginalTestPatternData));
- } else {
- newRequest->mOriginalTestPatternData[0] = 0;
- newRequest->mOriginalTestPatternData[1] = 0;
- newRequest->mOriginalTestPatternData[2] = 0;
- newRequest->mOriginalTestPatternData[3] = 0;
+ auto testPatternDataEntry =
+ settings.metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+ if (testPatternDataEntry.count >= 4) {
+ memcpy(settings.mOriginalTestPatternData, testPatternDataEntry.data.i32,
+ sizeof(PhysicalCameraSettings::mOriginalTestPatternData));
+ } else {
+ settings.mOriginalTestPatternData[0] = 0;
+ settings.mOriginalTestPatternData[1] = 0;
+ settings.mOriginalTestPatternData[2] = 0;
+ settings.mOriginalTestPatternData[3] = 0;
+ }
}
}
@@ -2677,6 +2705,7 @@
}
mGroupIdPhysicalCameraMap.clear();
+ bool composerSurfacePresent = false;
for (size_t i = 0; i < mOutputStreams.size(); i++) {
// Don't configure bidi streams twice, nor add them twice to the list
@@ -2716,6 +2745,10 @@
const String8& physicalCameraId = mOutputStreams[i]->getPhysicalCameraId();
mGroupIdPhysicalCameraMap[streamGroupId].insert(physicalCameraId);
}
+
+ if (outputStream->usage & GraphicBuffer::USAGE_HW_COMPOSER) {
+ composerSurfacePresent = true;
+ }
}
config.streams = streams.editArray();
@@ -2783,6 +2816,8 @@
}
}
+ mRequestThread->setComposerSurface(composerSurfacePresent);
+
// Request thread needs to know to avoid using repeat-last-settings protocol
// across configure_streams() calls
if (notifyRequestThread) {
@@ -2836,17 +2871,28 @@
mRequestBufferSM.onStreamsConfigured();
}
+ // First call injectCamera() and then run configureStreamsLocked() case:
// Since the streams configuration of the injection camera is based on the internal camera, we
- // must wait until the internal camera configure streams before calling injectCamera() to
+ // must wait until the internal camera configures streams before running the injection job to
// configure the injection streams.
if (mInjectionMethods->isInjecting()) {
- ALOGV("%s: Injection camera %s: Start to configure streams.",
+ ALOGD("%s: Injection camera %s: Start to configure streams.",
__FUNCTION__, mInjectionMethods->getInjectedCamId().string());
res = mInjectionMethods->injectCamera(config, bufferSizes);
if (res != OK) {
ALOGE("Can't finish inject camera process!");
return res;
}
+ } else {
+ // First run configureStreamsLocked() and then call injectCamera() case:
+ // If the stream configuration has been completed and camera device is active, but the
+ // injection camera has not been injected yet, we need to store the stream configuration of
+ // the internal camera (because the stream configuration of the injection camera is based
+ // on the internal camera). When injecting occurs later, this configuration can be used by
+ // the injection camera.
+ ALOGV("%s: The stream configuration is complete and the camera device is active, but the"
+ " injection camera has not been injected yet.", __FUNCTION__);
+ mInjectionMethods->storeInjectionConfig(config, bufferSizes);
}
return OK;
@@ -3072,10 +3118,12 @@
void Camera3Device::monitorMetadata(TagMonitor::eventSource source,
int64_t frameNumber, nsecs_t timestamp, const CameraMetadata& metadata,
- const std::unordered_map<std::string, CameraMetadata>& physicalMetadata) {
+ const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+ const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+ int32_t inputStreamId) {
mTagMonitor.monitorMetadata(source, frameNumber, timestamp, metadata,
- physicalMetadata);
+ physicalMetadata, outputBuffers, numOutputBuffers, inputStreamId);
}
/**
@@ -4132,6 +4180,11 @@
return mBufferRecords.getBufferId(buf, streamId);
}
+uint64_t Camera3Device::HalInterface::removeOneBufferCache(int streamId,
+ const native_handle_t* handle) {
+ return mBufferRecords.removeOneBufferCache(streamId, handle);
+}
+
void Camera3Device::HalInterface::onBufferFreed(
int streamId, const native_handle_t* handle) {
uint32_t bufferId = mBufferRecords.removeOneBufferCache(streamId, handle);
@@ -4174,6 +4227,7 @@
mCurrentAfTriggerId(0),
mCurrentPreCaptureTriggerId(0),
mRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE),
+ mComposerOutput(false),
mCameraMute(ANDROID_SENSOR_TEST_PATTERN_MODE_OFF),
mCameraMuteChanged(false),
mRepeatingLastFrameNumber(
@@ -4578,9 +4632,15 @@
sp<Camera3Device> parent = mParent.promote();
if (parent != NULL) {
+ int32_t inputStreamId = -1;
+ if (halRequest.input_buffer != nullptr) {
+ inputStreamId = Camera3Stream::cast(halRequest.input_buffer->stream)->getId();
+ }
+
parent->monitorMetadata(TagMonitor::REQUEST,
halRequest.frame_number,
- 0, mLatestRequest, mLatestPhysicalRequest);
+ 0, mLatestRequest, mLatestPhysicalRequest, halRequest.output_buffers,
+ halRequest.num_output_buffers, inputStreamId);
}
}
@@ -4778,6 +4838,26 @@
return submitRequestSuccess;
}
+status_t Camera3Device::removeFwkOnlyRegionKeys(CameraMetadata *request) {
+ static const std::array<uint32_t, 4> kFwkOnlyRegionKeys = {ANDROID_CONTROL_AF_REGIONS_SET,
+ ANDROID_CONTROL_AE_REGIONS_SET, ANDROID_CONTROL_AWB_REGIONS_SET,
+ ANDROID_SCALER_CROP_REGION_SET};
+ if (request == nullptr) {
+ ALOGE("%s request metadata nullptr", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ status_t res = OK;
+ for (const auto &key : kFwkOnlyRegionKeys) {
+ if (request->exists(key)) {
+ res = request->erase(key);
+ if (res != OK) {
+ return res;
+ }
+ }
+ }
+ return OK;
+}
+
status_t Camera3Device::RequestThread::prepareHalRequests() {
ATRACE_CALL();
@@ -4804,7 +4884,11 @@
bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
mPrevTriggers = triggerCount;
- bool rotateAndCropChanged = overrideAutoRotateAndCrop(captureRequest);
+ // Do not override rotate&crop for stream configurations that include
+ // SurfaceViews(HW_COMPOSER) output. The display rotation there will be
+ // compensated by NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY
+ bool rotateAndCropChanged = mComposerOutput ? false :
+ overrideAutoRotateAndCrop(captureRequest);
bool testPatternChanged = overrideTestPattern(captureRequest);
// If the request is the same as last, or we had triggers now or last time or
@@ -4837,6 +4921,12 @@
it != captureRequest->mSettingsList.end(); it++) {
if (parent->mUHRCropAndMeteringRegionMappers.find(it->cameraId) ==
parent->mUHRCropAndMeteringRegionMappers.end()) {
+ if (removeFwkOnlyRegionKeys(&(it->metadata)) != OK) {
+ SET_ERR("RequestThread: Unable to remove fwk-only keys from request"
+ "%d: %s (%d)", halRequest->frame_number, strerror(-res),
+ res);
+ return INVALID_OPERATION;
+ }
continue;
}
@@ -4851,6 +4941,12 @@
return INVALID_OPERATION;
}
captureRequest->mUHRCropAndMeteringRegionsUpdated = true;
+ if (removeFwkOnlyRegionKeys(&(it->metadata)) != OK) {
+ SET_ERR("RequestThread: Unable to remove fwk-only keys from request"
+ "%d: %s (%d)", halRequest->frame_number, strerror(-res),
+ res);
+ return INVALID_OPERATION;
+ }
}
}
@@ -5298,6 +5394,13 @@
return OK;
}
+status_t Camera3Device::RequestThread::setComposerSurface(bool composerSurfacePresent) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mTriggerMutex);
+ mComposerOutput = composerSurfacePresent;
+ return OK;
+}
+
status_t Camera3Device::RequestThread::setCameraMute(int32_t muteMode) {
ATRACE_CALL();
Mutex::Autolock l(mTriggerMutex);
@@ -5376,7 +5479,8 @@
outputBuffers->editItemAt(i).acquire_fence = -1;
}
outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
- captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
+ captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
+ /*timestamp*/0, /*readoutTimestamp*/0,
/*timestampIncreasing*/true, std::vector<size_t> (),
captureRequest->mResultExtras.frameNumber);
}
@@ -5871,48 +5975,53 @@
bool changed = false;
- int32_t testPatternMode = request->mOriginalTestPatternMode;
- int32_t testPatternData[4] = {
- request->mOriginalTestPatternData[0],
- request->mOriginalTestPatternData[1],
- request->mOriginalTestPatternData[2],
- request->mOriginalTestPatternData[3]
- };
+ // For a multi-camera, the physical cameras support the same set of
+ // test pattern modes as the logical camera.
+ for (auto& settings : request->mSettingsList) {
+ CameraMetadata &metadata = settings.metadata;
- if (mCameraMute != ANDROID_SENSOR_TEST_PATTERN_MODE_OFF) {
- testPatternMode = mCameraMute;
- testPatternData[0] = 0;
- testPatternData[1] = 0;
- testPatternData[2] = 0;
- testPatternData[3] = 0;
- }
-
- CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
-
- auto testPatternEntry = metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
- if (testPatternEntry.count > 0) {
- if (testPatternEntry.data.i32[0] != testPatternMode) {
- testPatternEntry.data.i32[0] = testPatternMode;
- changed = true;
+ int32_t testPatternMode = settings.mOriginalTestPatternMode;
+ int32_t testPatternData[4] = {
+ settings.mOriginalTestPatternData[0],
+ settings.mOriginalTestPatternData[1],
+ settings.mOriginalTestPatternData[2],
+ settings.mOriginalTestPatternData[3]
+ };
+ if (mCameraMute != ANDROID_SENSOR_TEST_PATTERN_MODE_OFF) {
+ testPatternMode = mCameraMute;
+ testPatternData[0] = 0;
+ testPatternData[1] = 0;
+ testPatternData[2] = 0;
+ testPatternData[3] = 0;
}
- } else {
- metadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE,
- &testPatternMode, 1);
- changed = true;
- }
- auto testPatternColor = metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
- if (testPatternColor.count >= 4) {
- for (size_t i = 0; i < 4; i++) {
- if (testPatternColor.data.i32[i] != testPatternData[i]) {
- testPatternColor.data.i32[i] = testPatternData[i];
+ auto testPatternEntry = metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+ bool supportTestPatternModeKey = settings.mHasTestPatternModeTag;
+ if (testPatternEntry.count > 0) {
+ if (testPatternEntry.data.i32[0] != testPatternMode) {
+ testPatternEntry.data.i32[0] = testPatternMode;
changed = true;
}
+ } else if (supportTestPatternModeKey) {
+ metadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE,
+ &testPatternMode, 1);
+ changed = true;
}
- } else {
- metadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA,
- testPatternData, 4);
- changed = true;
+
+ auto testPatternColor = metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+ bool supportTestPatternDataKey = settings.mHasTestPatternDataTag;
+ if (testPatternColor.count >= 4) {
+ for (size_t i = 0; i < 4; i++) {
+ if (testPatternColor.data.i32[i] != testPatternData[i]) {
+ testPatternColor.data.i32[i] = testPatternData[i];
+ changed = true;
+ }
+ }
+ } else if (supportTestPatternDataKey) {
+ metadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA,
+ testPatternData, 4);
+ changed = true;
+ }
}
return changed;
@@ -6582,6 +6691,13 @@
ALOGI("%s Injection camera: injectedCamId = %s", __FUNCTION__, injectedCamId.string());
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
+ // When the camera device is active, injectCamera() and stopInjection() will call
+ // internalPauseAndWaitLocked() and internalResumeLocked(), and then they will call
+ // mStatusChanged.waitRelative(mLock, timeout) of waitUntilStateThenRelock(). But
+ // mStatusChanged.waitRelative(mLock, timeout)'s parameter: mutex "mLock" must be in the locked
+ // state, so we need to add "Mutex::Autolock l(mLock)" to lock the "mLock" before calling
+ // waitUntilStateThenRelock().
+ Mutex::Autolock l(mLock);
status_t res = NO_ERROR;
if (mInjectionMethods->isInjecting()) {
@@ -6604,16 +6720,25 @@
return res;
}
- camera3::camera_stream_configuration injectionConfig;
- std::vector<uint32_t> injectionBufferSizes;
- mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
// When the second display of android is cast to the remote device, and the opened camera is
// also cast to the second display, in this case, because the camera has configured the streams
// at this time, we can directly call injectCamera() to replace the internal camera with
// injection camera.
- if (mOperatingMode >= 0 && injectionConfig.num_streams > 0
- && injectionBufferSizes.size() > 0) {
- ALOGV("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+ if (mInjectionMethods->isStreamConfigCompleteButNotInjected()) {
+ ALOGD("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+
+ camera3::camera_stream_configuration injectionConfig;
+ std::vector<uint32_t> injectionBufferSizes;
+ mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
+ if (mOperatingMode < 0 || injectionConfig.num_streams <= 0
+ || injectionBufferSizes.size() <= 0) {
+ ALOGE("Failed to inject camera due to abandoned configuration! "
+ "mOperatingMode: %d injectionConfig.num_streams: %d "
+ "injectionBufferSizes.size(): %zu", mOperatingMode,
+ injectionConfig.num_streams, injectionBufferSizes.size());
+ return DEAD_OBJECT;
+ }
+
res = mInjectionMethods->injectCamera(
injectionConfig, injectionBufferSizes);
if (res != OK) {
@@ -6628,6 +6753,7 @@
status_t Camera3Device::stopInjection() {
ALOGI("%s: Injection camera: stopInjection", __FUNCTION__);
Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
return mInjectionMethods->stopInjection();
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 39714f0..d08c41f 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -39,6 +39,7 @@
#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
#include <fmq/MessageQueue.h>
#include <camera/CaptureResult.h>
@@ -83,14 +84,14 @@
*/
class Camera3Device :
public CameraDeviceBase,
- virtual public hardware::camera::device::V3_5::ICameraDeviceCallback,
+ virtual public hardware::camera::device::V3_8::ICameraDeviceCallback,
public camera3::SetErrorInterface,
public camera3::InflightRequestUpdateInterface,
public camera3::RequestBufferInterface,
public camera3::FlushBufferInterface {
public:
- explicit Camera3Device(const String8& id, bool overrideForPerfClass);
+ explicit Camera3Device(const String8& id, bool overrideForPerfClass, bool legacyClient = false);
virtual ~Camera3Device();
@@ -106,6 +107,9 @@
status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags) override;
status_t disconnect() override;
status_t dump(int fd, const Vector<String16> &args) override;
+ status_t startWatchingTags(const String8 &tags) override;
+ status_t stopWatchingTags() override;
+ status_t dumpWatchedEventsToVector(std::vector<std::string> &out) override;
const CameraMetadata& info() const override;
const CameraMetadata& infoPhysical(const String8& physicalId) const override;
@@ -297,6 +301,7 @@
private:
status_t disconnectImpl();
+ static status_t removeFwkOnlyRegionKeys(CameraMetadata *request);
// internal typedefs
using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
@@ -332,6 +337,9 @@
// Camera device ID
const String8 mId;
+ // Legacy camera client flag
+ bool mLegacyClient;
+
// Current stream configuration mode;
int mOperatingMode;
// Current session wide parameters
@@ -412,6 +420,8 @@
std::pair<bool, uint64_t> getBufferId(
const buffer_handle_t& buf, int streamId) override;
+ uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) override;
+
status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
/*out*/ buffer_handle_t **buffer) override;
@@ -570,10 +580,6 @@
// overriding of ROTATE_AND_CROP value and adjustment of coordinates
// in several other controls in both the request and the result
bool mRotateAndCropAuto;
- // Original value of TEST_PATTERN_MODE and DATA so that they can be
- // restored when sensor muting is turned off
- int32_t mOriginalTestPatternMode;
- int32_t mOriginalTestPatternData[4];
// Whether this capture request has its zoom ratio set to 1.0x before
// the framework overrides it for camera HAL consumption.
@@ -581,7 +587,6 @@
// The systemTime timestamp when the request is created.
nsecs_t mRequestTimeNs;
-
// Whether this capture request's distortion correction update has
// been done.
bool mDistortionCorrectionUpdated = false;
@@ -638,6 +643,14 @@
const hardware::hidl_vec<
hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
+ hardware::Return<void> notify_3_8(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_8::NotifyMsg>& msgs) override;
+
+ template<typename NotifyMsgType>
+ hardware::Return<void> notifyHelper(
+ const hardware::hidl_vec<NotifyMsgType>& msgs);
+
// Handle one notify message
void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
@@ -916,6 +929,7 @@
status_t setRotateAndCropAutoBehavior(
camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
+ status_t setComposerSurface(bool composerSurfacePresent);
status_t setCameraMute(int32_t muteMode);
@@ -1068,6 +1082,7 @@
uint32_t mCurrentAfTriggerId;
uint32_t mCurrentPreCaptureTriggerId;
camera_metadata_enum_android_scaler_rotate_and_crop_t mRotateAndCropOverride;
+ bool mComposerOutput;
int32_t mCameraMute; // 0 = no mute, otherwise the TEST_PATTERN_MODE to use
bool mCameraMuteChanged;
@@ -1249,7 +1264,9 @@
void monitorMetadata(TagMonitor::eventSource source, int64_t frameNumber,
nsecs_t timestamp, const CameraMetadata& metadata,
- const std::unordered_map<std::string, CameraMetadata>& physicalMetadata);
+ const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+ const camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+ int32_t inputStreamId);
metadata_vendor_id_t mVendorTagId;
@@ -1373,25 +1390,34 @@
// when device is IDLE and request thread is paused.
status_t injectCamera(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes);
+ const std::vector<uint32_t>& injectionBufferSizes);
// Stop the injection camera and switch back to backup hal interface.
status_t stopInjection();
bool isInjecting();
+ bool isStreamConfigCompleteButNotInjected();
+
const String8& getInjectedCamId() const;
void getInjectionConfig(/*out*/ camera3::camera_stream_configuration* injectionConfig,
/*out*/ std::vector<uint32_t>* injectionBufferSizes);
+ // When the streaming configuration is completed and the camera device is active, but the
+ // injection camera has not yet been injected, the streaming configuration of the internal
+ // camera will be stored first.
+ void storeInjectionConfig(
+ const camera3::camera_stream_configuration& injectionConfig,
+ const std::vector<uint32_t>& injectionBufferSizes);
+
private:
// Configure the streams of injection camera, it need wait until the
// output streams are created and configured to the original camera before
// proceeding.
status_t injectionConfigureStreams(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes);
+ const std::vector<uint32_t>& injectionBufferSizes);
// Disconnect the injection camera and delete the hal interface.
void injectionDisconnectImpl();
@@ -1409,9 +1435,23 @@
// Generated injection camera hal interface.
sp<HalInterface> mInjectedCamHalInterface;
+ // Backup of the original camera hal result FMQ.
+ std::unique_ptr<ResultMetadataQueue> mBackupResultMetadataQueue;
+
+ // FMQ writes the result for the injection camera. Must be guarded by
+ // mProcessCaptureResultLock.
+ std::unique_ptr<ResultMetadataQueue> mInjectionResultMetadataQueue;
+
+ // The flag indicates that the stream configuration is complete, the camera device is
+ // active, but the injection camera has not yet been injected.
+ bool mIsStreamConfigCompleteButNotInjected = false;
+
// Copy the configuration of the internal camera.
camera3::camera_stream_configuration mInjectionConfig;
+ // Copy the streams of the internal camera.
+ Vector<camera3::camera_stream_t*> mInjectionStreams;
+
// Copy the bufferSizes of the output streams of the internal camera.
std::vector<uint32_t> mInjectionBufferSizes;
diff --git a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
index f145dac..4744a6d 100644
--- a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
@@ -86,7 +86,7 @@
return DEAD_OBJECT;
}
- std::unique_ptr<ResultMetadataQueue>& resQueue = parent->mResultMetadataQueue;
+ std::unique_ptr<ResultMetadataQueue>& resQueue = mInjectionResultMetadataQueue;
auto resultQueueRet = session->getCaptureResultMetadataQueue(
[&resQueue](const auto& descriptor) {
resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
@@ -127,10 +127,8 @@
status_t Camera3Device::Camera3DeviceInjectionMethods::injectCamera(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes) {
+ const std::vector<uint32_t>& injectionBufferSizes) {
status_t res = NO_ERROR;
- mInjectionConfig = injectionConfig;
- mInjectionBufferSizes = injectionBufferSizes;
if (mInjectedCamHalInterface == nullptr) {
ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
@@ -148,7 +146,6 @@
if (parent->mStatus == STATUS_ACTIVE) {
ALOGV("%s: Let the device be IDLE and the request thread is paused",
__FUNCTION__);
- parent->mPauseStateNotify = true;
res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
if (res != OK) {
ALOGE("%s: Can't pause captures to inject camera!", __FUNCTION__);
@@ -188,7 +185,7 @@
ALOGV("%s: Restarting activity to inject camera", __FUNCTION__);
// Reuse current operating mode and session parameters for new stream
// config.
- parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+ parent->internalResumeLocked();
}
return OK;
@@ -208,7 +205,6 @@
if (parent->mStatus == STATUS_ACTIVE) {
ALOGV("%s: Let the device be IDLE and the request thread is paused",
__FUNCTION__);
- parent->mPauseStateNotify = true;
res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
if (res != OK) {
ALOGE("%s: Can't pause captures to stop injection!", __FUNCTION__);
@@ -229,7 +225,7 @@
ALOGV("%s: Restarting activity to stop injection", __FUNCTION__);
// Reuse current operating mode and session parameters for new stream
// config.
- parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+ parent->internalResumeLocked();
}
return OK;
@@ -243,6 +239,10 @@
}
}
+bool Camera3Device::Camera3DeviceInjectionMethods::isStreamConfigCompleteButNotInjected() {
+ return mIsStreamConfigCompleteButNotInjected;
+}
+
const String8& Camera3Device::Camera3DeviceInjectionMethods::getInjectedCamId()
const {
return mInjectedCamId;
@@ -260,10 +260,22 @@
*injectionBufferSizes = mInjectionBufferSizes;
}
+void Camera3Device::Camera3DeviceInjectionMethods::storeInjectionConfig(
+ const camera3::camera_stream_configuration& injectionConfig,
+ const std::vector<uint32_t>& injectionBufferSizes) {
+ mIsStreamConfigCompleteButNotInjected = true;
+ mInjectionConfig = injectionConfig;
+ mInjectionStreams.clear();
+ for (size_t i = 0; i < injectionConfig.num_streams; i++) {
+ mInjectionStreams.push_back(injectionConfig.streams[i]);
+ }
+ mInjectionConfig.streams = mInjectionStreams.editArray();
+ mInjectionBufferSizes = injectionBufferSizes;
+}
status_t Camera3Device::Camera3DeviceInjectionMethods::injectionConfigureStreams(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes) {
+ const std::vector<uint32_t>& injectionBufferSizes) {
ATRACE_CALL();
status_t res = NO_ERROR;
@@ -326,7 +338,6 @@
mInjectedCamId.string());
auto rc = parent->mPreparerThread->resume();
-
if (rc != OK) {
ALOGE("%s: Injection camera %s: Preparer thread failed to resume!",
__FUNCTION__, mInjectedCamId.string());
@@ -339,6 +350,9 @@
void Camera3Device::Camera3DeviceInjectionMethods::injectionDisconnectImpl() {
ATRACE_CALL();
ALOGI("%s: Injection camera disconnect", __FUNCTION__);
+ mIsStreamConfigCompleteButNotInjected = false;
+ mInjectionStreams.clear();
+ mInjectionConfig.streams = nullptr;
mBackupHalInterface = nullptr;
HalInterface* interface = nullptr;
@@ -380,10 +394,18 @@
return INVALID_OPERATION;
}
- if (keepBackup && mBackupHalInterface == nullptr) {
- mBackupHalInterface = parent->mInterface;
- } else if (!keepBackup) {
+ if (keepBackup) {
+ if (mBackupHalInterface == nullptr) {
+ mBackupHalInterface = parent->mInterface;
+ }
+ if (mBackupResultMetadataQueue == nullptr) {
+ mBackupResultMetadataQueue = std::move(parent->mResultMetadataQueue);
+ parent->mResultMetadataQueue = std::move(mInjectionResultMetadataQueue);
+ }
+ } else {
mBackupHalInterface = nullptr;
+ parent->mResultMetadataQueue = std::move(mBackupResultMetadataQueue);
+ mBackupResultMetadataQueue = nullptr;
}
parent->mInterface = newHalInterface;
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
index 8cc6833..61e43cb 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
@@ -48,7 +48,7 @@
status_t Camera3FakeStream::returnBufferLocked(
const camera_stream_buffer &,
- nsecs_t, const std::vector<size_t>&) {
+ nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
ATRACE_CALL();
ALOGE("%s: Stream %d: Fake stream cannot return buffers!", __FUNCTION__, mId);
return INVALID_OPERATION;
@@ -56,8 +56,9 @@
status_t Camera3FakeStream::returnBufferCheckedLocked(
const camera_stream_buffer &,
- nsecs_t,
+ nsecs_t, nsecs_t,
bool,
+ int32_t,
const std::vector<size_t>&,
/*out*/
sp<Fence>*) {
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index 914ccbf..df19c3d 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -108,7 +108,9 @@
virtual status_t returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut);
@@ -134,7 +136,8 @@
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferLocked(
const camera_stream_buffer &buffer,
- nsecs_t timestamp, const std::vector<size_t>& surface_ids);
+ nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
+ const std::vector<size_t>& surface_ids);
virtual status_t configureQueueLocked();
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 0204d49..f4b3197 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -82,7 +82,7 @@
camera_stream::width, camera_stream::height,
camera_stream::format, camera_stream::data_space);
lines.appendFormat(" Max size: %zu\n", mMaxSize);
- lines.appendFormat(" Combined usage: %" PRIu64 ", max HAL buffers: %d\n",
+ lines.appendFormat(" Combined usage: 0x%" PRIx64 ", max HAL buffers: %d\n",
mUsage | consumerUsage, camera_stream::max_buffers);
if (strlen(camera_stream::physical_camera_id) > 0) {
lines.appendFormat(" Physical camera id: %s\n", camera_stream::physical_camera_id);
@@ -224,7 +224,9 @@
status_t Camera3IOStreamBase::returnAnyBufferLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids) {
status_t res;
@@ -241,7 +243,8 @@
}
sp<Fence> releaseFence;
- res = returnBufferCheckedLocked(buffer, timestamp, output, surface_ids,
+ res = returnBufferCheckedLocked(buffer, timestamp, readoutTimestamp,
+ output, transform, surface_ids,
&releaseFence);
// Res may be an error, but we still want to decrement our owned count
// to enable clean shutdown. So we'll just return the error but otherwise
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 90c8a7b..fb73c97 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -67,13 +67,17 @@
status_t returnAnyBufferLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut) = 0;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 6d8317b..9a3f7ed 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -105,12 +105,15 @@
status_t Camera3InputStream::returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t /*transform*/,
const std::vector<size_t>&,
/*out*/
sp<Fence> *releaseFenceOut) {
(void)timestamp;
+ (void)readoutTimestamp;
(void)output;
ALOG_ASSERT(!output, "Expected output to be false");
@@ -175,7 +178,8 @@
const camera_stream_buffer &buffer) {
ATRACE_CALL();
- return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false);
+ return returnAnyBufferLocked(buffer, /*timestamp*/0, /*readoutTimestamp*/0,
+ /*output*/false, /*transform*/ -1);
}
status_t Camera3InputStream::getInputBufferProducerLocked(
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 46221d1..5e0587b 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -61,7 +61,9 @@
virtual status_t returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut);
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
index a7e64ce..b702e20 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
@@ -261,7 +261,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords
+ mBufferRecords, /*legacyClient*/ false
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -301,7 +301,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords
+ mBufferRecords, /*legacyClient*/ false
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -336,7 +336,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords
+ mBufferRecords, /*legacyClient*/ false
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputInterface.h b/services/camera/libcameraservice/device3/Camera3OutputInterface.h
index 8817833..40eef1d 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputInterface.h
@@ -50,6 +50,10 @@
// return pair of (newlySeenBuffer?, bufferId)
virtual std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId) = 0;
+ // Return the removed buffer ID if input cache is found.
+ // Otherwise return BUFFER_ID_NO_BUFFER
+ virtual uint64_t removeOneBufferCache(int streamId, const native_handle_t* handle) = 0;
+
// Find a buffer_handle_t based on frame number and stream ID
virtual status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
/*out*/ buffer_handle_t **buffer) = 0;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 03b77fc..0dfeac3 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -22,6 +22,7 @@
#include <fstream>
#include <android-base/unique_fd.h>
+#include <cutils/properties.h>
#include <ui/GraphicBuffer.h>
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -263,14 +264,16 @@
status_t Camera3OutputStream::returnBufferLocked(
const camera_stream_buffer &buffer,
- nsecs_t timestamp, const std::vector<size_t>& surface_ids) {
+ nsecs_t timestamp, nsecs_t readoutTimestamp,
+ int32_t transform, const std::vector<size_t>& surface_ids) {
ATRACE_HFR_CALL();
if (mHandoutTotalBufferCount == 1) {
returnPrefetchedBuffersLocked();
}
- status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true, surface_ids);
+ status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
+ /*output*/true, transform, surface_ids);
if (res != OK) {
return res;
@@ -285,7 +288,9 @@
status_t Camera3OutputStream::returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut) {
@@ -346,16 +351,6 @@
mTraceFirstBuffer = false;
}
- /* Certain consumers (such as AudioSource or HardwareComposer) use
- * MONOTONIC time, causing time misalignment if camera timestamp is
- * in BOOTTIME. Do the conversion if necessary. */
- res = native_window_set_buffers_timestamp(mConsumer.get(),
- mUseMonoTimestamp ? timestamp - mTimestampOffset : timestamp);
- if (res != OK) {
- ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- return res;
- }
// If this is a JPEG output, and image dump mask is set, save image to
// disk.
if (getFormat() == HAL_PIXEL_FORMAT_BLOB && getDataSpace() == HAL_DATASPACE_V0_JFIF &&
@@ -363,10 +358,32 @@
dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
}
- res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
- if (shouldLogError(res, state)) {
- ALOGE("%s: Stream %d: Error queueing buffer to native window:"
- " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ /* Certain consumers (such as AudioSource or HardwareComposer) use
+ * MONOTONIC time, causing time misalignment if camera timestamp is
+ * in BOOTTIME. Do the conversion if necessary. */
+ nsecs_t t = mPreviewFrameScheduler != nullptr ? readoutTimestamp : timestamp;
+ nsecs_t adjustedTs = mUseMonoTimestamp ? t - mTimestampOffset : t;
+ if (mPreviewFrameScheduler != nullptr) {
+ res = mPreviewFrameScheduler->queuePreviewBuffer(adjustedTs, transform,
+ anwBuffer, anwReleaseFence);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error queuing buffer to preview buffer scheduler: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ } else {
+ setTransform(transform);
+ res = native_window_set_buffers_timestamp(mConsumer.get(), adjustedTs);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
+ if (shouldLogError(res, state)) {
+ ALOGE("%s: Stream %d: Error queueing buffer to native window:"
+ " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ }
}
}
mLock.lock();
@@ -407,6 +424,9 @@
status_t Camera3OutputStream::setTransformLocked(int transform) {
status_t res = OK;
+
+ if (transform == -1) return res;
+
if (mState == STATE_ERROR) {
ALOGE("%s: Stream in error state", __FUNCTION__);
return INVALID_OPERATION;
@@ -432,7 +452,7 @@
return res;
}
- if ((res = configureConsumerQueueLocked()) != OK) {
+ if ((res = configureConsumerQueueLocked(true /*allowPreviewScheduler*/)) != OK) {
return res;
}
@@ -456,7 +476,7 @@
return OK;
}
-status_t Camera3OutputStream::configureConsumerQueueLocked() {
+status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewScheduler) {
status_t res;
mTraceFirstBuffer = true;
@@ -542,6 +562,15 @@
}
mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;
+ if (allowPreviewScheduler && isConsumedByHWComposer()) {
+ // We cannot distinguish between a SurfaceView and an ImageReader of
+ // preview buffer format. The PreviewFrameScheduler needs to handle both.
+ if (!property_get_bool("camera.disable_preview_scheduler", false)) {
+ mPreviewFrameScheduler = std::make_unique<PreviewFrameScheduler>(*this, mConsumer);
+ mTotalBufferCount += PreviewFrameScheduler::kQueueDepthWatermark;
+ }
+ }
+
mHandoutTotalBufferCount = 0;
mFrameCount = 0;
mLastTimestamp = 0;
@@ -1180,6 +1209,11 @@
}
}
+bool Camera3OutputStream::shouldLogError(status_t res) {
+ Mutex::Autolock l(mLock);
+ return shouldLogError(res, mState);
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index ad03b53..a70b883 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -27,6 +27,7 @@
#include "Camera3IOStreamBase.h"
#include "Camera3OutputStreamInterface.h"
#include "Camera3BufferManager.h"
+#include "PreviewFrameScheduler.h"
namespace android {
@@ -229,6 +230,7 @@
static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+ bool shouldLogError(status_t res);
protected:
Camera3OutputStream(int id, camera_stream_type_t type,
@@ -245,7 +247,9 @@
virtual status_t returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
+ nsecs_t readoutTimestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut);
@@ -254,7 +258,7 @@
status_t getEndpointUsageForSurface(uint64_t *usage,
const sp<Surface>& surface) const;
- status_t configureConsumerQueueLocked();
+ status_t configureConsumerQueueLocked(bool allowPreviewScheduler);
// Consumer as the output of camera HAL
sp<Surface> mConsumer;
@@ -332,7 +336,8 @@
virtual status_t returnBufferLocked(
const camera_stream_buffer &buffer,
- nsecs_t timestamp, const std::vector<size_t>& surface_ids);
+ nsecs_t timestamp, nsecs_t readoutTimestamp,
+ int32_t transform, const std::vector<size_t>& surface_ids);
virtual status_t queueBufferToConsumer(sp<ANativeWindow>& consumer,
ANativeWindowBuffer* buffer, int anwReleaseFence,
@@ -369,6 +374,8 @@
int mImageDumpMask = 0;
+ // The preview stream scheduler for re-timing frames
+ std::unique_ptr<PreviewFrameScheduler> mPreviewFrameScheduler;
}; // class Camera3OutputStream
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 9f225d0..5e4f38a 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -42,10 +42,13 @@
#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
#include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
+#include <camera/CameraUtils.h>
#include <camera_metadata_hidden.h>
#include "device3/Camera3OutputUtils.h"
+#include "system/camera_metadata.h"
+
using namespace android::camera3;
using namespace android::hardware::camera;
@@ -460,11 +463,11 @@
returnOutputBuffers(
states.useHalBufManager, states.listener,
request.pendingOutputBuffers.array(),
- request.pendingOutputBuffers.size(), 0,
+ request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
/*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
/*timestampIncreasing*/true,
request.outputSurfaces, request.resultExtras,
- request.errorBufStrategy);
+ request.errorBufStrategy, request.transform);
// Note down the just completed frame number
if (request.hasInputBuffer) {
@@ -555,6 +558,31 @@
if (result->partial_result != 0)
request.resultExtras.partialResultCount = result->partial_result;
+ if ((result->result != nullptr) && !states.legacyClient) {
+ camera_metadata_ro_entry entry;
+ auto ret = find_camera_metadata_ro_entry(result->result,
+ ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
+ if ((ret == OK) && (entry.count > 0)) {
+ std::string physicalId(reinterpret_cast<const char *>(entry.data.u8));
+ auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
+ if (deviceInfo != states.physicalDeviceInfoMap.end()) {
+ auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
+ if (orientation.count > 0) {
+ ret = CameraUtils::getRotationTransform(deviceInfo->second,
+ &request.transform);
+ if (ret != OK) {
+ ALOGE("%s: Failed to calculate current stream transformation: %s (%d)",
+ __FUNCTION__, strerror(-ret), ret);
+ }
+ } else {
+ ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
+ }
+ } else {
+ ALOGE("%s: Physical device not found in device info map found!", __FUNCTION__);
+ }
+ }
+ }
+
// Check if this result carries only partial metadata
if (states.usePartialResult && result->result != NULL) {
if (result->partial_result > states.numPartialResults || result->partial_result < 1) {
@@ -842,11 +870,11 @@
bool useHalBufManager,
sp<NotificationListener> listener,
const camera_stream_buffer_t *outputBuffers, size_t numBuffers,
- nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
- SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing,
- const SurfaceMap& outputSurfaces,
+ nsecs_t timestamp, nsecs_t readoutTimestamp, bool requested,
+ nsecs_t requestTimeNs, SessionStatsBuilder& sessionStatsBuilder,
+ bool timestampIncreasing, const SurfaceMap& outputSurfaces,
const CaptureResultExtras &inResultExtras,
- ERROR_BUF_STRATEGY errorBufStrategy) {
+ ERROR_BUF_STRATEGY errorBufStrategy, int32_t transform) {
for (size_t i = 0; i < numBuffers; i++)
{
@@ -888,12 +916,12 @@
errorBufStrategy != ERROR_BUF_CACHE) {
if (it != outputSurfaces.end()) {
res = stream->returnBuffer(
- outputBuffers[i], timestamp, timestampIncreasing, it->second,
- inResultExtras.frameNumber);
+ outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
+ it->second, inResultExtras.frameNumber, transform);
} else {
res = stream->returnBuffer(
- outputBuffers[i], timestamp, timestampIncreasing, std::vector<size_t> (),
- inResultExtras.frameNumber);
+ outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
+ std::vector<size_t> (), inResultExtras.frameNumber, transform);
}
}
// Note: stream may be deallocated at this point, if this buffer was
@@ -923,9 +951,9 @@
// cancel the buffer
camera_stream_buffer_t sb = outputBuffers[i];
sb.status = CAMERA_BUFFER_STATUS_ERROR;
- stream->returnBuffer(sb, /*timestamp*/0,
+ stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
timestampIncreasing, std::vector<size_t> (),
- inResultExtras.frameNumber);
+ inResultExtras.frameNumber, transform);
if (listener != nullptr) {
CaptureResultExtras extras = inResultExtras;
@@ -945,10 +973,10 @@
returnOutputBuffers(useHalBufManager, listener,
request.pendingOutputBuffers.array(),
request.pendingOutputBuffers.size(),
- request.shutterTimestamp, /*requested*/true,
- request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
+ request.shutterTimestamp, request.shutterReadoutTimestamp,
+ /*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
request.outputSurfaces, request.resultExtras,
- request.errorBufStrategy);
+ request.errorBufStrategy, request.transform);
// Remove error buffers that are not cached.
for (auto iter = request.pendingOutputBuffers.begin();
@@ -1007,6 +1035,7 @@
}
r.shutterTimestamp = msg.timestamp;
+ r.shutterReadoutTimestamp = msg.readout_timestamp;
if (r.hasCallback) {
ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
states.cameraId.string(), __FUNCTION__,
@@ -1165,7 +1194,30 @@
}
void notify(CaptureOutputStates& states,
- const hardware::camera::device::V3_2::NotifyMsg& msg) {
+ const hardware::camera::device::V3_8::NotifyMsg& msg) {
+ using android::hardware::camera::device::V3_2::MsgType;
+
+ hardware::camera::device::V3_2::NotifyMsg msg_3_2;
+ msg_3_2.type = msg.type;
+ bool hasReadoutTime = false;
+ uint64_t readoutTime = 0;
+ switch (msg.type) {
+ case MsgType::ERROR:
+ msg_3_2.msg.error = msg.msg.error;
+ break;
+ case MsgType::SHUTTER:
+ msg_3_2.msg.shutter = msg.msg.shutter.v3_2;
+ hasReadoutTime = true;
+ readoutTime = msg.msg.shutter.readoutTimestamp;
+ break;
+ }
+ notify(states, msg_3_2, hasReadoutTime, readoutTime);
+}
+
+void notify(CaptureOutputStates& states,
+ const hardware::camera::device::V3_2::NotifyMsg& msg,
+ bool hasReadoutTime, uint64_t readoutTime) {
+
using android::hardware::camera::device::V3_2::MsgType;
using android::hardware::camera::device::V3_2::ErrorCode;
@@ -1206,11 +1258,21 @@
m.type = CAMERA_MSG_SHUTTER;
m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+ m.message.shutter.readout_timestamp = hasReadoutTime ?
+ readoutTime : m.message.shutter.timestamp;
break;
}
notify(states, &m);
}
+// The buffers requested through this call are not tied to any CaptureRequest in
+// particular. They may used by the hal for a particular frame's output buffer
+// or for its internal use as well. In the case that the hal does use any buffer
+// from the requested list here, for a particular frame's output buffer, the
+// buffer will be returned with the processCaptureResult call corresponding to
+// the frame. The other buffers will be returned through returnStreamBuffers.
+// The buffers returned via returnStreamBuffers will not have a valid
+// timestamp(0) and will be dropped by the bufferqueue.
void requestStreamBuffers(RequestBufferStates& states,
const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb _hidl_cb) {
@@ -1305,6 +1367,7 @@
hardware::hidl_vec<StreamBuffer> tmpRetBuffers(numBuffersRequested);
bool currentReqSucceeds = true;
std::vector<camera_stream_buffer_t> streamBuffers(numBuffersRequested);
+ std::vector<buffer_handle_t> newBuffers;
size_t numAllocatedBuffers = 0;
size_t numPushedInflightBuffers = 0;
for (size_t b = 0; b < numBuffersRequested; b++) {
@@ -1344,6 +1407,9 @@
hBuf.buffer = (isNewBuffer) ? *buffer : nullptr;
hBuf.status = BufferStatus::OK;
hBuf.releaseFence = nullptr;
+ if (isNewBuffer) {
+ newBuffers.push_back(*buffer);
+ }
native_handle_t *acquireFence = nullptr;
if (sb.acquire_fence != -1) {
@@ -1384,8 +1450,12 @@
sb.status = CAMERA_BUFFER_STATUS_ERROR;
}
returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
- streamBuffers.data(), numAllocatedBuffers, 0, /*requested*/false,
+ streamBuffers.data(), numAllocatedBuffers, /*timestamp*/0,
+ /*readoutTimestamp*/0, /*requested*/false,
/*requestTimeNs*/0, states.sessionStatsBuilder);
+ for (auto buf : newBuffers) {
+ states.bufferRecordsIntf.removeOneBufferCache(streamId, buf);
+ }
}
}
@@ -1442,8 +1512,8 @@
}
streamBuffer.stream = stream->asHalStream();
returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
- &streamBuffer, /*size*/1, /*timestamp*/ 0, /*requested*/false,
- /*requestTimeNs*/0, states.sessionStatsBuilder);
+ &streamBuffer, /*size*/1, /*timestamp*/ 0, /*readoutTimestamp*/0,
+ /*requested*/false, /*requestTimeNs*/0, states.sessionStatsBuilder);
}
}
@@ -1456,9 +1526,10 @@
returnOutputBuffers(
states.useHalBufManager, states.listener,
request.pendingOutputBuffers.array(),
- request.pendingOutputBuffers.size(), 0, /*requested*/true,
- request.requestTimeNs, states.sessionStatsBuilder, /*timestampIncreasing*/true,
- request.outputSurfaces, request.resultExtras, request.errorBufStrategy);
+ request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
+ /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+ /*timestampIncreasing*/true, request.outputSurfaces, request.resultExtras,
+ request.errorBufStrategy);
ALOGW("%s: Frame %d | Timestamp: %" PRId64 ", metadata"
" arrived: %s, buffers left: %d.\n", __FUNCTION__,
states.inflightMap.keyAt(idx), request.shutterTimestamp,
@@ -1530,7 +1601,7 @@
switch (halStream->stream_type) {
case CAMERA_STREAM_OUTPUT:
res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0,
- /*timestampIncreasing*/true,
+ /*readoutTimestamp*/0, /*timestampIncreasing*/true,
std::vector<size_t> (), frameNumber);
if (res != OK) {
ALOGE("%s: Can't return output buffer for frame %d to"
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 142889a..51899ee 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -26,6 +26,8 @@
#include <common/CameraDeviceBase.h>
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
+
#include "device3/BufferUtils.h"
#include "device3/DistortionMapper.h"
#include "device3/ZoomRatioMapper.h"
@@ -42,66 +44,6 @@
namespace camera3 {
- typedef struct camera_stream_configuration {
- uint32_t num_streams;
- camera_stream_t **streams;
- uint32_t operation_mode;
- bool input_is_multi_resolution;
- } camera_stream_configuration_t;
-
- typedef struct camera_capture_request {
- uint32_t frame_number;
- const camera_metadata_t *settings;
- camera_stream_buffer_t *input_buffer;
- uint32_t num_output_buffers;
- const camera_stream_buffer_t *output_buffers;
- uint32_t num_physcam_settings;
- const char **physcam_id;
- const camera_metadata_t **physcam_settings;
- int32_t input_width;
- int32_t input_height;
- } camera_capture_request_t;
-
- typedef struct camera_capture_result {
- uint32_t frame_number;
- const camera_metadata_t *result;
- uint32_t num_output_buffers;
- const camera_stream_buffer_t *output_buffers;
- const camera_stream_buffer_t *input_buffer;
- uint32_t partial_result;
- uint32_t num_physcam_metadata;
- const char **physcam_ids;
- const camera_metadata_t **physcam_metadata;
- } camera_capture_result_t;
-
- typedef struct camera_shutter_msg {
- uint32_t frame_number;
- uint64_t timestamp;
- } camera_shutter_msg_t;
-
- typedef struct camera_error_msg {
- uint32_t frame_number;
- camera_stream_t *error_stream;
- int error_code;
- } camera_error_msg_t;
-
- typedef enum camera_error_msg_code {
- CAMERA_MSG_ERROR_DEVICE = 1,
- CAMERA_MSG_ERROR_REQUEST = 2,
- CAMERA_MSG_ERROR_RESULT = 3,
- CAMERA_MSG_ERROR_BUFFER = 4,
- CAMERA_MSG_NUM_ERRORS
- } camera_error_msg_code_t;
-
- typedef struct camera_notify_msg {
- int type;
-
- union {
- camera_error_msg_t error;
- camera_shutter_msg_t shutter;
- } message;
- } camera_notify_msg_t;
-
/**
* Helper methods shared between Camera3Device/Camera3OfflineSession for HAL callbacks
*/
@@ -112,13 +54,15 @@
bool useHalBufManager,
sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
const camera_stream_buffer_t *outputBuffers,
- size_t numBuffers, nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
+ size_t numBuffers, nsecs_t timestamp,
+ nsecs_t readoutTimestamp, bool requested, nsecs_t requestTimeNs,
SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing = true,
// The following arguments are only meant for surface sharing use case
const SurfaceMap& outputSurfaces = SurfaceMap{},
// Used to send buffer error callback when failing to return buffer
const CaptureResultExtras &resultExtras = CaptureResultExtras{},
- ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN);
+ ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN,
+ int32_t transform = -1);
// helper function to return the output buffers to output streams, and
// remove the returned buffers from the inflight request's pending buffers
@@ -165,6 +109,7 @@
SetErrorInterface& setErrIntf;
InflightRequestUpdateInterface& inflightIntf;
BufferRecordsInterface& bufferRecordsIntf;
+ bool legacyClient;
};
// Handle one capture result. Assume callers hold the lock to serialize all
@@ -177,7 +122,10 @@
// Handle one notify message
void notify(CaptureOutputStates& states,
- const hardware::camera::device::V3_2::NotifyMsg& msg);
+ const hardware::camera::device::V3_2::NotifyMsg& msg,
+ bool hasReadoutTime = false, uint64_t readoutTime = 0LL);
+ void notify(CaptureOutputStates& states,
+ const hardware::camera::device::V3_8::NotifyMsg& msg);
struct RequestBufferStates {
const String8& cameraId;
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 15cf7f4..9e0c8f3 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -247,7 +247,7 @@
return res;
}
- res = configureConsumerQueueLocked();
+ res = configureConsumerQueueLocked(false/*allowPreviewScheduler*/);
if (res != OK) {
ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
return res;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 02b6585..1405fa1 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -557,7 +557,8 @@
for (size_t i = 0; i < mPreparedBufferIdx; i++) {
mPreparedBuffers.editItemAt(i).release_fence = -1;
mPreparedBuffers.editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
- returnBufferLocked(mPreparedBuffers[i], 0);
+ returnBufferLocked(mPreparedBuffers[i], /*timestamp*/0, /*readoutTimestamp*/0,
+ /*transform*/ -1);
}
mPreparedBuffers.clear();
mPreparedBufferIdx = 0;
@@ -713,8 +714,8 @@
}
status_t Camera3Stream::returnBuffer(const camera_stream_buffer &buffer,
- nsecs_t timestamp, bool timestampIncreasing,
- const std::vector<size_t>& surface_ids, uint64_t frameNumber) {
+ nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
+ const std::vector<size_t>& surface_ids, uint64_t frameNumber, int32_t transform) {
ATRACE_HFR_CALL();
Mutex::Autolock l(mLock);
@@ -743,7 +744,7 @@
*
* Do this for getBuffer as well.
*/
- status_t res = returnBufferLocked(b, timestamp, surface_ids);
+ status_t res = returnBufferLocked(b, timestamp, readoutTimestamp, transform, surface_ids);
if (res == OK) {
fireBufferListenersLocked(b, /*acquired*/false, /*output*/true, timestamp, frameNumber);
}
@@ -931,7 +932,7 @@
}
status_t Camera3Stream::returnBufferLocked(const camera_stream_buffer &,
- nsecs_t, const std::vector<size_t>&) {
+ nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
ALOGE("%s: This type of stream does not support output", __FUNCTION__);
return INVALID_OPERATION;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 5a364ab..17041de 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -352,9 +352,9 @@
* For bidirectional streams, this method applies to the output-side buffers
*/
status_t returnBuffer(const camera_stream_buffer &buffer,
- nsecs_t timestamp, bool timestampIncreasing,
+ nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
const std::vector<size_t>& surface_ids = std::vector<size_t>(),
- uint64_t frameNumber = 0);
+ uint64_t frameNumber = 0, int32_t transform = -1);
/**
* Fill in the camera_stream_buffer with the next valid buffer for this
@@ -517,7 +517,7 @@
virtual status_t getBufferLocked(camera_stream_buffer *buffer,
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferLocked(const camera_stream_buffer &buffer,
- nsecs_t timestamp,
+ nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t getBuffersLocked(std::vector<OutstandingBuffer>*);
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 2d3397c..5f20f17 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -357,9 +357,9 @@
* For bidirectional streams, this method applies to the output-side buffers
*/
virtual status_t returnBuffer(const camera_stream_buffer &buffer,
- nsecs_t timestamp, bool timestampIncreasing = true,
+ nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing = true,
const std::vector<size_t>& surface_ids = std::vector<size_t>(),
- uint64_t frameNumber = 0) = 0;
+ uint64_t frameNumber = 0, int32_t transform = -1) = 0;
/**
* Fill in the camera_stream_buffer with the next valid buffer for this
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 523a2c7..0c97f3e 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -30,6 +30,67 @@
namespace camera3 {
+typedef struct camera_stream_configuration {
+ uint32_t num_streams;
+ camera_stream_t **streams;
+ uint32_t operation_mode;
+ bool input_is_multi_resolution;
+} camera_stream_configuration_t;
+
+typedef struct camera_capture_request {
+ uint32_t frame_number;
+ const camera_metadata_t *settings;
+ camera_stream_buffer_t *input_buffer;
+ uint32_t num_output_buffers;
+ const camera_stream_buffer_t *output_buffers;
+ uint32_t num_physcam_settings;
+ const char **physcam_id;
+ const camera_metadata_t **physcam_settings;
+ int32_t input_width;
+ int32_t input_height;
+} camera_capture_request_t;
+
+typedef struct camera_capture_result {
+ uint32_t frame_number;
+ const camera_metadata_t *result;
+ uint32_t num_output_buffers;
+ const camera_stream_buffer_t *output_buffers;
+ const camera_stream_buffer_t *input_buffer;
+ uint32_t partial_result;
+ uint32_t num_physcam_metadata;
+ const char **physcam_ids;
+ const camera_metadata_t **physcam_metadata;
+} camera_capture_result_t;
+
+typedef struct camera_shutter_msg {
+ uint32_t frame_number;
+ uint64_t timestamp;
+ uint64_t readout_timestamp;
+} camera_shutter_msg_t;
+
+typedef struct camera_error_msg {
+ uint32_t frame_number;
+ camera_stream_t *error_stream;
+ int error_code;
+} camera_error_msg_t;
+
+typedef enum camera_error_msg_code {
+ CAMERA_MSG_ERROR_DEVICE = 1,
+ CAMERA_MSG_ERROR_REQUEST = 2,
+ CAMERA_MSG_ERROR_RESULT = 3,
+ CAMERA_MSG_ERROR_BUFFER = 4,
+ CAMERA_MSG_NUM_ERRORS
+} camera_error_msg_code_t;
+
+typedef struct camera_notify_msg {
+ int type;
+
+ union {
+ camera_error_msg_t error;
+ camera_shutter_msg_t shutter;
+ } message;
+} camera_notify_msg_t;
+
typedef enum {
// Cache the buffers with STATUS_ERROR within InFlightRequest
ERROR_BUF_CACHE,
@@ -41,9 +102,10 @@
} ERROR_BUF_STRATEGY;
struct InFlightRequest {
-
// Set by notify() SHUTTER call.
nsecs_t shutterTimestamp;
+ // Set by notify() SHUTTER call with readout time.
+ nsecs_t shutterReadoutTimestamp;
// Set by process_capture_result().
nsecs_t sensorTimestamp;
int requestStatus;
@@ -122,6 +184,9 @@
// What shared surfaces an output should go to
SurfaceMap outputSurfaces;
+ // Current output transformation
+ int32_t transform;
+
// TODO: dedupe
static const nsecs_t kDefaultExpectedDuration = 100000000; // 100 ms
@@ -140,7 +205,8 @@
stillCapture(false),
zslCapture(false),
rotateAndCropAuto(false),
- requestTimeNs(0) {
+ requestTimeNs(0),
+ transform(-1) {
}
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
@@ -165,7 +231,8 @@
rotateAndCropAuto(rotateAndCropAuto),
cameraIdsWithZoom(idsWithZoom),
requestTimeNs(requestNs),
- outputSurfaces(outSurfaces) {
+ outputSurfaces(outSurfaces),
+ transform(-1) {
}
};
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
new file mode 100644
index 0000000..1fbdb18
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-PreviewFrameScheduler"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <android/looper.h>
+#include "PreviewFrameScheduler.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Internal Choreographer thread implementation for polling and handling callbacks
+ */
+
+// Callback function for Choreographer
+static void frameCallback(const AChoreographerFrameCallbackData* callbackData, void* data) {
+ PreviewFrameScheduler* parent = static_cast<PreviewFrameScheduler*>(data);
+ if (parent == nullptr) {
+ ALOGE("%s: Invalid data for Choreographer callback!", __FUNCTION__);
+ return;
+ }
+
+ size_t length = AChoreographerFrameCallbackData_getFrameTimelinesLength(callbackData);
+ std::vector<nsecs_t> timeline(length);
+ for (size_t i = 0; i < length; i++) {
+ nsecs_t timestamp = AChoreographerFrameCallbackData_getFrameTimelineExpectedPresentTime(
+ callbackData, i);
+ timeline[i] = timestamp;
+ }
+
+ parent->onNewPresentationTime(timeline);
+
+ AChoreographer_postExtendedFrameCallback(AChoreographer_getInstance(), frameCallback, data);
+}
+
+struct ChoreographerThread : public Thread {
+ ChoreographerThread();
+ status_t start(PreviewFrameScheduler* parent);
+ virtual status_t readyToRun() override;
+ virtual bool threadLoop() override;
+
+protected:
+ virtual ~ChoreographerThread() {}
+
+private:
+ ChoreographerThread &operator=(const ChoreographerThread &);
+
+ // This only impacts the shutdown time. It won't impact the choreographer
+ // callback frequency.
+ static constexpr nsecs_t kPollingTimeoutMs = 5;
+ PreviewFrameScheduler* mParent = nullptr;
+};
+
+ChoreographerThread::ChoreographerThread() : Thread(false /*canCallJava*/) {
+}
+
+status_t ChoreographerThread::start(PreviewFrameScheduler* parent) {
+ mParent = parent;
+ return run("PreviewChoreographer");
+}
+
+status_t ChoreographerThread::readyToRun() {
+ ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+ if (AChoreographer_getInstance() == NULL) {
+ return NO_INIT;
+ }
+
+ AChoreographer_postExtendedFrameCallback(
+ AChoreographer_getInstance(), frameCallback, mParent);
+ return OK;
+}
+
+bool ChoreographerThread::threadLoop() {
+ if (exitPending()) {
+ return false;
+ }
+ ALooper_pollOnce(kPollingTimeoutMs, nullptr, nullptr, nullptr);
+ return true;
+}
+
+/**
+ * PreviewFrameScheduler implementation
+ */
+
+PreviewFrameScheduler::PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer) :
+ mParent(parent),
+ mConsumer(consumer),
+ mChoreographerThread(new ChoreographerThread()) {
+}
+
+PreviewFrameScheduler::~PreviewFrameScheduler() {
+ {
+ Mutex::Autolock l(mLock);
+ mChoreographerThread->requestExit();
+ }
+ mChoreographerThread->join();
+}
+
+status_t PreviewFrameScheduler::queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+ ANativeWindowBuffer* anwBuffer, int releaseFence) {
+ // Start choreographer thread if it's not already running.
+ if (!mChoreographerThread->isRunning()) {
+ status_t res = mChoreographerThread->start(this);
+ if (res != OK) {
+ ALOGE("%s: Failed to init choreographer thread!", __FUNCTION__);
+ return res;
+ }
+ }
+
+ {
+ Mutex::Autolock l(mLock);
+ mPendingBuffers.emplace(timestamp, transform, anwBuffer, releaseFence);
+
+ // Queue buffer to client right away if pending buffers are more than
+ // the queue depth watermark.
+ if (mPendingBuffers.size() > kQueueDepthWatermark) {
+ auto oldBuffer = mPendingBuffers.front();
+ mPendingBuffers.pop();
+
+ status_t res = queueBufferToClientLocked(oldBuffer, oldBuffer.timestamp);
+ if (res != OK) {
+ return res;
+ }
+
+ // Reset the last capture and presentation time
+ mLastCameraCaptureTime = 0;
+ mLastCameraPresentTime = 0;
+ } else {
+ ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+ }
+ }
+ return OK;
+}
+
+void PreviewFrameScheduler::onNewPresentationTime(const std::vector<nsecs_t>& timeline) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+ if (mPendingBuffers.size() > 0) {
+ auto nextBuffer = mPendingBuffers.front();
+ mPendingBuffers.pop();
+
+ // Find the best presentation time by finding the element in the
+ // choreographer timeline that's closest to the ideal presentation time.
+ // The ideal presentation time is the last presentation time + frame
+ // interval.
+ nsecs_t cameraInterval = nextBuffer.timestamp - mLastCameraCaptureTime;
+ nsecs_t idealPresentTime = (cameraInterval < kSpacingResetIntervalNs) ?
+ (mLastCameraPresentTime + cameraInterval) : nextBuffer.timestamp;
+ nsecs_t presentTime = *std::min_element(timeline.begin(), timeline.end(),
+ [idealPresentTime](nsecs_t p1, nsecs_t p2) {
+ return std::abs(p1 - idealPresentTime) < std::abs(p2 - idealPresentTime);
+ });
+
+ status_t res = queueBufferToClientLocked(nextBuffer, presentTime);
+ ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+
+ if (mParent.shouldLogError(res)) {
+ ALOGE("%s: Preview Stream: Error queueing buffer to native window:"
+ " %s (%d)", __FUNCTION__, strerror(-res), res);
+ }
+
+ mLastCameraCaptureTime = nextBuffer.timestamp;
+ mLastCameraPresentTime = presentTime;
+ }
+}
+
+status_t PreviewFrameScheduler::queueBufferToClientLocked(
+ const BufferHolder& bufferHolder, nsecs_t timestamp) {
+ mParent.setTransform(bufferHolder.transform);
+
+ status_t res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+ if (res != OK) {
+ ALOGE("%s: Preview Stream: Error setting timestamp: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ res = mConsumer->queueBuffer(mConsumer.get(), bufferHolder.anwBuffer.get(),
+ bufferHolder.releaseFence);
+ if (res != OK) {
+ close(bufferHolder.releaseFence);
+ }
+
+ return res;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.h b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
new file mode 100644
index 0000000..c0574fd
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+
+#include <queue>
+
+#include <android/choreographer.h>
+#include <gui/Surface.h>
+#include <gui/ISurfaceComposer.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/Looper.h>
+#include <utils/Thread.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3OutputStream;
+struct ChoreographerThread;
+
+/***
+ * Preview stream scheduler for better preview display synchronization
+ *
+ * The ideal viewfinder user experience is that frames are presented to the
+ * user in the same cadence as output by the camera sensor. However, the
+ * processing latency between frames could vary, due to factors such
+ * as CPU load, differences in request settings, etc. This frame processing
+ * latency results in variation in presentation of frames to the user.
+ *
+ * The PreviewFrameScheduler improves the viewfinder user experience by:
+ * 1. Cache preview buffers in the scheduler
+ * 2. For each choreographer callback, queue the oldest cached buffer with
+ * the best matching presentation timestamp. Frame N's presentation timestamp
+ * is the choreographer timeline timestamp closest to (Frame N-1's
+ * presentation time + camera capture interval between frame N-1 and frame N).
+ * 3. Maintain at most 2 queue-able buffers. If the 3rd preview buffer becomes
+ * available, queue the oldest cached buffer to the buffer queue.
+ */
+class PreviewFrameScheduler {
+ public:
+ explicit PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer);
+ virtual ~PreviewFrameScheduler();
+
+ // Queue preview buffer locally
+ status_t queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+ ANativeWindowBuffer* anwBuffer, int releaseFence);
+
+ // Callback function with a new presentation timeline from choreographer. This
+ // will trigger a locally queued buffer be sent to the buffer queue.
+ void onNewPresentationTime(const std::vector<nsecs_t>& presentationTimeline);
+
+ // Maintain at most 2 queue-able buffers
+ static constexpr int32_t kQueueDepthWatermark = 2;
+
+ private:
+ // structure holding cached preview buffer info
+ struct BufferHolder {
+ nsecs_t timestamp;
+ int32_t transform;
+ sp<ANativeWindowBuffer> anwBuffer;
+ int releaseFence;
+
+ BufferHolder(nsecs_t t, int32_t tr, ANativeWindowBuffer* anwb, int rf) :
+ timestamp(t), transform(tr), anwBuffer(anwb), releaseFence(rf) {}
+ };
+
+ status_t queueBufferToClientLocked(const BufferHolder& bufferHolder,
+ nsecs_t presentTime);
+
+ static constexpr char kPendingBufferTraceName[] = "pending_preview_buffers";
+
+ // Camera capture interval for resetting frame spacing between preview sessions
+ static constexpr nsecs_t kSpacingResetIntervalNs = 1000000000L; // 1 second
+
+ Camera3OutputStream& mParent;
+ sp<ANativeWindow> mConsumer;
+ mutable Mutex mLock;
+
+ std::queue<BufferHolder> mPendingBuffers;
+ nsecs_t mLastCameraCaptureTime = 0;
+ nsecs_t mLastCameraPresentTime = 0;
+
+ // Choreographer related
+ sp<Looper> mLooper;
+ sp<ChoreographerThread> mChoreographerThread;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 7d1b3cf..a812587 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -279,6 +279,9 @@
size_t numSections = sectionNames->size();
std::vector<std::vector<HVendorTag>> tagsBySection(numSections);
int tagCount = desc->getTagCount();
+ if (tagCount <= 0) {
+ continue;
+ }
std::vector<uint32_t> tags(tagCount);
desc->getTagArray(tags.data());
for (int i = 0; i < tagCount; i++) {
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
index 3d74f0b..4f080fe 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
@@ -65,5 +65,10 @@
"android-media-fuzzing-reports@google.com",
],
componentid: 155276,
+ libfuzzer_options: [
+ //based on b/187360866
+ "timeout=770",
+ ],
+
},
}
diff --git a/services/camera/libcameraservice/tests/Android.bp b/services/camera/libcameraservice/tests/Android.bp
new file mode 100644
index 0000000..5b8264c
--- /dev/null
+++ b/services/camera/libcameraservice/tests/Android.bp
@@ -0,0 +1,77 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: [
+ "frameworks_av_services_camera_libcameraservice_license",
+ ],
+}
+
+cc_test {
+ name: "cameraservice_test",
+
+ include_dirs: [
+ "system/media/private/camera/include",
+ "external/dynamic_depth/includes",
+ "external/dynamic_depth/internal",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libcameraservice",
+ "libhidlbase",
+ "liblog",
+ "libcamera_client",
+ "libcamera_metadata",
+ "libui",
+ "libutils",
+ "libjpeg",
+ "libexif",
+ "android.hardware.camera.common@1.0",
+ "android.hardware.camera.provider@2.4",
+ "android.hardware.camera.provider@2.5",
+ "android.hardware.camera.provider@2.6",
+ "android.hardware.camera.provider@2.7",
+ "android.hardware.camera.device@1.0",
+ "android.hardware.camera.device@3.2",
+ "android.hardware.camera.device@3.4",
+ "android.hardware.camera.device@3.7",
+ "android.hidl.token@1.0-utils",
+ ],
+
+ static_libs: [
+ "libgmock",
+ ],
+
+ srcs: [
+ "CameraProviderManagerTest.cpp",
+ "ClientManagerTest.cpp",
+ "DepthProcessorTest.cpp",
+ "DistortionMapperTest.cpp",
+ "ExifUtilsTest.cpp",
+ "NV12Compressor.cpp",
+ "RotateAndCropMapperTest.cpp",
+ "ZoomRatioTest.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+
+ test_suites: ["device-tests"],
+
+}
\ No newline at end of file
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
deleted file mode 100644
index 0b5ad79..0000000
--- a/services/camera/libcameraservice/tests/Android.mk
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= $(call all-cpp-files-under, .)
-
-LOCAL_SHARED_LIBRARIES := \
- libbase \
- libcutils \
- libcameraservice \
- libhidlbase \
- liblog \
- libcamera_client \
- libcamera_metadata \
- libui \
- libutils \
- libjpeg \
- libexif \
- android.hardware.camera.common@1.0 \
- android.hardware.camera.provider@2.4 \
- android.hardware.camera.provider@2.5 \
- android.hardware.camera.provider@2.6 \
- android.hardware.camera.provider@2.7 \
- android.hardware.camera.device@1.0 \
- android.hardware.camera.device@3.2 \
- android.hardware.camera.device@3.4 \
- android.hardware.camera.device@3.7 \
- android.hidl.token@1.0-utils
-
-LOCAL_STATIC_LIBRARIES := \
- libgmock
-
-LOCAL_C_INCLUDES += \
- system/media/private/camera/include \
- external/dynamic_depth/includes \
- external/dynamic_depth/internal \
-
-LOCAL_CFLAGS += -Wall -Wextra -Werror
-
-LOCAL_SANITIZE := address
-
-LOCAL_MODULE:= cameraservice_test
-LOCAL_LICENSE_KINDS:= SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS:= notice
-LOCAL_NOTICE_FILE:= $(LOCAL_PATH)/../NOTICE
-LOCAL_COMPATIBILITY_SUITE := device-tests
-LOCAL_MODULE_TAGS := tests
-
-include $(BUILD_NATIVE_TEST)
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index a74fd9d..c8a6b32 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -274,6 +274,8 @@
hardware::camera::common::V1_0::CameraDeviceStatus) override {}
void onTorchStatusChanged(const String8 &,
hardware::camera::common::V1_0::TorchModeStatus) override {}
+ void onTorchStatusChanged(const String8 &,
+ hardware::camera::common::V1_0::TorchModeStatus, SystemCameraKind) override {}
void onNewProviderRegistered() override {}
};
diff --git a/services/camera/libcameraservice/tests/ExifUtilsTest.cpp b/services/camera/libcameraservice/tests/ExifUtilsTest.cpp
new file mode 100644
index 0000000..3de4bf2
--- /dev/null
+++ b/services/camera/libcameraservice/tests/ExifUtilsTest.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ExifUtilsTest"
+
+#include <camera/CameraMetadata.h>
+#include "../utils/ExifUtils.h"
+#include <gtest/gtest.h>
+
+using android::camera3::ExifUtils;
+using android::camera3::ExifOrientation;
+using android::CameraMetadata;
+
+uint32_t kImageWidth = 1920;
+uint32_t kImageHeight = 1440;
+ExifOrientation kExifOrientation = ExifOrientation::ORIENTATION_0_DEGREES;
+
+// Test that setFromMetadata works correctly, without errors.
+TEST(ExifUtilsTest, SetFromMetadataTest) {
+ std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+ uint8_t invalidSensorPixelMode = 2;
+ uint8_t validSensorPixelMode = ANDROID_SENSOR_PIXEL_MODE_DEFAULT;
+ CameraMetadata metadata;
+ // Empty staticInfo
+ CameraMetadata staticInfo;
+ ASSERT_TRUE(utils->initializeEmpty());
+ ASSERT_TRUE(
+ metadata.update(ANDROID_SENSOR_PIXEL_MODE, &invalidSensorPixelMode, 1) == android::OK);
+ ASSERT_FALSE(utils->setFromMetadata(metadata, staticInfo, kImageWidth, kImageHeight));
+ ASSERT_TRUE(
+ metadata.update(ANDROID_SENSOR_PIXEL_MODE, &validSensorPixelMode, 1) == android::OK);
+ ASSERT_TRUE(utils->setFromMetadata(metadata, staticInfo, kImageWidth, kImageHeight));
+ ASSERT_TRUE(utils->setImageWidth(kImageWidth));
+ ASSERT_TRUE(utils->setImageHeight(kImageHeight));
+ ASSERT_TRUE(utils->setOrientationValue(kExifOrientation));
+ ASSERT_TRUE(utils->generateApp1());
+ const uint8_t* exifBuffer = utils->getApp1Buffer();
+ ASSERT_NE(exifBuffer, nullptr);
+ size_t exifBufferSize = utils->getApp1Length();
+ ASSERT_TRUE(exifBufferSize != 0);
+}
diff --git a/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
new file mode 100644
index 0000000..025521a
--- /dev/null
+++ b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PreviewSchedulerTest"
+
+#include <chrono>
+#include <thread>
+#include <utility>
+
+#include <gtest/gtest.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+
+#include <gui/BufferItemConsumer.h>
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/Surface.h>
+
+#include "../device3/Camera3OutputStream.h"
+#include "../device3/PreviewFrameScheduler.h"
+
+using namespace android;
+using namespace android::camera3;
+
+// Consumer buffer available listener
+class SimpleListener : public BufferItemConsumer::FrameAvailableListener {
+public:
+ SimpleListener(size_t frameCount): mFrameCount(frameCount) {}
+
+ void waitForFrames() {
+ Mutex::Autolock lock(mMutex);
+ while (mFrameCount > 0) {
+ mCondition.wait(mMutex);
+ }
+ }
+
+ void onFrameAvailable(const BufferItem& /*item*/) override {
+ Mutex::Autolock lock(mMutex);
+ if (mFrameCount > 0) {
+ mFrameCount--;
+ mCondition.signal();
+ }
+ }
+
+ void reset(size_t frameCount) {
+ Mutex::Autolock lock(mMutex);
+ mFrameCount = frameCount;
+ }
+private:
+ size_t mFrameCount;
+ Mutex mMutex;
+ Condition mCondition;
+};
+
+// Test the PreviewFrameScheduler functionality of re-timing buffers
+TEST(PreviewSchedulerTest, BasicPreviewSchedulerTest) {
+ const int ID = 0;
+ const int FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ const uint32_t WIDTH = 640;
+ const uint32_t HEIGHT = 480;
+ const int32_t TRANSFORM = 0;
+ const nsecs_t T_OFFSET = 0;
+ const android_dataspace DATASPACE = HAL_DATASPACE_UNKNOWN;
+ const camera_stream_rotation_t ROTATION = CAMERA_STREAM_ROTATION_0;
+ const String8 PHY_ID;
+ const std::unordered_set<int32_t> PIX_MODES;
+ const int BUFFER_COUNT = 4;
+ const int TOTAL_BUFFER_COUNT = BUFFER_COUNT * 2;
+
+ // Create buffer queue
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ ASSERT_NE(producer, nullptr);
+ ASSERT_NE(consumer, nullptr);
+ ASSERT_EQ(NO_ERROR, consumer->setDefaultBufferSize(WIDTH, HEIGHT));
+
+ // Set up consumer
+ sp<BufferItemConsumer> bufferConsumer = new BufferItemConsumer(consumer,
+ GRALLOC_USAGE_HW_COMPOSER, BUFFER_COUNT);
+ ASSERT_NE(bufferConsumer, nullptr);
+ sp<SimpleListener> consumerListener = new SimpleListener(BUFFER_COUNT);
+ bufferConsumer->setFrameAvailableListener(consumerListener);
+
+ // Set up producer
+ sp<Surface> surface = new Surface(producer);
+ sp<StubProducerListener> listener = new StubProducerListener();
+ ASSERT_EQ(NO_ERROR, surface->connect(NATIVE_WINDOW_API_CPU, listener));
+ sp<ANativeWindow> anw(surface);
+ ASSERT_EQ(NO_ERROR, native_window_set_buffer_count(anw.get(), TOTAL_BUFFER_COUNT));
+
+ // Create Camera3OutputStream and PreviewFrameScheduler
+ sp<Camera3OutputStream> stream = new Camera3OutputStream(ID, surface, WIDTH, HEIGHT,
+ FORMAT, DATASPACE, ROTATION, T_OFFSET, PHY_ID, PIX_MODES);
+ ASSERT_NE(stream, nullptr);
+ std::unique_ptr<PreviewFrameScheduler> scheduler =
+ std::make_unique<PreviewFrameScheduler>(*stream, surface);
+ ASSERT_NE(scheduler, nullptr);
+
+ // The pair of nsecs_t: camera timestamp delta (negative means in the past) and frame interval
+ const std::pair<nsecs_t, nsecs_t> inputTimestamps[][BUFFER_COUNT] = {
+ // 30fps, 33ms interval
+ {{-100000000LL, 33333333LL}, {-66666667LL, 33333333LL},
+ {-33333333LL, 33333333LL}, {0, 0}},
+ // 30fps, variable interval
+ {{-100000000LL, 16666667LL}, {-66666667LL, 33333333LL},
+ {-33333333LL, 50000000LL}, {0, 0}},
+ // 60fps, 16.7ms interval
+ {{-50000000LL, 16666667LL}, {-33333333LL, 16666667LL},
+ {-16666667LL, 16666667LL}, {0, 0}},
+ // 60fps, variable interval
+ {{-50000000LL, 8666667LL}, {-33333333LL, 19666667LL},
+ {-16666667LL, 20666667LL}, {0, 0}},
+ };
+
+ // Go through different use cases, and check the buffer timestamp
+ size_t iterations = sizeof(inputTimestamps)/sizeof(inputTimestamps[0]);
+ for (size_t i = 0; i < iterations; i++) {
+ // Space out different test sets to reset the frame scheduler
+ nsecs_t timeBase = systemTime() - s2ns(1) * (iterations - i);
+ nsecs_t lastQueueTime = 0;
+ nsecs_t duration = 0;
+ for (size_t j = 0; j < BUFFER_COUNT; j++) {
+ ANativeWindowBuffer* buffer = nullptr;
+ int fenceFd;
+ ASSERT_EQ(NO_ERROR, anw->dequeueBuffer(anw.get(), &buffer, &fenceFd));
+
+ // Sleep to space out queuePreviewBuffer
+ nsecs_t currentTime = systemTime();
+ if (duration > 0 && duration > currentTime - lastQueueTime) {
+ std::this_thread::sleep_for(
+ std::chrono::nanoseconds(duration + lastQueueTime - currentTime));
+ }
+ nsecs_t timestamp = timeBase + inputTimestamps[i][j].first;
+ ASSERT_EQ(NO_ERROR,
+ scheduler->queuePreviewBuffer(timestamp, TRANSFORM, buffer, fenceFd));
+
+ lastQueueTime = systemTime();
+ duration = inputTimestamps[i][j].second;
+ }
+
+ // Collect output timestamps, making sure they are either set by
+ // producer, or set by the scheduler.
+ consumerListener->waitForFrames();
+ nsecs_t outputTimestamps[BUFFER_COUNT];
+ for (size_t j = 0; j < BUFFER_COUNT; j++) {
+ BufferItem bufferItem;
+ ASSERT_EQ(NO_ERROR, bufferConsumer->acquireBuffer(&bufferItem, 0/*presentWhen*/));
+
+ outputTimestamps[j] = bufferItem.mTimestamp;
+ ALOGV("%s: [%zu][%zu]: input: %" PRId64 ", output: %" PRId64, __FUNCTION__,
+ i, j, timeBase + inputTimestamps[i][j].first, bufferItem.mTimestamp);
+ ASSERT_GT(bufferItem.mTimestamp, inputTimestamps[i][j].first);
+
+ ASSERT_EQ(NO_ERROR, bufferConsumer->releaseBuffer(bufferItem));
+ }
+
+ // Check the output timestamp intervals are aligned with input intervals
+ const nsecs_t SHIFT_THRESHOLD = ms2ns(2);
+ for (size_t j = 0; j < BUFFER_COUNT - 1; j ++) {
+ nsecs_t interval_shift = outputTimestamps[j+1] - outputTimestamps[j] -
+ (inputTimestamps[i][j+1].first - inputTimestamps[i][j].first);
+ ASSERT_LE(std::abs(interval_shift), SHIFT_THRESHOLD);
+ }
+
+ consumerListener->reset(BUFFER_COUNT);
+ }
+
+ // Disconnect the surface
+ ASSERT_EQ(NO_ERROR, surface->disconnect(NATIVE_WINDOW_API_CPU));
+}
diff --git a/services/camera/libcameraservice/tests/how_to_run.txt b/services/camera/libcameraservice/tests/how_to_run.txt
new file mode 100644
index 0000000..93239e3
--- /dev/null
+++ b/services/camera/libcameraservice/tests/how_to_run.txt
@@ -0,0 +1,5 @@
+adb root &&
+m cameraservice_test &&
+adb push $ANDROID_PRODUCT_OUT/data/nativetest/cameraservice_test/cameraservice_test \
+ /data/nativetest/cameraservice_test/arm64/cameraservice_test &&
+adb shell /data/nativetest/cameraservice_test/arm64/cameraservice_test
\ No newline at end of file
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 76927c0..8d170f1 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -120,13 +120,11 @@
proxyBinder->pingForUserUpdate();
}
-bool CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
- String16 packageName, int sensorOrientation, int lensFacing) {
+int CameraServiceProxyWrapper::getRotateAndCropOverride(String16 packageName, int lensFacing) {
sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
if (proxyBinder == nullptr) return true;
- bool ret = true;
- auto status = proxyBinder->isRotateAndCropOverrideNeeded(packageName, sensorOrientation,
- lensFacing, &ret);
+ int ret = 0;
+ auto status = proxyBinder->getRotateAndCropOverride(packageName, lensFacing, &ret);
if (!status.isOk()) {
ALOGE("%s: Failed during top activity orientation query: %s", __FUNCTION__,
status.exceptionMessage().c_str());
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index ad9db68..a51e568 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -91,9 +91,8 @@
// Ping camera service proxy for user update
static void pingCameraServiceProxy();
- // Check whether the current top activity needs a rotate and crop override.
- static bool isRotateAndCropOverrideNeeded(String16 packageName, int sensorOrientation,
- int lensFacing);
+ // Return the current top activity rotate and crop override.
+ static int getRotateAndCropOverride(String16 packageName, int lensFacing);
};
} // android
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
index 0198690..0cd4f5d 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.cpp
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -64,7 +64,7 @@
ATRACE_END();
}
-status_t CameraTraces::dump(int fd, const Vector<String16> &args __attribute__((unused))) {
+status_t CameraTraces::dump(int fd) {
ALOGV("%s: fd = %d", __FUNCTION__, fd);
Mutex::Autolock al(sImpl.tracesLock);
List<ProcessCallStack>& pcsList = sImpl.pcsList;
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
index 13ca16d..71fa334 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.h
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -42,7 +42,7 @@
*
* <p>Each line is indented by DUMP_INDENT spaces.</p>
*/
- static status_t dump(int fd, const Vector<String16>& args);
+ static status_t dump(int fd);
private:
enum {
diff --git a/services/camera/libcameraservice/utils/ExifUtils.cpp b/services/camera/libcameraservice/utils/ExifUtils.cpp
index 485705c..21f02db 100644
--- a/services/camera/libcameraservice/utils/ExifUtils.cpp
+++ b/services/camera/libcameraservice/utils/ExifUtils.cpp
@@ -920,7 +920,7 @@
camera_metadata_ro_entry sensorPixelModeEntry = metadata.find(ANDROID_SENSOR_PIXEL_MODE);
if (sensorPixelModeEntry.count != 0) {
sensorPixelMode = sensorPixelModeEntry.data.u8[0];
- if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT ||
+ if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT &&
sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) {
ALOGE("%s: Request sensor pixel mode is not one of the valid values %d",
__FUNCTION__, sensorPixelMode);
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 192e241..1053327 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -21,7 +21,7 @@
#include <camera/camera2/OutputConfiguration.h>
#include <camera/camera2/SessionConfiguration.h>
#include <camera/camera2/SubmitInfo.h>
-#include <android/hardware/camera/device/3.7/types.h>
+#include <android/hardware/camera/device/3.8/types.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index 262f962..461f5e9 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -24,6 +24,7 @@
#include <utils/Log.h>
#include <camera/VendorTagDescriptor.h>
#include <camera_metadata_hidden.h>
+#include <device3/Camera3Stream.h>
namespace android {
@@ -112,11 +113,15 @@
mLastMonitoredResultValues.clear();
mLastMonitoredPhysicalRequestKeys.clear();
mLastMonitoredPhysicalResultKeys.clear();
+ mLastStreamIds.clear();
+ mLastInputStreamId = -1;
}
void TagMonitor::monitorMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
const CameraMetadata& metadata,
- const std::unordered_map<std::string, CameraMetadata>& physicalMetadata) {
+ const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+ const camera3::camera_stream_buffer_t *outputBuffers, uint32_t numOutputBuffers,
+ int32_t inputStreamId) {
if (!mMonitoringEnabled) return;
std::lock_guard<std::mutex> lock(mMonitorMutex);
@@ -124,19 +129,27 @@
if (timestamp == 0) {
timestamp = systemTime(SYSTEM_TIME_BOOTTIME);
}
-
+ std::unordered_set<int32_t> outputStreamIds;
+ for (size_t i = 0; i < numOutputBuffers; i++) {
+ const camera3::camera_stream_buffer_t *src = outputBuffers + i;
+ int32_t streamId = camera3::Camera3Stream::cast(src->stream)->getId();
+ outputStreamIds.emplace(streamId);
+ }
std::string emptyId;
for (auto tag : mMonitoredTagList) {
- monitorSingleMetadata(source, frameNumber, timestamp, emptyId, tag, metadata);
+ monitorSingleMetadata(source, frameNumber, timestamp, emptyId, tag, metadata,
+ outputStreamIds, inputStreamId);
for (auto& m : physicalMetadata) {
- monitorSingleMetadata(source, frameNumber, timestamp, m.first, tag, m.second);
+ monitorSingleMetadata(source, frameNumber, timestamp, m.first, tag, m.second,
+ outputStreamIds, inputStreamId);
}
}
}
void TagMonitor::monitorSingleMetadata(eventSource source, int64_t frameNumber, nsecs_t timestamp,
- const std::string& cameraId, uint32_t tag, const CameraMetadata& metadata) {
+ const std::string& cameraId, uint32_t tag, const CameraMetadata& metadata,
+ const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId) {
CameraMetadata &lastValues = (source == REQUEST) ?
(cameraId.empty() ? mLastMonitoredRequestValues :
@@ -177,13 +190,22 @@
// No last entry, so always consider to be different
isDifferent = true;
}
-
+ // Also monitor when the stream ids change, this helps visually see what
+ // monitored metadata values are for capture requests with different
+ // stream ids.
+ if (source == REQUEST &&
+ (inputStreamId != mLastInputStreamId || outputStreamIds != mLastStreamIds)) {
+ mLastInputStreamId = inputStreamId;
+ mLastStreamIds = outputStreamIds;
+ isDifferent = true;
+ }
if (isDifferent) {
ALOGV("%s: Tag %s changed", __FUNCTION__,
get_local_camera_metadata_tag_name_vendor_id(
tag, mVendorTagId));
lastValues.update(entry);
- mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId);
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId,
+ outputStreamIds, inputStreamId);
}
} else if (lastEntry.count > 0) {
// Value has been removed
@@ -195,7 +217,10 @@
entry.type = get_local_camera_metadata_tag_type_vendor_id(tag,
mVendorTagId);
entry.count = 0;
- mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId);
+ mLastInputStreamId = inputStreamId;
+ mLastStreamIds = outputStreamIds;
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId, outputStreamIds,
+ inputStreamId);
}
}
@@ -214,37 +239,59 @@
} else {
dprintf(fd, " Tag monitoring disabled (enable with -m <name1,..,nameN>)\n");
}
- if (mMonitoringEvents.size() > 0) {
- dprintf(fd, " Monitored tag event log:\n");
- for (const auto& event : mMonitoringEvents) {
- int indentation = (event.source == REQUEST) ? 15 : 30;
- dprintf(fd, " f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
- event.frameNumber, event.timestamp,
- 2, event.cameraId.c_str(),
- indentation,
- event.source == REQUEST ? "REQ:" : "RES:",
- get_local_camera_metadata_section_name_vendor_id(event.tag,
- mVendorTagId),
- get_local_camera_metadata_tag_name_vendor_id(event.tag,
- mVendorTagId));
- if (event.newData.size() == 0) {
- dprintf(fd, " (Removed)\n");
- } else {
- printData(fd, event.newData.data(), event.tag,
- event.type, event.newData.size() / camera_metadata_type_size[event.type],
- indentation + 18);
- }
- }
- }
+ if (mMonitoringEvents.size() == 0) { return; }
+
+ dprintf(fd, " Monitored tag event log:\n");
+
+ std::vector<std::string> eventStrs;
+ dumpMonitoredTagEventsToVectorLocked(eventStrs);
+ for (const std::string &eventStr : eventStrs) {
+ dprintf(fd, " %s", eventStr.c_str());
+ }
}
-// TODO: Consolidate with printData from camera_metadata.h
+void TagMonitor::getLatestMonitoredTagEvents(std::vector<std::string> &out) {
+ std::lock_guard<std::mutex> lock(mMonitorMutex);
+ dumpMonitoredTagEventsToVectorLocked(out);
+}
+
+void TagMonitor::dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &vec) {
+ if (mMonitoringEvents.size() == 0) { return; }
+
+ for (const auto& event : mMonitoringEvents) {
+ int indentation = (event.source == REQUEST) ? 15 : 30;
+ String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
+ event.frameNumber, event.timestamp,
+ 2, event.cameraId.c_str(),
+ indentation,
+ event.source == REQUEST ? "REQ:" : "RES:",
+ get_local_camera_metadata_section_name_vendor_id(event.tag, mVendorTagId),
+ get_local_camera_metadata_tag_name_vendor_id(event.tag, mVendorTagId));
+ if (event.newData.size() == 0) {
+ eventString += " (Removed)";
+ } else {
+ eventString += getEventDataString(event.newData.data(),
+ event.tag,
+ event.type,
+ event.newData.size() / camera_metadata_type_size[event.type],
+ indentation + 18,
+ event.outputStreamIds,
+ event.inputStreamId);
+ }
+ vec.emplace_back(eventString.string());
+ }
+}
#define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29
-void TagMonitor::printData(int fd, const uint8_t *data_ptr, uint32_t tag,
- int type, int count, int indentation) {
+String8 TagMonitor::getEventDataString(const uint8_t* data_ptr,
+ uint32_t tag,
+ int type,
+ int count,
+ int indentation,
+ const std::unordered_set<int32_t>& outputStreamIds,
+ int32_t inputStreamId) {
static int values_per_line[NUM_TYPES] = {
[TYPE_BYTE] = 16,
[TYPE_INT32] = 8,
@@ -253,6 +300,7 @@
[TYPE_DOUBLE] = 4,
[TYPE_RATIONAL] = 4,
};
+
size_t type_size = camera_metadata_type_size[type];
char value_string_tmp[CAMERA_METADATA_ENUM_STRING_MAX_SIZE];
uint32_t value;
@@ -260,10 +308,11 @@
int lines = count / values_per_line[type];
if (count % values_per_line[type] != 0) lines++;
+ String8 returnStr = String8();
int index = 0;
int j, k;
for (j = 0; j < lines; j++) {
- dprintf(fd, "%*s[", (j != 0) ? indentation + 4 : 0, "");
+ returnStr.appendFormat("%*s[", (j != 0) ? indentation + 4 : 0, "");
for (k = 0;
k < values_per_line[type] && count > 0;
k++, count--, index += type_size) {
@@ -276,10 +325,9 @@
value_string_tmp,
sizeof(value_string_tmp))
== OK) {
- dprintf(fd, "%s ", value_string_tmp);
+ returnStr += value_string_tmp;
} else {
- dprintf(fd, "%hhu ",
- *(data_ptr + index));
+ returnStr.appendFormat("%hhu", *(data_ptr + index));
}
break;
case TYPE_INT32:
@@ -290,49 +338,57 @@
value_string_tmp,
sizeof(value_string_tmp))
== OK) {
- dprintf(fd, "%s ", value_string_tmp);
+ returnStr += value_string_tmp;
} else {
- dprintf(fd, "%" PRId32 " ",
- *(int32_t*)(data_ptr + index));
+ returnStr.appendFormat("%" PRId32 " ", *(int32_t*)(data_ptr + index));
}
break;
case TYPE_FLOAT:
- dprintf(fd, "%0.8f ",
- *(float*)(data_ptr + index));
+ returnStr.appendFormat("%0.8f", *(float*)(data_ptr + index));
break;
case TYPE_INT64:
- dprintf(fd, "%" PRId64 " ",
- *(int64_t*)(data_ptr + index));
+ returnStr.appendFormat("%" PRId64 " ", *(int64_t*)(data_ptr + index));
break;
case TYPE_DOUBLE:
- dprintf(fd, "%0.8f ",
- *(double*)(data_ptr + index));
+ returnStr.appendFormat("%0.8f ", *(double*)(data_ptr + index));
break;
case TYPE_RATIONAL: {
int32_t numerator = *(int32_t*)(data_ptr + index);
int32_t denominator = *(int32_t*)(data_ptr + index + 4);
- dprintf(fd, "(%d / %d) ",
- numerator, denominator);
+ returnStr.appendFormat("(%d / %d) ", numerator, denominator);
break;
}
default:
- dprintf(fd, "??? ");
+ returnStr += "??? ";
}
}
- dprintf(fd, "]\n");
+ returnStr += "] ";
+ if (!outputStreamIds.empty()) {
+ returnStr += "output stream ids: ";
+ for (const auto &id : outputStreamIds) {
+ returnStr.appendFormat(" %d ", id);
+ }
+ }
+ if (inputStreamId != -1) {
+ returnStr.appendFormat("input stream id: %d", inputStreamId);
+ }
+ returnStr += "\n";
}
+ return returnStr;
}
template<typename T>
TagMonitor::MonitorEvent::MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
- const T &value, const std::string& cameraId) :
+ const T &value, const std::string& cameraId,
+ const std::unordered_set<int32_t> &outputStreamIds,
+ int32_t inputStreamId) :
source(src),
frameNumber(frameNumber),
timestamp(timestamp),
tag(value.tag),
type(value.type),
newData(value.data.u8, value.data.u8 + camera_metadata_type_size[value.type] * value.count),
- cameraId(cameraId) {
+ cameraId(cameraId), outputStreamIds(outputStreamIds), inputStreamId(inputStreamId) {
}
TagMonitor::MonitorEvent::~MonitorEvent() {
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index 413f502..088d6fe 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -30,6 +30,7 @@
#include <system/camera_metadata.h>
#include <system/camera_vendor_tags.h>
#include <camera/CameraMetadata.h>
+#include <device3/InFlightRequest.h>
namespace android {
@@ -66,19 +67,35 @@
// Scan through the metadata and update the monitoring information
void monitorMetadata(eventSource source, int64_t frameNumber,
nsecs_t timestamp, const CameraMetadata& metadata,
- const std::unordered_map<std::string, CameraMetadata>& physicalMetadata);
+ const std::unordered_map<std::string, CameraMetadata>& physicalMetadata,
+ const camera3::camera_stream_buffer_t *outputBuffers = nullptr,
+ uint32_t numOutputBuffers = 0, int32_t inputStreamId = -1);
// Dump current event log to the provided fd
void dumpMonitoredMetadata(int fd);
- private:
+ // Dumps the latest monitored Tag events to the passed vector.
+ // NOTE: The events are appended to the vector in reverse chronological order
+ // (i.e. most recent first)
+ void getLatestMonitoredTagEvents(std::vector<std::string> &out);
- static void printData(int fd, const uint8_t *data_ptr, uint32_t tag,
- int type, int count, int indentation);
+ private:
+ // Dumps monitored tag events to the passed vector without acquiring
+ // mMonitorMutex. mMonitorMutex must be acquired before calling this
+ // function.
+ void dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &out);
+
+ static String8 getEventDataString(const uint8_t *data_ptr,
+ uint32_t tag, int type,
+ int count,
+ int indentation,
+ const std::unordered_set<int32_t> &outputStreamIds,
+ int32_t inputStreamId);
void monitorSingleMetadata(TagMonitor::eventSource source, int64_t frameNumber,
nsecs_t timestamp, const std::string& cameraId, uint32_t tag,
- const CameraMetadata& metadata);
+ const CameraMetadata& metadata, const std::unordered_set<int32_t> &outputStreamIds,
+ int32_t inputStreamId);
std::atomic<bool> mMonitoringEnabled;
std::mutex mMonitorMutex;
@@ -93,6 +110,9 @@
std::unordered_map<std::string, CameraMetadata> mLastMonitoredPhysicalRequestKeys;
std::unordered_map<std::string, CameraMetadata> mLastMonitoredPhysicalResultKeys;
+ int32_t mLastInputStreamId = -1;
+ std::unordered_set<int32_t> mLastStreamIds;
+
/**
* A monitoring event
* Stores a new metadata field value and the timestamp at which it changed.
@@ -101,7 +121,8 @@
struct MonitorEvent {
template<typename T>
MonitorEvent(eventSource src, uint32_t frameNumber, nsecs_t timestamp,
- const T &newValue, const std::string& cameraId);
+ const T &newValue, const std::string& cameraId,
+ const std::unordered_set<int32_t> &outputStreamIds, int32_t inputStreamId);
~MonitorEvent();
eventSource source;
@@ -111,6 +132,8 @@
uint8_t type;
std::vector<uint8_t> newData;
std::string cameraId;
+ std::unordered_set<int32_t> outputStreamIds;
+ int32_t inputStreamId = 1;
};
// A ring buffer for tracking the last kMaxMonitorEvents metadata changes
diff --git a/services/mediacodec/OWNERS b/services/mediacodec/OWNERS
index c716cce..3453a76 100644
--- a/services/mediacodec/OWNERS
+++ b/services/mediacodec/OWNERS
@@ -1,2 +1,3 @@
jeffv@google.com
-marcone@google.com
+essick@google.com
+wonsik@google.com
diff --git a/services/mediacodec/android.hardware.media.omx@1.0-service.rc b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
index 3ef9a85..845e5cc 100644
--- a/services/mediacodec/android.hardware.media.omx@1.0-service.rc
+++ b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
@@ -3,4 +3,4 @@
user mediacodec
group camera drmrpc mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 696b967..d10e339 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -7,7 +7,7 @@
default_applicable_licenses: ["frameworks_av_services_mediacodec_license"],
}
-cc_library_shared {
+cc_library {
name: "libmedia_codecserviceregistrant",
vendor_available: true,
srcs: [
diff --git a/services/mediacodec/registrant/fuzzer/Android.bp b/services/mediacodec/registrant/fuzzer/Android.bp
new file mode 100644
index 0000000..43afbf1
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/Android.bp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_services_mediacodec_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_services_mediacodec_license"],
+}
+
+cc_fuzz {
+ name: "codecServiceRegistrant_fuzzer",
+ srcs: [
+ "codecServiceRegistrant_fuzzer.cpp",
+ ],
+ static_libs: [
+ "libmedia_codecserviceregistrant",
+ ],
+ header_libs: [
+ "libmedia_headers",
+ ],
+ defaults: [
+ "libcodec2-hidl-defaults",
+ ],
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
diff --git a/services/mediacodec/registrant/fuzzer/README.md b/services/mediacodec/registrant/fuzzer/README.md
new file mode 100644
index 0000000..0ffa063
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/README.md
@@ -0,0 +1,56 @@
+# Fuzzer for libmedia_codecserviceregistrant
+
+## Plugin Design Considerations
+The fuzzer plugin for libmedia_codecserviceregistrant is designed based on the understanding of the library and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+libmedia_codecserviceregistrant supports the following parameters:
+1. C2String (parameter name: `c2String`)
+2. Width (parameter name: `width`)
+3. Height (parameter name: `height`)
+4. SamplingRate (parameter name: `samplingRate`)
+5. Channels (parameter name: `channels`)
+6. Stream (parameter name: `stream`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `c2String` |`String` | Value obtained from FuzzedDataProvider|
+| `width` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `height` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `samplingRate` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `channels` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `stream` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugin is always deterministic for any given input.
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the libmedia_codecserviceregistrant module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes steps to build codecServiceRegistrant_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) codecServiceRegistrant_fuzzer
+```
+#### Steps to run
+
+To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/${TARGET_ARCH}/codecServiceRegistrant_fuzzer/codecServiceRegistrant_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp b/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp
new file mode 100644
index 0000000..e5983e4
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "../CodecServiceRegistrant.cpp"
+#include "fuzzer/FuzzedDataProvider.h"
+#include <C2Config.h>
+#include <C2Param.h>
+
+using namespace std;
+
+constexpr char kServiceName[] = "software";
+
+class CodecServiceRegistrantFuzzer {
+public:
+ void process(const uint8_t *data, size_t size);
+ ~CodecServiceRegistrantFuzzer() {
+ delete mH2C2;
+ if (mInputSize) {
+ delete mInputSize;
+ }
+ if (mSampleRateInfo) {
+ delete mSampleRateInfo;
+ }
+ if (mChannelCountInfo) {
+ delete mChannelCountInfo;
+ }
+ }
+
+private:
+ void initH2C2ComponentStore();
+ void invokeH2C2ComponentStore();
+ void invokeConfigSM();
+ void invokeQuerySM();
+ H2C2ComponentStore *mH2C2 = nullptr;
+ C2StreamPictureSizeInfo::input *mInputSize = nullptr;
+ C2StreamSampleRateInfo::output *mSampleRateInfo = nullptr;
+ C2StreamChannelCountInfo::output *mChannelCountInfo = nullptr;
+ C2Param::Index mIndex = C2StreamProfileLevelInfo::output::PARAM_TYPE;
+ C2StreamFrameRateInfo::output mFrameRate;
+ FuzzedDataProvider *mFDP = nullptr;
+};
+
+void CodecServiceRegistrantFuzzer::initH2C2ComponentStore() {
+ using namespace ::android::hardware::media::c2;
+ shared_ptr<C2ComponentStore> store =
+ android::GetCodec2PlatformComponentStore();
+ if (!store) {
+ return;
+ }
+ android::sp<V1_1::IComponentStore> storeV1_1 =
+ new V1_1::utils::ComponentStore(store);
+ if (storeV1_1->registerAsService(string(kServiceName)) != android::OK) {
+ return;
+ }
+ string const preferredStoreName = string(kServiceName);
+ sp<IComponentStore> preferredStore =
+ IComponentStore::getService(preferredStoreName.c_str());
+ mH2C2 = new H2C2ComponentStore(preferredStore);
+}
+
+void CodecServiceRegistrantFuzzer::invokeConfigSM() {
+ vector<C2Param *> configParams;
+ uint32_t width = mFDP->ConsumeIntegral<uint32_t>();
+ uint32_t height = mFDP->ConsumeIntegral<uint32_t>();
+ uint32_t samplingRate = mFDP->ConsumeIntegral<uint32_t>();
+ uint32_t channels = mFDP->ConsumeIntegral<uint32_t>();
+ if (mFDP->ConsumeBool()) {
+ mInputSize = new C2StreamPictureSizeInfo::input(0u, width, height);
+ configParams.push_back(mInputSize);
+ } else {
+ if (mFDP->ConsumeBool()) {
+ mSampleRateInfo = new C2StreamSampleRateInfo::output(0u, samplingRate);
+ configParams.push_back(mSampleRateInfo);
+ }
+ if (mFDP->ConsumeBool()) {
+ mChannelCountInfo = new C2StreamChannelCountInfo::output(0u, channels);
+ configParams.push_back(mChannelCountInfo);
+ }
+ }
+ vector<unique_ptr<C2SettingResult>> failures;
+ mH2C2->config_sm(configParams, &failures);
+}
+
+void CodecServiceRegistrantFuzzer::invokeQuerySM() {
+ vector<C2Param *> stackParams;
+ vector<C2Param::Index> heapParamIndices;
+ if (mFDP->ConsumeBool()) {
+ stackParams = {};
+ heapParamIndices = {};
+ } else {
+ uint32_t stream = mFDP->ConsumeIntegral<uint32_t>();
+ mFrameRate.setStream(stream);
+ stackParams.push_back(&mFrameRate);
+ heapParamIndices.push_back(mIndex);
+ }
+ vector<unique_ptr<C2Param>> heapParams;
+ mH2C2->query_sm(stackParams, heapParamIndices, &heapParams);
+}
+
+void CodecServiceRegistrantFuzzer::invokeH2C2ComponentStore() {
+  initH2C2ComponentStore();
+  if (mH2C2 == nullptr) return; // store setup/registration can fail; avoid null deref below.
+  shared_ptr<C2Component> component; shared_ptr<C2ComponentInterface> interface;
+  string c2String = mFDP->ConsumeRandomLengthString();
+  mH2C2->createComponent(c2String, &component);
+  mH2C2->createInterface(c2String, &interface);
+  invokeConfigSM();
+  invokeQuerySM();
+
+  vector<shared_ptr<C2ParamDescriptor>> params;
+  mH2C2->querySupportedParams_nb(&params);
+
+  C2StoreIonUsageInfo usageInfo;
+  std::vector<C2FieldSupportedValuesQuery> query = {
+      C2FieldSupportedValuesQuery::Possible(
+          C2ParamField::Make(usageInfo, usageInfo.usage)),
+      C2FieldSupportedValuesQuery::Possible(
+          C2ParamField::Make(usageInfo, usageInfo.capacity)),
+  };
+  mH2C2->querySupportedValues_sm(query);
+
+  mH2C2->getName();
+  shared_ptr<C2ParamReflector> paramReflector = mH2C2->getParamReflector();
+  if (paramReflector) {
+    paramReflector->describe(C2ComponentDomainSetting::CORE_INDEX);
+  }
+  mH2C2->listComponents();
+  shared_ptr<C2GraphicBuffer> src;
+  shared_ptr<C2GraphicBuffer> dst;
+  mH2C2->copyBuffer(src, dst);
+}
+
+void CodecServiceRegistrantFuzzer::process(const uint8_t *data, size_t size) {
+ mFDP = new FuzzedDataProvider(data, size);
+ invokeH2C2ComponentStore();
+ /** RegisterCodecServices is called here to improve code coverage */
+ /** as currently it is not called by codecServiceRegistrant */
+ RegisterCodecServices();
+ delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ CodecServiceRegistrantFuzzer codecServiceRegistrantFuzzer;
+ codecServiceRegistrantFuzzer.process(data, size);
+ return 0;
+}
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
index 9058f10..41efce0 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
@@ -84,5 +84,6 @@
getgid32: 1
getegid32: 1
getgroups32: 1
+sysinfo: 1
@include /apex/com.android.media.swcodec/etc/seccomp_policy/code_coverage.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
index 7ff858b..4317ccc 100644
--- a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -78,6 +78,7 @@
getgid: 1
getegid: 1
getgroups: 1
+sysinfo: 1
# Android profiler (heapprofd, traced_perf) additions, where not already
# covered by the rest of the file, or by builtin minijail allow-listing of
diff --git a/services/mediaextractor/OWNERS b/services/mediaextractor/OWNERS
index c716cce..2a779c2 100644
--- a/services/mediaextractor/OWNERS
+++ b/services/mediaextractor/OWNERS
@@ -1,2 +1,3 @@
jeffv@google.com
-marcone@google.com
+essick@google.com
+aquilescanta@google.com
diff --git a/services/mediaextractor/TEST_MAPPING b/services/mediaextractor/TEST_MAPPING
new file mode 100644
index 0000000..7a66eeb
--- /dev/null
+++ b/services/mediaextractor/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsMediaTranscodingTestCases"
+ }
+ ]
+}
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
index 5fc2941..4fb50d0 100644
--- a/services/mediaextractor/mediaextractor.rc
+++ b/services/mediaextractor/mediaextractor.rc
@@ -3,4 +3,4 @@
user mediaex
group drmrpc mediadrm
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ task_profiles ProcessCapacityHigh
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index cfc4c40..8088ef0 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -26,6 +26,7 @@
"libmediautils",
"libnblog",
"libutils",
+ "packagemanager_aidl-cpp",
],
cflags: [
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 5989181..74e4715 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -114,6 +114,7 @@
"libmediautils",
"libutils",
"mediametricsservice-aidl-cpp",
+ "packagemanager_aidl-cpp",
],
header_libs: [
"libaudioutils_headers",
@@ -148,7 +149,8 @@
"statsd_mediaparser.cpp",
"statsd_nuplayer.cpp",
"statsd_recorder.cpp",
- "StringUtils.cpp"
+ "StringUtils.cpp",
+ "ValidateId.cpp",
],
proto: {
@@ -171,6 +173,7 @@
"libstatspull",
"libstatssocket",
"libutils",
+ "packagemanager_aidl-cpp",
],
export_shared_lib_headers: [
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 45c9f56..270fe2f 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -29,6 +29,7 @@
#include "AudioTypes.h" // string to int conversions
#include "MediaMetricsService.h" // package info
#include "StringUtils.h"
+#include "ValidateId.h"
#define PROP_AUDIO_ANALYTICS_CLOUD_ENABLED "persist.audio.analytics.cloud.enabled"
@@ -563,7 +564,7 @@
const auto flagsForStats = types::lookup<types::INPUT_FLAG, short_enum_type_t>(flags);
const auto sourceForStats = types::lookup<types::SOURCE_TYPE, short_enum_type_t>(source);
// Android S
- const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+ const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
LOG(LOG_LEVEL) << "key:" << key
<< " id:" << id
@@ -718,7 +719,7 @@
types::lookup<types::TRACK_TRAITS, short_enum_type_t>(traits);
const auto usageForStats = types::lookup<types::USAGE, short_enum_type_t>(usage);
// Android S
- const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+ const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
LOG(LOG_LEVEL) << "key:" << key
<< " id:" << id
diff --git a/services/mediametrics/LruSet.h b/services/mediametrics/LruSet.h
new file mode 100644
index 0000000..1f0ab60
--- /dev/null
+++ b/services/mediametrics/LruSet.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <list>
+#include <sstream>
+#include <unordered_map>
+
+namespace android::mediametrics {
+
+/**
+ * LruSet keeps a set of the last "Size" elements added or accessed.
+ *
+ * (Lru stands for least-recently-used eviction policy).
+ *
+ * Runs in O(1) time for add, remove, and check. Internally implemented
+ * with an unordered_map and a list. In order to remove elements,
+ * a list iterator is stored in the unordered_map
+ * (noting that std::list::erase() contractually
+ * does not affect iterators other than the one erased).
+ */
+
+template <typename T>
+class LruSet {
+ const size_t mMaxSize;
+ std::list<T> mAccessOrder; // front is the most recent, back is the oldest.
+ // item T with its access order iterator.
+ std::unordered_map<T, typename std::list<T>::iterator> mMap;
+
+public:
+ /**
+ * Constructs a LruSet which checks whether the element was
+ * accessed or added recently.
+ *
+ * The parameter maxSize is used to cap growth of LruSet;
+ * eviction is based on least recently used LRU.
+ * If maxSize is zero, the LruSet contains no elements
+ * and check() always returns false.
+ *
+ * \param maxSize the maximum number of elements that are tracked.
+ */
+ explicit LruSet(size_t maxSize) : mMaxSize(maxSize) {}
+
+ /**
+ * Returns the number of entries in the LruSet.
+ *
+ * This is a number between 0 and maxSize.
+ */
+ size_t size() const {
+ return mMap.size();
+ }
+
+ /** Clears the container contents. */
+ void clear() {
+ mMap.clear();
+ mAccessOrder.clear();
+ }
+
+ /** Returns a string dump of the last n entries. */
+ std::string dump(size_t n) const {
+ std::stringstream ss;
+ auto it = mAccessOrder.cbegin();
+ for (size_t i = 0; i < n && it != mAccessOrder.cend(); ++i) {
+ ss << *it++ << "\n";
+ }
+ return ss.str();
+ }
+
+ /** Adds a new item to the set. */
+ void add(const T& t) {
+ if (mMaxSize == 0) return;
+ auto it = mMap.find(t);
+ if (it != mMap.end()) { // already exists.
+ mAccessOrder.erase(it->second); // move-to-front on the chronologically ordered list.
+ } else if (mAccessOrder.size() >= mMaxSize) {
+ const T last = mAccessOrder.back();
+ mAccessOrder.pop_back();
+ mMap.erase(last);
+ }
+ mAccessOrder.push_front(t);
+ mMap[t] = mAccessOrder.begin();
+ }
+
+ /**
+ * Removes an item from the set.
+ *
+ * \param t item to be removed.
+ * \return false if the item doesn't exist.
+ */
+ bool remove(const T& t) {
+ auto it = mMap.find(t);
+ if (it == mMap.end()) return false;
+ mAccessOrder.erase(it->second);
+ mMap.erase(it);
+ return true;
+ }
+
+ /** Returns true if t is present (and moves the access order of t to the front). */
+ bool check(const T& t) { // not const, as it adjusts the least-recently-used order.
+ auto it = mMap.find(t);
+ if (it == mMap.end()) return false;
+ mAccessOrder.erase(it->second);
+ mAccessOrder.push_front(it->first);
+ it->second = mAccessOrder.begin();
+ return true;
+ }
+};
+
+} // namespace android::mediametrics
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index 1d64878..35e0ae4 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include "MediaMetricsService.h"
+#include "ValidateId.h"
#include "iface_statsd.h"
#include <pwd.h> //getpwuid
@@ -204,6 +205,15 @@
// now attach either the item or its dup to a const shared pointer
std::shared_ptr<const mediametrics::Item> sitem(release ? item : item->dup());
+ // register log session ids with singleton.
+ if (startsWith(item->getKey(), "metrics.manager")) {
+ std::string logSessionId;
+ if (item->get("logSessionId", &logSessionId)
+ && mediametrics::stringutils::isLogSessionId(logSessionId.c_str())) {
+ mediametrics::ValidateId::get()->registerId(logSessionId);
+ }
+ }
+
(void)mAudioAnalytics.submit(sitem, isTrusted);
(void)dump2Statsd(sitem, mStatsdLog); // failure should be logged in function.
@@ -309,6 +319,9 @@
result << "-- some lines may be truncated --\n";
}
+ result << "LogSessionId:\n"
+ << mediametrics::ValidateId::get()->dump();
+
// Dump the statsd atoms we sent out.
result << "Statsd atoms:\n"
<< mStatsdLog->dumpToString(" " /* prefix */,
diff --git a/services/mediametrics/TransactionLog.h b/services/mediametrics/TransactionLog.h
index 0ca4639..fd42518 100644
--- a/services/mediametrics/TransactionLog.h
+++ b/services/mediametrics/TransactionLog.h
@@ -158,7 +158,7 @@
++it) {
if (ll <= 0) break;
if (prefix != nullptr && !startsWith(it->first, prefix)) break;
- auto [s, l] = dumpMapTimeItem(it->second, ll - 1, sinceNs, prefix);
+ std::tie(s, l) = dumpMapTimeItem(it->second, ll - 1, sinceNs, prefix);
if (l == 0) continue; // don't show empty groups (due to sinceNs).
ss << " " << it->first << "\n" << s;
ll -= l + 1;
diff --git a/services/mediametrics/ValidateId.cpp b/services/mediametrics/ValidateId.cpp
new file mode 100644
index 0000000..0cc8593
--- /dev/null
+++ b/services/mediametrics/ValidateId.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaMetricsService" // not ValidateId
+#include <utils/Log.h>
+
+#include "ValidateId.h"
+
+namespace android::mediametrics {
+
+std::string ValidateId::dump() const
+{
+ std::stringstream ss;
+ ss << "Entries:" << mIdSet.size() << " InvalidIds:" << mInvalidIds << "\n";
+ ss << mIdSet.dump(10);
+ return ss.str();
+}
+
+void ValidateId::registerId(const std::string& id)
+{
+ if (id.empty()) return;
+ if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+ ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+ return;
+ }
+ ALOGV("%s: registering %s", __func__, id.c_str());
+ mIdSet.add(id);
+}
+
+const std::string& ValidateId::validateId(const std::string& id)
+{
+ static const std::string empty{};
+ if (id.empty()) return empty;
+
+ // reject because the id is malformed
+ if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+ ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+ ++mInvalidIds;
+ return empty;
+ }
+
+ // reject because the id is unregistered
+ if (!mIdSet.check(id)) {
+ ALOGW("%s: rejecting unregistered id %s", __func__, id.c_str());
+ ++mInvalidIds;
+ return empty;
+ }
+ return id;
+}
+
+} // namespace android::mediametrics
diff --git a/services/mediametrics/ValidateId.h b/services/mediametrics/ValidateId.h
new file mode 100644
index 0000000..166b39a
--- /dev/null
+++ b/services/mediametrics/ValidateId.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "LruSet.h"
+#include "StringUtils.h"
+#include "Wrap.h"
+
+namespace android::mediametrics {
+
+/*
+ * ValidateId is used to check whether the log session id is properly formed
+ * and has been registered (i.e. from the Java MediaMetricsManagerService).
+ *
+ * The default memory window to track registered ids is set to SINGLETON_LRU_SET_SIZE.
+ *
+ * This class is not thread-safe, but the singleton returned by get() uses LockWrap<>
+ * to ensure thread-safety.
+ */
+class ValidateId {
+ mediametrics::LruSet<std::string> mIdSet;
+ size_t mInvalidIds = 0; // count invalid ids encountered.
+public:
+ /** Creates a ValidateId object with size memory window. */
+ explicit ValidateId(size_t size) : mIdSet{size} {}
+
+ /** Returns a string dump of recent contents and stats. */
+ std::string dump() const;
+
+ /**
+ * Registers the id string.
+ *
+ * If id string is malformed (not 16 Base64Url chars), it is ignored.
+ * Once registered, calling validateId() will return id (instead of the empty string).
+ * ValidateId may "forget" the id after not encountering it within the past N ids,
+ * where N is the size set in the constructor.
+ *
+ * param id string (from MediaMetricsManagerService).
+ */
+ void registerId(const std::string& id);
+
+ /**
+ * Returns the empty string if id string is malformed (not 16 Base64Url chars)
+ * or if id string has not been seen (in the recent size ids);
+ * otherwise it returns the same id parameter.
+ *
+ * \param id string (to be sent to statsd).
+ */
+ const std::string& validateId(const std::string& id);
+
+ /** Singleton set size */
+ static inline constexpr size_t SINGLETON_LRU_SET_SIZE = 2000;
+
+ using LockedValidateId = mediametrics::LockWrap<ValidateId>;
+ /**
+ * Returns a singleton locked ValidateId object that is thread-safe using LockWrap<>.
+ *
+     * The Singleton ValidateId object is created with size SINGLETON_LRU_SET_SIZE (during first call).
+ */
+ static inline LockedValidateId& get() {
+ static LockedValidateId privateSet{SINGLETON_LRU_SET_SIZE};
+ return privateSet;
+ }
+};
+
+} // namespace android::mediametrics
diff --git a/services/mediametrics/fuzzer/Android.bp b/services/mediametrics/fuzzer/Android.bp
index b03e518..9da7282 100644
--- a/services/mediametrics/fuzzer/Android.bp
+++ b/services/mediametrics/fuzzer/Android.bp
@@ -56,6 +56,7 @@
"libstatssocket",
"libutils",
"mediametricsservice-aidl-cpp",
+ "packagemanager_aidl-cpp",
],
include_dirs: [
diff --git a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
index 8b0b479..06ab16e 100644
--- a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
+++ b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
@@ -48,6 +48,7 @@
void invokeAudioAnalytics(const uint8_t *data, size_t size);
void invokeTimedAction(const uint8_t *data, size_t size);
void process(const uint8_t *data, size_t size);
+ std::atomic_int mValue = 0;
};
void MediaMetricsServiceFuzzer::invokeStartsWith(const uint8_t *data, size_t size) {
@@ -342,11 +343,10 @@
void MediaMetricsServiceFuzzer::invokeTimedAction(const uint8_t *data, size_t size) {
FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
android::mediametrics::TimedAction timedAction;
- std::atomic_int value = 0;
while (fdp.remaining_bytes()) {
timedAction.postIn(std::chrono::seconds(fdp.ConsumeIntegral<int32_t>()),
- [&value] { ++value; });
+ [this] { ++mValue; });
timedAction.size();
}
}
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index 41efcaa..c53b6f3 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -143,8 +143,7 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiorecord.logSessionId", &logSessionId);
- const auto log_session_id =
- mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index 59627ae..707effd 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -137,8 +137,7 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiotrack.logSessionId", &logSessionId);
- const auto log_session_id =
- mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index 46cbdc8..8581437 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -34,7 +34,7 @@
#include "cleaner.h"
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -228,7 +228,7 @@
std::string sessionId;
if (item->getString("android.media.mediacodec.log-session-id", &sessionId)) {
- sessionId = mediametrics::stringutils::sanitizeLogSessionId(sessionId);
+ sessionId = mediametrics::ValidateId::get()->validateId(sessionId);
metrics_proto.set_log_session_id(sessionId);
}
AStatsEvent_writeString(event, codec.c_str());
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index bcf2e0a..a8bfeaa 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -86,7 +86,7 @@
std::string log_session_id;
if (item->getString("android.media.mediaextractor.logSessionId", &log_session_id)) {
- log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+ log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
metrics_proto.set_log_session_id(log_session_id);
}
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 921b320..67ca874b 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -31,7 +31,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/enums/stats/mediametrics/mediametrics.pb.h"
#include "iface_statsd.h"
@@ -81,7 +81,7 @@
std::string logSessionId;
item->getString("android.media.mediaparser.logSessionId", &logSessionId);
- logSessionId = mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ logSessionId = mediametrics::ValidateId::get()->validateId(logSessionId);
int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
timestamp_nanos,
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index b29ad73..5f54a68 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -59,7 +59,7 @@
// string kRecorderLogSessionId = "android.media.mediarecorder.log-session-id";
std::string log_session_id;
if (item->getString("android.media.mediarecorder.log-session-id", &log_session_id)) {
- log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+ log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
metrics_proto.set_log_session_id(log_session_id);
}
// string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
diff --git a/services/mediametrics/tests/Android.bp b/services/mediametrics/tests/Android.bp
index 3baf739..f46fbad 100644
--- a/services/mediametrics/tests/Android.bp
+++ b/services/mediametrics/tests/Android.bp
@@ -33,6 +33,7 @@
"libmediautils",
"libutils",
"mediametricsservice-aidl-cpp",
+ "packagemanager_aidl-cpp",
],
header_libs: [
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index 2336d6f..cd6af9f 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -28,6 +28,7 @@
#include "AudioTypes.h"
#include "StringUtils.h"
+#include "ValidateId.h"
using namespace android;
@@ -1127,3 +1128,126 @@
validId2[3] = '!';
ASSERT_EQ("", mediametrics::stringutils::sanitizeLogSessionId(validId2));
}
+
+TEST(mediametrics_tests, LruSet) {
+ constexpr size_t LRU_SET_SIZE = 2;
+ mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+ // test adding a couple strings.
+ lruSet.add("abc");
+ ASSERT_EQ(1u, lruSet.size());
+ ASSERT_TRUE(lruSet.check("abc"));
+ lruSet.add("def");
+ ASSERT_EQ(2u, lruSet.size());
+
+ // now adding the third string causes eviction of the oldest.
+ lruSet.add("ghi");
+ ASSERT_FALSE(lruSet.check("abc"));
+ ASSERT_TRUE(lruSet.check("ghi"));
+ ASSERT_TRUE(lruSet.check("def")); // "def" is most recent.
+ ASSERT_EQ(2u, lruSet.size()); // "abc" is correctly discarded.
+
+ // adding another string will evict the oldest.
+ lruSet.add("foo");
+ ASSERT_FALSE(lruSet.check("ghi")); // note: "ghi" discarded when "foo" added.
+ ASSERT_TRUE(lruSet.check("foo"));
+ ASSERT_TRUE(lruSet.check("def"));
+
+ // manual removing of a string works, too.
+ ASSERT_TRUE(lruSet.remove("def"));
+ ASSERT_FALSE(lruSet.check("def")); // we manually removed "def".
+ ASSERT_TRUE(lruSet.check("foo")); // "foo" is still there.
+ ASSERT_EQ(1u, lruSet.size());
+
+ // you can't remove a string that has not been added.
+ ASSERT_FALSE(lruSet.remove("bar")); // Note: "bar" doesn't exist, so remove returns false.
+ ASSERT_EQ(1u, lruSet.size());
+
+ lruSet.add("foo"); // adding "foo" (which already exists) doesn't change size.
+ ASSERT_EQ(1u, lruSet.size());
+ lruSet.add("bar"); // add "bar"
+ ASSERT_EQ(2u, lruSet.size());
+ lruSet.add("glorp"); // add "glorp" evicts "foo".
+ ASSERT_EQ(2u, lruSet.size());
+ ASSERT_TRUE(lruSet.check("bar"));
+ ASSERT_TRUE(lruSet.check("glorp"));
+ ASSERT_FALSE(lruSet.check("foo"));
+}
+
+TEST(mediametrics_tests, LruSet0) {
+ constexpr size_t LRU_SET_SIZE = 0;
+ mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+ lruSet.add("a");
+ ASSERT_EQ(0u, lruSet.size());
+ ASSERT_FALSE(lruSet.check("a"));
+ ASSERT_FALSE(lruSet.remove("a")); // never added.
+ ASSERT_EQ(0u, lruSet.size());
+}
+
+// Returns a 16 Base64Url string representing the decimal representation of value
+// (with leading 0s) e.g. 0000000000000000, 0000000000000001, 0000000000000002, ...
+static std::string generateId(size_t value)
+{
+    char id[16 + 1]; // to be filled with 16 Base64Url chars (and zero termination)
+    char *sptr = id + 16; // start at the end.
+    *sptr-- = 0; // zero terminate.
+    // output the ASCII digits from least significant to most significant.
+    while (value) {
+        *sptr-- = (value % 10) + '0'; // convert the digit to its ASCII character.
+        value /= 10;
+    }
+    // pad with leading '0's, down to and including id[0].
+    while (sptr >= id) {
+        *sptr-- = '0';
+    }
+    return std::string(id);
+}
+
+TEST(mediametrics_tests, ValidateId) {
+ constexpr size_t LRU_SET_SIZE = 3;
+ constexpr size_t IDS = 10;
+ static_assert(IDS > LRU_SET_SIZE); // IDS must be greater than LRU_SET_SIZE.
+ mediametrics::ValidateId validateId(LRU_SET_SIZE);
+
+
+ // register IDs as integer strings counting from 0.
+ for (size_t i = 0; i < IDS; ++i) {
+ validateId.registerId(generateId(i));
+ }
+
+ // only the last LRU_SET_SIZE exist.
+ for (size_t i = 0; i < IDS - LRU_SET_SIZE; ++i) {
+ ASSERT_EQ("", validateId.validateId(generateId(i)));
+ }
+ for (size_t i = IDS - LRU_SET_SIZE; i < IDS; ++i) {
+ const std::string id = generateId(i);
+ ASSERT_EQ(id, validateId.validateId(id));
+ }
+}
+
+TEST(mediametrics_tests, ErrorConversion) {
+ constexpr status_t errors[] = {
+ NO_ERROR,
+ BAD_VALUE,
+ DEAD_OBJECT,
+ NO_MEMORY,
+ PERMISSION_DENIED,
+ INVALID_OPERATION,
+ WOULD_BLOCK,
+ UNKNOWN_ERROR,
+ };
+
+ auto roundTrip = [](status_t status) {
+ return android::mediametrics::errorStringToStatus(
+ android::mediametrics::statusToErrorString(status));
+ };
+
+ // Primary status error categories.
+ for (const auto error : errors) {
+ ASSERT_EQ(error, roundTrip(error));
+ }
+
+ // Status errors specially considered.
+ ASSERT_EQ(DEAD_OBJECT, roundTrip(FAILED_TRANSACTION));
+}
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index f31202b..5d80744 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -90,7 +90,7 @@
],
static_libs: [
- "resourceobserver_aidl_interface-V1-ndk_platform",
+ "resourceobserver_aidl_interface-V1-ndk",
],
include_dirs: ["frameworks/av/include"],
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
index ec4ba58..618626f 100644
--- a/services/mediaresourcemanager/test/Android.bp
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -56,7 +56,7 @@
test_suites: ["device-tests"],
static_libs: [
"libresourcemanagerservice",
- "resourceobserver_aidl_interface-V1-ndk_platform",
+ "resourceobserver_aidl_interface-V1-ndk",
],
shared_libs: [
"libbinder",
diff --git a/services/mediatranscoding/Android.bp b/services/mediatranscoding/Android.bp
index a9fd14f..fa5eb4e 100644
--- a/services/mediatranscoding/Android.bp
+++ b/services/mediatranscoding/Android.bp
@@ -47,7 +47,7 @@
],
static_libs: [
- "mediatranscoding_aidl_interface-ndk_platform",
+ "mediatranscoding_aidl_interface-ndk",
],
cflags: [
@@ -80,7 +80,7 @@
],
static_libs: [
- "mediatranscoding_aidl_interface-ndk_platform",
+ "mediatranscoding_aidl_interface-ndk",
],
cflags: [
diff --git a/services/mediatranscoding/tests/Android.bp b/services/mediatranscoding/tests/Android.bp
index cb180ec..ae13656 100644
--- a/services/mediatranscoding/tests/Android.bp
+++ b/services/mediatranscoding/tests/Android.bp
@@ -34,8 +34,8 @@
],
static_libs: [
- "mediatranscoding_aidl_interface-ndk_platform",
- "resourcemanager_aidl_interface-ndk_platform",
+ "mediatranscoding_aidl_interface-ndk",
+ "resourcemanager_aidl_interface-ndk",
"libmediatranscodingservice",
],
diff --git a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
index 0cb2fad..8e17f55 100644
--- a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
+++ b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
@@ -259,9 +259,7 @@
static constexpr bool success = true;
static constexpr bool fail = false;
-struct TestClientCallback : public BnTranscodingClientCallback,
- public EventTracker,
- public std::enable_shared_from_this<TestClientCallback> {
+struct TestClientCallback : public BnTranscodingClientCallback, public EventTracker {
TestClientCallback(const char* packageName, int32_t id)
: mClientId(id), mClientPid(PID(id)), mClientUid(UID(id)), mPackageName(packageName) {
ALOGI("TestClientCallback %d created: pid %d, uid %d", id, PID(id), UID(id));
@@ -348,8 +346,8 @@
ALOGD("registering %s with uid %d", packageName, mClientUid);
std::shared_ptr<ITranscodingClient> client;
- Status status =
- service->registerClient(shared_from_this(), kClientName, packageName, &client);
+ Status status = service->registerClient(ref<TestClientCallback>(), kClientName, packageName,
+ &client);
mClient = status.isOk() ? client : nullptr;
return status;
diff --git a/services/minijail/Android.bp b/services/minijail/Android.bp
index 3a89e12..038197f 100644
--- a/services/minijail/Android.bp
+++ b/services/minijail/Android.bp
@@ -31,17 +31,6 @@
export_include_dirs: ["."],
}
-// By adding "vendor_available: true" to "libavservices_minijail", we don't
-// need to have "libavservices_minijail_vendor" any longer.
-// "libavservices_minijail_vendor" will be removed, once we replace it with
-// "libavservices_minijail" in all vendor modules. (b/146313710)
-cc_library_shared {
- name: "libavservices_minijail_vendor",
- vendor: true,
- defaults: ["libavservices_minijail_defaults"],
- export_include_dirs: ["."],
-}
-
// Unit tests.
cc_test {
name: "libavservices_minijail_unittest",
diff --git a/services/minijail/OWNERS b/services/minijail/OWNERS
index 19f4f9f..9ebf41e 100644
--- a/services/minijail/OWNERS
+++ b/services/minijail/OWNERS
@@ -1,2 +1,2 @@
jorgelo@google.com
-marcone@google.com
+essick@google.com
diff --git a/services/oboeservice/AAudioCommandQueue.cpp b/services/oboeservice/AAudioCommandQueue.cpp
new file mode 100644
index 0000000..ddaabe8
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioCommandQueue"
+//#define LOG_NDEBUG 0
+
+#include <chrono>
+
+#include <utils/Log.h>
+
+#include "AAudioCommandQueue.h"
+
+namespace aaudio {
+
+aaudio_result_t AAudioCommandQueue::sendCommand(std::shared_ptr<AAudioCommand> command) {
+ {
+ std::scoped_lock<std::mutex> _l(mLock);
+ mCommands.push(command);
+ mWaitWorkCond.notify_one();
+ }
+
+ std::unique_lock _cl(command->lock);
+ android::base::ScopedLockAssertion lockAssertion(command->lock);
+ ALOGV("Sending command %d, wait for reply(%d) with timeout %jd",
+ command->operationCode, command->isWaitingForReply, command->timeoutNanoseconds);
+ // `isWaitingForReply` is initialized when the command is constructed. It is flipped to
+ // false (under `command->lock`) when the command completes or when the wait times out.
+ auto timeoutExpire = std::chrono::steady_clock::now()
+ + std::chrono::nanoseconds(command->timeoutNanoseconds);
+ while (command->isWaitingForReply) {
+ if (command->conditionVariable.wait_until(_cl, timeoutExpire)
+ == std::cv_status::timeout) {
+ ALOGD("Command %d time out", command->operationCode);
+ command->result = AAUDIO_ERROR_TIMEOUT;
+ command->isWaitingForReply = false;
+ }
+ }
+ ALOGV("Command %d sent with result as %d", command->operationCode, command->result);
+ return command->result;
+}
+
+std::shared_ptr<AAudioCommand> AAudioCommandQueue::waitForCommand(int64_t timeoutNanos) {
+ std::shared_ptr<AAudioCommand> command;
+ {
+ std::unique_lock _l(mLock);
+ android::base::ScopedLockAssertion lockAssertion(mLock);
+ if (timeoutNanos >= 0) {
+ mWaitWorkCond.wait_for(_l, std::chrono::nanoseconds(timeoutNanos), [this]() {
+ android::base::ScopedLockAssertion lockAssertion(mLock);
+ return !mRunning || !mCommands.empty();
+ });
+ } else {
+ mWaitWorkCond.wait(_l, [this]() {
+ android::base::ScopedLockAssertion lockAssertion(mLock);
+ return !mRunning || !mCommands.empty();
+ });
+ }
+ if (!mCommands.empty()) {
+ command = mCommands.front();
+ mCommands.pop();
+ }
+ }
+ return command;
+}
+
+void AAudioCommandQueue::stopWaiting() {
+ std::scoped_lock<std::mutex> _l(mLock);
+ mRunning = false;
+ mWaitWorkCond.notify_one();
+}
+
+} // namespace aaudio
\ No newline at end of file
diff --git a/services/oboeservice/AAudioCommandQueue.h b/services/oboeservice/AAudioCommandQueue.h
new file mode 100644
index 0000000..5f25507
--- /dev/null
+++ b/services/oboeservice/AAudioCommandQueue.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <queue>
+
+#include <aaudio/AAudio.h>
+#include <android-base/thread_annotations.h>
+
+namespace aaudio {
+
+typedef int32_t aaudio_command_opcode;
+
+class AAudioCommandParam {
+public:
+ AAudioCommandParam() = default;
+ virtual ~AAudioCommandParam() = default;
+};
+
+class AAudioCommand {
+public:
+ explicit AAudioCommand(
+ aaudio_command_opcode opCode, std::shared_ptr<AAudioCommandParam> param = nullptr,
+ bool waitForReply = false, int64_t timeoutNanos = 0)
+ : operationCode(opCode), parameter(param), isWaitingForReply(waitForReply),
+ timeoutNanoseconds(timeoutNanos) { }
+ virtual ~AAudioCommand() = default;
+
+ std::mutex lock;
+ std::condition_variable conditionVariable;
+
+ const aaudio_command_opcode operationCode;
+ std::shared_ptr<AAudioCommandParam> parameter;
+ bool isWaitingForReply GUARDED_BY(lock);
+ const int64_t timeoutNanoseconds;
+ aaudio_result_t result GUARDED_BY(lock) = AAUDIO_OK;
+};
+
+class AAudioCommandQueue {
+public:
+ AAudioCommandQueue() = default;
+ ~AAudioCommandQueue() = default;
+
+ /**
+ * Send a command to the command queue. The return will be waiting for a specified timeout
+ * period indicated by the command if it is required.
+ *
+ * @param command the command to send to the command queue.
+ * @return the result of sending the command, or the result of executing the command if the
+ * command needs to wait for a reply. If a timeout happens, AAUDIO_ERROR_TIMEOUT is returned.
+ */
+ aaudio_result_t sendCommand(std::shared_ptr<AAudioCommand> command);
+
+ /**
+ * Wait for next available command OR until the timeout is expired.
+ *
+ * @param timeoutNanos the maximum time to wait for next command (0 means return immediately in
+ * any case), negative to wait forever.
+ * @return the next available command if any or a nullptr when there is none.
+ */
+ std::shared_ptr<AAudioCommand> waitForCommand(int64_t timeoutNanos = -1);
+
+ /**
+ * Force the queue to stop waiting for the next command.
+ */
+ void stopWaiting();
+
+private:
+ std::mutex mLock;
+ std::condition_variable mWaitWorkCond;
+
+ std::queue<std::shared_ptr<AAudioCommand>> mCommands GUARDED_BY(mLock);
+ bool mRunning GUARDED_BY(mLock) = true;
+};
+
+} // namespace aaudio
\ No newline at end of file
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 13dd3d3..390cd5c 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -59,6 +59,7 @@
result << " Device Id: " << getDeviceId() << "\n";
result << " Sample Rate: " << getSampleRate() << "\n";
result << " Channel Count: " << getSamplesPerFrame() << "\n";
+ result << " Channel Mask: 0x" << std::hex << getChannelMask() << std::dec << "\n";
result << " Format: " << getFormat() << "\n";
result << " Frames Per Burst: " << mFramesPerBurst << "\n";
result << " Usage: " << getUsage() << "\n";
@@ -164,6 +165,10 @@
configuration.getSamplesPerFrame() != getSamplesPerFrame()) {
return false;
}
+ if (configuration.getChannelMask() != AAUDIO_UNSPECIFIED &&
+ configuration.getChannelMask() != getChannelMask()) {
+ return false;
+ }
return true;
}
@@ -188,7 +193,9 @@
if (direction == AAUDIO_DIRECTION_OUTPUT) {
flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
| AAudioConvert_allowCapturePolicyToAudioFlagsMask(
- params->getAllowedCapturePolicy()));
+ params->getAllowedCapturePolicy(),
+ params->getSpatializationBehavior(),
+ params->isContentSpatialized()));
} else {
flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
| AAudioConvert_privacySensitiveToAudioFlagsMask(params->isPrivacySensitive()));
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index a08098c..b9c1260 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -80,16 +80,16 @@
audio_format_t audioFormat = getFormat();
- // FLOAT is not directly supported by the HAL so ask for a 32-bit.
- if (audioFormat == AUDIO_FORMAT_PCM_FLOAT) {
- // TODO remove these logs when finished debugging.
- ALOGD("%s() change format from %d to 32_BIT", __func__, audioFormat);
- audioFormat = AUDIO_FORMAT_PCM_32_BIT;
- }
-
result = openWithFormat(audioFormat);
if (result == AAUDIO_OK) return result;
+ if (result == AAUDIO_ERROR_UNAVAILABLE && audioFormat == AUDIO_FORMAT_PCM_FLOAT) {
+ ALOGD("%s() FLOAT failed, perhaps due to format. Try again with 32_BIT", __func__);
+ audioFormat = AUDIO_FORMAT_PCM_32_BIT;
+ result = openWithFormat(audioFormat);
+ }
+ if (result == AAUDIO_OK) return result;
+
if (result == AAUDIO_ERROR_UNAVAILABLE && audioFormat == AUDIO_FORMAT_PCM_32_BIT) {
ALOGD("%s() 32_BIT failed, perhaps due to format. Try again with 24_BIT_PACKED", __func__);
audioFormat = AUDIO_FORMAT_PCM_24_BIT_PACKED;
@@ -126,20 +126,15 @@
}
config.sample_rate = aaudioSampleRate;
- int32_t aaudioSamplesPerFrame = getSamplesPerFrame();
-
const aaudio_direction_t direction = getDirection();
+ config.channel_mask = AAudio_getChannelMaskForOpen(
+ getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
+
if (direction == AAUDIO_DIRECTION_OUTPUT) {
- config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
- ? AUDIO_CHANNEL_OUT_STEREO
- : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
} else if (direction == AAUDIO_DIRECTION_INPUT) {
- config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
- ? AUDIO_CHANNEL_IN_STEREO
- : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
} else {
@@ -225,9 +220,9 @@
}
// Get information about the stream and pass it back to the caller.
- setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
- ? audio_channel_count_from_out_mask(config.channel_mask)
- : audio_channel_count_from_in_mask(config.channel_mask));
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ config.channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
+ AAudio_isChannelIndexMask(config.channel_mask)));
// AAudio creates a copy of this FD and retains ownership of the copy.
// Assume that AudioFlinger will close the original shared_memory_fd.
@@ -247,9 +242,9 @@
setFormat(config.format);
setSampleRate(config.sample_rate);
- ALOGD("%s() actual rate = %d, channels = %d"
- ", deviceId = %d, capacity = %d\n",
- __func__, getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());
+ ALOGD("%s() actual rate = %d, channels = %d channelMask = %#x, deviceId = %d, capacity = %d\n",
+ __func__, getSampleRate(), getSamplesPerFrame(), getChannelMask(),
+ deviceId, getBufferCapacity());
ALOGD("%s() format = 0x%08x, frame size = %d, burst size = %d",
__func__, getFormat(), calculateBytesPerFrame(), mFramesPerBurst);
@@ -406,16 +401,17 @@
/**
* Get an immutable description of the data queue from the HAL.
*/
-aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(
+ AudioEndpointParcelable* parcelable)
{
// Gather information on the data queue based on HAL info.
int32_t bytesPerFrame = calculateBytesPerFrame();
int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
- int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
- parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
- parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
- parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
- parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
+ int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+ parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+ parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+ parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+ parcelable->mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
return AAUDIO_OK;
}
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 5a53885..ddfac63 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -79,7 +79,7 @@
void onRoutingChanged(audio_port_handle_t portHandle) override;
// ------------------------------------------------------------------------------
- aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable);
+ aaudio_result_t getDownDataDescription(AudioEndpointParcelable* parcelable);
int64_t getHardwareTimeOffsetNanos() const {
return mHardwareTimeOffsetNanos;
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 4e46033..f590fc8 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -21,6 +21,7 @@
#include <assert.h>
#include <map>
#include <mutex>
+#include <media/AudioSystem.h>
#include <utils/Singleton.h>
#include "AAudioEndpointManager.h"
@@ -51,7 +52,7 @@
mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
getStreamInternal()->getFramesPerBurst());
- int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
+ int32_t burstsPerBuffer = AudioSystem::getAAudioMixerBurstCount();
if (burstsPerBuffer == 0) {
mLatencyTuningEnabled = true;
burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 5fbcadb..5af0a91 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -78,7 +78,7 @@
result = mStreamInternal->open(builder);
setSampleRate(mStreamInternal->getSampleRate());
- setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
+ setChannelMask(mStreamInternal->getChannelMask());
setDeviceId(mStreamInternal->getDeviceId());
setSessionId(mStreamInternal->getSessionId());
setFormat(AUDIO_FORMAT_PCM_FLOAT); // force for mixer
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 34ddd4d..a25a791 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -34,23 +34,25 @@
#include "AAudioService.h"
#include "AAudioServiceEndpoint.h"
#include "AAudioServiceStreamBase.h"
-#include "TimestampScheduler.h"
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
using content::AttributionSourceState;
+static const int64_t TIMEOUT_NANOS = 3LL * 1000 * 1000 * 1000;
+
/**
* Base class for streams in the service.
* @return
*/
AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
- : mTimestampThread("AATime")
+ : mCommandThread("AACommand")
, mAtomicStreamTimestamp()
, mAudioService(audioService) {
mMmapClient.attributionSource = AttributionSourceState();
+ mThreadEnabled = true;
}
AAudioServiceStreamBase::~AAudioServiceStreamBase() {
@@ -70,10 +72,18 @@
|| getState() == AAUDIO_STREAM_STATE_UNINITIALIZED),
"service stream %p still open, state = %d",
this, getState());
+
+ // Stop the command thread before destroying.
+ if (mThreadEnabled) {
+ mThreadEnabled = false;
+ mCommandQueue.stopWaiting();
+ mCommandThread.stop();
+ }
}
std::string AAudioServiceStreamBase::dumpHeader() {
- return std::string(" T Handle UId Port Run State Format Burst Chan Capacity");
+ return std::string(
+ " T Handle UId Port Run State Format Burst Chan Mask Capacity");
}
std::string AAudioServiceStreamBase::dump() const {
@@ -88,6 +98,7 @@
result << std::setw(7) << getFormat();
result << std::setw(6) << mFramesPerBurst;
result << std::setw(5) << getSamplesPerFrame();
+ result << std::setw(8) << std::hex << getChannelMask() << std::dec;
result << std::setw(9) << getBufferCapacity();
return result.str();
@@ -164,6 +175,16 @@
mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
copyFrom(*mServiceEndpoint);
}
+
+ // Make sure this object does not get deleted before the run() method
+ // can protect it by making a strong pointer.
+ mThreadEnabled = true;
+ incStrong(nullptr); // See run() method.
+ result = mCommandThread.start(this);
+ if (result != AAUDIO_OK) {
+ decStrong(nullptr); // run() can't do it so we have to do it here.
+ goto error;
+ }
return result;
error:
@@ -172,8 +193,16 @@
}
aaudio_result_t AAudioServiceStreamBase::close() {
- std::lock_guard<std::mutex> lock(mLock);
- return close_l();
+ auto command = std::make_shared<AAudioCommand>(
+ CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+ aaudio_result_t result = mCommandQueue.sendCommand(command);
+
+ // Stop the command thread as the stream is closed.
+ mThreadEnabled = false;
+ mCommandQueue.stopWaiting();
+ mCommandThread.stop();
+
+ return result;
}
aaudio_result_t AAudioServiceStreamBase::close_l() {
@@ -181,8 +210,7 @@
return AAUDIO_OK;
}
- // This will call stopTimestampThread() and also stop the stream,
- // just in case it was not already stopped.
+ // This will stop the stream, just in case it was not already stopped.
stop_l();
aaudio_result_t result = AAUDIO_OK;
@@ -222,8 +250,12 @@
* An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
*/
aaudio_result_t AAudioServiceStreamBase::start() {
- std::lock_guard<std::mutex> lock(mLock);
+ auto command = std::make_shared<AAudioCommand>(
+ START, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+ return mCommandQueue.sendCommand(command);
+}
+aaudio_result_t AAudioServiceStreamBase::start_l() {
const int64_t beginNs = AudioClock::getNanoseconds();
aaudio_result_t result = AAUDIO_OK;
@@ -259,15 +291,6 @@
// This should happen at the end of the start.
sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
setState(AAUDIO_STREAM_STATE_STARTED);
- mThreadEnabled.store(true);
- // Make sure this object does not get deleted before the run() method
- // can protect it by making a strong pointer.
- incStrong(nullptr); // See run() method.
- result = mTimestampThread.start(this);
- if (result != AAUDIO_OK) {
- decStrong(nullptr); // run() can't do it so we have to do it here.
- goto error;
- }
return result;
@@ -277,8 +300,9 @@
}
aaudio_result_t AAudioServiceStreamBase::pause() {
- std::lock_guard<std::mutex> lock(mLock);
- return pause_l();
+ auto command = std::make_shared<AAudioCommand>(
+ PAUSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+ return mCommandQueue.sendCommand(command);
}
aaudio_result_t AAudioServiceStreamBase::pause_l() {
@@ -296,12 +320,6 @@
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
.record(); });
- result = stopTimestampThread();
- if (result != AAUDIO_OK) {
- disconnect_l();
- return result;
- }
-
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
if (endpoint == nullptr) {
ALOGE("%s() has no endpoint", __func__);
@@ -320,8 +338,9 @@
}
aaudio_result_t AAudioServiceStreamBase::stop() {
- std::lock_guard<std::mutex> lock(mLock);
- return stop_l();
+ auto command = std::make_shared<AAudioCommand>(
+ STOP, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+ return mCommandQueue.sendCommand(command);
}
aaudio_result_t AAudioServiceStreamBase::stop_l() {
@@ -341,12 +360,6 @@
setState(AAUDIO_STREAM_STATE_STOPPING);
- // Temporarily unlock because we are joining the timestamp thread and it may try
- // to acquire mLock.
- mLock.unlock();
- result = stopTimestampThread();
- mLock.lock();
-
if (result != AAUDIO_OK) {
disconnect_l();
return result;
@@ -371,17 +384,13 @@
return result;
}
-aaudio_result_t AAudioServiceStreamBase::stopTimestampThread() {
- aaudio_result_t result = AAUDIO_OK;
- // clear flag that tells thread to loop
- if (mThreadEnabled.exchange(false)) {
- result = mTimestampThread.stop();
- }
- return result;
+aaudio_result_t AAudioServiceStreamBase::flush() {
+ auto command = std::make_shared<AAudioCommand>(
+ FLUSH, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+ return mCommandQueue.sendCommand(command);
}
-aaudio_result_t AAudioServiceStreamBase::flush() {
- std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::flush_l() {
aaudio_result_t result = AAudio_isFlushAllowed(getState());
if (result != AAUDIO_OK) {
return result;
@@ -402,48 +411,111 @@
return AAUDIO_OK;
}
-// implement Runnable, periodically send timestamps to client
+// implement Runnable, periodically send timestamps to client and process commands from queue.
__attribute__((no_sanitize("integer")))
void AAudioServiceStreamBase::run() {
- ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
+ ALOGD("%s() %s entering >>>>>>>>>>>>>> COMMANDS", __func__, getTypeText());
// Hold onto the ref counted stream until the end.
android::sp<AAudioServiceStreamBase> holdStream(this);
TimestampScheduler timestampScheduler;
+ int64_t nextTime;
// Balance the incStrong from when the thread was launched.
holdStream->decStrong(nullptr);
- timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
- timestampScheduler.start(AudioClock::getNanoseconds());
- int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+ // mLock is held for the lifetime of the command thread. Every operation executed
+ // here must therefore be able to run while holding the lock.
+ std::scoped_lock<std::mutex> _l(mLock);
+
int32_t loopCount = 0;
- aaudio_result_t result = AAUDIO_OK;
- while(mThreadEnabled.load()) {
+ while (mThreadEnabled.load()) {
loopCount++;
- if (AudioClock::getNanoseconds() >= nextTime) {
- result = sendCurrentTimestamp();
- if (result != AAUDIO_OK) {
- ALOGE("%s() timestamp thread got result = %d", __func__, result);
- break;
+ int64_t timeoutNanos = -1;
+ if (isRunning()) {
+ timeoutNanos = nextTime - AudioClock::getNanoseconds();
+ timeoutNanos = std::max<int64_t>(0, timeoutNanos);
+ }
+
+ auto command = mCommandQueue.waitForCommand(timeoutNanos);
+ if (!mThreadEnabled) {
+ // Break the loop if the thread is disabled.
+ break;
+ }
+
+ if (isRunning() && AudioClock::getNanoseconds() >= nextTime) {
+ // It is time to update timestamp.
+ if (sendCurrentTimestamp_l() != AAUDIO_OK) {
+ ALOGE("Failed to send current timestamp, stop updating timestamp");
+ disconnect_l();
+ } else {
+ nextTime = timestampScheduler.nextAbsoluteTime();
}
- nextTime = timestampScheduler.nextAbsoluteTime();
- } else {
- // Sleep until it is time to send the next timestamp.
- // TODO Wait for a signal with a timeout so that we can stop more quickly.
- AudioClock::sleepUntilNanoTime(nextTime);
+ }
+
+ if (command != nullptr) {
+ std::scoped_lock<std::mutex> _commandLock(command->lock);
+ switch (command->operationCode) {
+ case START:
+ command->result = start_l();
+ timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
+ timestampScheduler.start(AudioClock::getNanoseconds());
+ nextTime = timestampScheduler.nextAbsoluteTime();
+ break;
+ case PAUSE:
+ command->result = pause_l();
+ break;
+ case STOP:
+ command->result = stop_l();
+ break;
+ case FLUSH:
+ command->result = flush_l();
+ break;
+ case CLOSE:
+ command->result = close_l();
+ break;
+ case DISCONNECT:
+ disconnect_l();
+ break;
+ case REGISTER_AUDIO_THREAD: {
+ RegisterAudioThreadParam *param =
+ (RegisterAudioThreadParam *) command->parameter.get();
+ command->result =
+ param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+ : registerAudioThread_l(param->mOwnerPid,
+ param->mClientThreadId,
+ param->mPriority);
+ }
+ break;
+ case UNREGISTER_AUDIO_THREAD: {
+ UnregisterAudioThreadParam *param =
+ (UnregisterAudioThreadParam *) command->parameter.get();
+ command->result =
+ param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+ : unregisterAudioThread_l(param->mClientThreadId);
+ }
+ break;
+ case GET_DESCRIPTION: {
+ GetDescriptionParam *param = (GetDescriptionParam *) command->parameter.get();
+ command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
+ : getDescription_l(param->mParcelable);
+ }
+ break;
+ default:
+ ALOGE("Invalid command op code: %d", command->operationCode);
+ break;
+ }
+ if (command->isWaitingForReply) {
+ command->isWaitingForReply = false;
+ command->conditionVariable.notify_one();
+ }
}
}
- // This was moved from the calls in stop_l() and pause_l(), which could cause a deadlock
- // if it resulted in a call to disconnect.
- if (result == AAUDIO_OK) {
- (void) sendCurrentTimestamp();
- }
- ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
+ ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< COMMANDS",
__func__, getTypeText(), loopCount);
}
void AAudioServiceStreamBase::disconnect() {
- std::lock_guard<std::mutex> lock(mLock);
- disconnect_l();
+ auto command = std::make_shared<AAudioCommand>(DISCONNECT);
+ mCommandQueue.sendCommand(command);
}
void AAudioServiceStreamBase::disconnect_l() {
@@ -459,15 +531,23 @@
}
}
-aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId,
- int priority) {
- std::lock_guard<std::mutex> lock(mLock);
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId, int priority) {
+ const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+ auto command = std::make_shared<AAudioCommand>(
+ REGISTER_AUDIO_THREAD,
+ std::make_shared<RegisterAudioThreadParam>(ownerPid, clientThreadId, priority),
+ true /*waitForReply*/,
+ TIMEOUT_NANOS);
+ return mCommandQueue.sendCommand(command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::registerAudioThread_l(
+ pid_t ownerPid, pid_t clientThreadId, int priority) {
aaudio_result_t result = AAUDIO_OK;
if (getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
ALOGE("AAudioService::registerAudioThread(), thread already registered");
result = AAUDIO_ERROR_INVALID_STATE;
} else {
- const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
setRegisteredThread(clientThreadId);
int err = android::requestPriority(ownerPid, clientThreadId,
priority, true /* isForApp */);
@@ -481,7 +561,15 @@
}
aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread(pid_t clientThreadId) {
- std::lock_guard<std::mutex> lock(mLock);
+ auto command = std::make_shared<AAudioCommand>(
+ UNREGISTER_AUDIO_THREAD,
+ std::make_shared<UnregisterAudioThreadParam>(clientThreadId),
+ true /*waitForReply*/,
+ TIMEOUT_NANOS);
+ return mCommandQueue.sendCommand(command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread_l(pid_t clientThreadId) {
aaudio_result_t result = AAUDIO_OK;
if (getRegisteredThread() != clientThreadId) {
ALOGE("%s(), wrong thread", __func__);
@@ -550,7 +638,7 @@
return sendServiceEvent(AAUDIO_SERVICE_EVENT_XRUN, (int64_t) xRunCount);
}
-aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
+aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp_l() {
AAudioServiceMessage command;
// It is not worth filling up the queue with timestamps.
// That can cause the stream to get suspended.
@@ -560,8 +648,8 @@
}
// Send a timestamp for the clock model.
- aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
- &command.timestamp.timestamp);
+ aaudio_result_t result = getFreeRunningPosition_l(&command.timestamp.position,
+ &command.timestamp.timestamp);
if (result == AAUDIO_OK) {
ALOGV("%s() SERVICE %8lld at %lld", __func__,
(long long) command.timestamp.position,
@@ -571,8 +659,8 @@
if (result == AAUDIO_OK) {
// Send a hardware timestamp for presentation time.
- result = getHardwareTimestamp(&command.timestamp.position,
- &command.timestamp.timestamp);
+ result = getHardwareTimestamp_l(&command.timestamp.position,
+ &command.timestamp.timestamp);
if (result == AAUDIO_OK) {
ALOGV("%s() HARDWARE %8lld at %lld", __func__,
(long long) command.timestamp.position,
@@ -594,7 +682,15 @@
* used to communicate with the underlying HAL or Service.
*/
aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
- std::lock_guard<std::mutex> lock(mLock);
+ auto command = std::make_shared<AAudioCommand>(
+ GET_DESCRIPTION,
+ std::make_shared<GetDescriptionParam>(&parcelable),
+ true /*waitForReply*/,
+ TIMEOUT_NANOS);
+ return mCommandQueue.sendCommand(command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::getDescription_l(AudioEndpointParcelable* parcelable) {
{
std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
if (mUpMessageQueue == nullptr) {
@@ -603,9 +699,9 @@
}
// Gather information on the message queue.
mUpMessageQueue->fillParcelable(parcelable,
- parcelable.mUpMessageQueueParcelable);
+ parcelable->mUpMessageQueueParcelable);
}
- return getAudioDataDescription(parcelable);
+ return getAudioDataDescription_l(parcelable);
}
void AAudioServiceStreamBase::onVolumeChanged(float volume) {
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 976996d..aa8e8cf 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -33,8 +33,10 @@
#include "utility/AAudioUtilities.h"
#include "utility/AudioClock.h"
-#include "SharedRingBuffer.h"
+#include "AAudioCommandQueue.h"
#include "AAudioThread.h"
+#include "SharedRingBuffer.h"
+#include "TimestampScheduler.h"
namespace android {
class AAudioService;
@@ -235,10 +237,46 @@
aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
aaudio_sharing_mode_t sharingMode);
+ aaudio_result_t start_l() REQUIRES(mLock);
virtual aaudio_result_t close_l() REQUIRES(mLock);
virtual aaudio_result_t pause_l() REQUIRES(mLock);
virtual aaudio_result_t stop_l() REQUIRES(mLock);
void disconnect_l() REQUIRES(mLock);
+ aaudio_result_t flush_l() REQUIRES(mLock);
+
+ class RegisterAudioThreadParam : public AAudioCommandParam {
+ public:
+ RegisterAudioThreadParam(pid_t ownerPid, pid_t clientThreadId, int priority)
+ : AAudioCommandParam(), mOwnerPid(ownerPid),
+ mClientThreadId(clientThreadId), mPriority(priority) { }
+ ~RegisterAudioThreadParam() = default;
+
+ pid_t mOwnerPid;
+ pid_t mClientThreadId;
+ int mPriority;
+ };
+ aaudio_result_t registerAudioThread_l(
+ pid_t ownerPid, pid_t clientThreadId, int priority) REQUIRES(mLock);
+
+ class UnregisterAudioThreadParam : public AAudioCommandParam {
+ public:
+ UnregisterAudioThreadParam(pid_t clientThreadId)
+ : AAudioCommandParam(), mClientThreadId(clientThreadId) { }
+ ~UnregisterAudioThreadParam() = default;
+
+ pid_t mClientThreadId;
+ };
+ aaudio_result_t unregisterAudioThread_l(pid_t clientThreadId) REQUIRES(mLock);
+
+ class GetDescriptionParam : public AAudioCommandParam {
+ public:
+ GetDescriptionParam(AudioEndpointParcelable* parcelable)
+ : AAudioCommandParam(), mParcelable(parcelable) { }
+ ~GetDescriptionParam() = default;
+
+ AudioEndpointParcelable* mParcelable;
+ };
+ aaudio_result_t getDescription_l(AudioEndpointParcelable* parcelable) REQUIRES(mLock);
void setState(aaudio_stream_state_t state);
@@ -250,7 +288,7 @@
aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
- aaudio_result_t sendCurrentTimestamp() EXCLUDES(mLock);
+ aaudio_result_t sendCurrentTimestamp_l() REQUIRES(mLock);
aaudio_result_t sendXRunCount(int32_t xRunCount);
@@ -259,11 +297,13 @@
* @param timeNanos
* @return AAUDIO_OK or AAUDIO_ERROR_UNAVAILABLE or other negative error
*/
- virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+ virtual aaudio_result_t getFreeRunningPosition_l(
+ int64_t *positionFrames, int64_t *timeNanos) = 0;
- virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) = 0;
+ virtual aaudio_result_t getHardwareTimestamp_l(int64_t *positionFrames, int64_t *timeNanos) = 0;
- virtual aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) = 0;
+ virtual aaudio_result_t getAudioDataDescription_l(AudioEndpointParcelable* parcelable) = 0;
+
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
@@ -279,9 +319,20 @@
std::mutex mUpMessageQueueLock;
std::shared_ptr<SharedRingBuffer> mUpMessageQueue;
- AAudioThread mTimestampThread;
- // This is used by one thread to tell another thread to exit. So it must be atomic.
+ enum : int32_t {
+ START,
+ PAUSE,
+ STOP,
+ FLUSH,
+ CLOSE,
+ DISCONNECT,
+ REGISTER_AUDIO_THREAD,
+ UNREGISTER_AUDIO_THREAD,
+ GET_DESCRIPTION,
+ };
+ AAudioThread mCommandThread;
std::atomic<bool> mThreadEnabled{false};
+ AAudioCommandQueue mCommandQueue;
int32_t mFramesPerBurst = 0;
android::AudioClient mMmapClient; // set in open, used in MMAP start()
@@ -336,6 +387,8 @@
protected:
// Locking order is important.
// Acquire mLock before acquiring AAudioServiceEndpoint::mLockStreams
+ // The lock will be held by the command thread. All operations needing the lock must run from
+ // the command thread.
std::mutex mLock; // Prevent start/stop/close etcetera from colliding
};
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 57dc1ab..05b7f7d 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -141,7 +141,7 @@
}
// Get free-running DSP or DMA hardware position from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition_l(int64_t *positionFrames,
int64_t *timeNanos) {
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
if (endpoint == nullptr) {
@@ -158,14 +158,14 @@
*positionFrames = timestamp.getPosition();
*timeNanos = timestamp.getNanoseconds();
} else if (result != AAUDIO_ERROR_UNAVAILABLE) {
- disconnect();
+ disconnect_l();
}
return result;
}
// Get timestamp from presentation position.
// If it fails, get timestamp that was written by getFreeRunningPosition()
-aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
+aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp_l(int64_t *positionFrames,
int64_t *timeNanos) {
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
@@ -198,8 +198,8 @@
}
// Get an immutable description of the data queue from the HAL.
-aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription(
- AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription_l(
+ AudioEndpointParcelable* parcelable)
{
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
if (endpoint == nullptr) {
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index 667465a..28da120 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -71,12 +71,14 @@
aaudio_result_t stop_l() REQUIRES(mLock) override;
- aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+ aaudio_result_t getAudioDataDescription_l(
+ AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
- aaudio_result_t getFreeRunningPosition(int64_t *positionFrames,
- int64_t *timeNanos) EXCLUDES(mLock) override;
+ aaudio_result_t getFreeRunningPosition_l(int64_t *positionFrames,
+ int64_t *timeNanos) REQUIRES(mLock) override;
- aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+ aaudio_result_t getHardwareTimestamp_l(
+ int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
/**
* Device specific startup.
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index c665cda..04fcd6d 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -164,11 +164,11 @@
goto error;
}
- setSamplesPerFrame(configurationInput.getSamplesPerFrame());
- if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
- setSamplesPerFrame(endpoint->getSamplesPerFrame());
+ setChannelMask(configurationInput.getChannelMask());
+ if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+ setChannelMask(endpoint->getChannelMask());
} else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
- ALOGD("%s() mSamplesPerFrame = %d, need %d",
+ ALOGD("%s() mSamplesPerFrame = %#x, need %#x",
__func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
@@ -211,8 +211,8 @@
/**
* Get an immutable description of the data queue created by this service.
*/
-aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription(
- AudioEndpointParcelable &parcelable)
+aaudio_result_t AAudioServiceStreamShared::getAudioDataDescription_l(
+ AudioEndpointParcelable* parcelable)
{
std::lock_guard<std::mutex> lock(audioDataQueueLock);
if (mAudioDataQueue == nullptr) {
@@ -221,8 +221,8 @@
}
// Gather information on the data queue.
mAudioDataQueue->fillParcelable(parcelable,
- parcelable.mDownDataQueueParcelable);
- parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
+ parcelable->mDownDataQueueParcelable);
+ parcelable->mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
return AAUDIO_OK;
}
@@ -231,8 +231,8 @@
}
// Get timestamp that was written by mixer or distributor.
-aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
- int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition_l(int64_t *positionFrames,
+ int64_t *timeNanos) {
// TODO Get presentation timestamp from the HAL
if (mAtomicStreamTimestamp.isValid()) {
Timestamp timestamp = mAtomicStreamTimestamp.read();
@@ -245,8 +245,8 @@
}
// Get timestamp from lower level service.
-aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
- int64_t *timeNanos) {
+aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp_l(int64_t *positionFrames,
+ int64_t *timeNanos) {
int64_t position = 0;
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 4fae5b4..78f9787 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -88,11 +88,14 @@
protected:
- aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
+ aaudio_result_t getAudioDataDescription_l(
+ AudioEndpointParcelable* parcelable) REQUIRES(mLock) override;
- aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+ aaudio_result_t getFreeRunningPosition_l(
+ int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
- aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+ aaudio_result_t getHardwareTimestamp_l(
+ int64_t *positionFrames, int64_t *timeNanos) REQUIRES(mLock) override;
/**
* @param requestedCapacityFrames
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
index 68496ac..549fa59 100644
--- a/services/oboeservice/AAudioThread.cpp
+++ b/services/oboeservice/AAudioThread.cpp
@@ -16,9 +16,10 @@
#define LOG_TAG "AAudioThread"
//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-#include <pthread.h>
+#include <system_error>
+
+#include <utils/Log.h>
#include <aaudio/AAudio.h>
#include <utility/AAudioUtilities.h>
@@ -38,7 +39,7 @@
}
AAudioThread::~AAudioThread() {
- ALOGE_IF(pthread_equal(pthread_self(), mThread),
+ ALOGE_IF(mThread.get_id() == std::this_thread::get_id(),
"%s() destructor running in thread", __func__);
ALOGE_IF(mHasThread, "%s() thread never joined", __func__);
}
@@ -60,32 +61,16 @@
}
}
-// This is the entry point for the new thread created by createThread_l().
-// It converts the 'C' function call to a C++ method call.
-static void * AAudioThread_internalThreadProc(void *arg) {
- AAudioThread *aaudioThread = (AAudioThread *) arg;
- aaudioThread->dispatch();
- return nullptr;
-}
-
aaudio_result_t AAudioThread::start(Runnable *runnable) {
if (mHasThread) {
ALOGE("start() - mHasThread already true");
return AAUDIO_ERROR_INVALID_STATE;
}
- // mRunnable will be read by the new thread when it starts.
- // pthread_create() forces a memory synchronization so mRunnable does not need to be atomic.
+ // mRunnable will be read by the new thread when it starts. A std::thread is created.
mRunnable = runnable;
- int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
- if (err != 0) {
- ALOGE("start() - pthread_create() returned %d %s", err, strerror(err));
- return AAudioConvert_androidToAAudioResult(-err);
- } else {
- int err = pthread_setname_np(mThread, mName);
- ALOGW_IF((err != 0), "Could not set name of AAudioThread. err = %d", err);
- mHasThread = true;
- return AAUDIO_OK;
- }
+ mHasThread = true;
+ mThread = std::thread(&AAudioThread::dispatch, this);
+ return AAUDIO_OK;
}
aaudio_result_t AAudioThread::stop() {
@@ -93,18 +78,18 @@
ALOGE("stop() but no thread running");
return AAUDIO_ERROR_INVALID_STATE;
}
- // Check to see if the thread is trying to stop itself.
- if (pthread_equal(pthread_self(), mThread)) {
- ALOGE("%s() attempt to pthread_join() from launched thread!", __func__);
- return AAUDIO_ERROR_INTERNAL;
- }
- int err = pthread_join(mThread, nullptr);
- if (err != 0) {
- ALOGE("stop() - pthread_join() returned %d %s", err, strerror(err));
- return AAudioConvert_androidToAAudioResult(-err);
- } else {
+ if (mThread.get_id() == std::this_thread::get_id()) {
+ // The thread must not be joined by itself.
+ ALOGE("%s() attempt to join() from launched thread!", __func__);
+ return AAUDIO_ERROR_INTERNAL;
+ } else if (mThread.joinable()) {
+ // Double check if the thread is joinable to avoid exception when calling join.
+ mThread.join();
mHasThread = false;
return AAUDIO_OK;
+ } else {
+ ALOGE("%s() the thread is not joinable", __func__);
+ return AAUDIO_ERROR_INTERNAL;
}
}
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index 08a8a98..b2774e0 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -18,7 +18,7 @@
#define AAUDIO_THREAD_H
#include <atomic>
-#include <pthread.h>
+#include <thread>
#include <aaudio/AAudio.h>
@@ -37,7 +37,6 @@
/**
* Abstraction for a host dependent thread.
- * TODO Consider using Android "Thread" class or std::thread instead.
*/
class AAudioThread
{
@@ -73,7 +72,7 @@
Runnable *mRunnable = nullptr;
bool mHasThread = false;
- pthread_t mThread = {};
+ std::thread mThread;
static std::atomic<uint32_t> mNextThreadIndex;
char mName[16]; // max length for a pthread_name
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 4c58040..80e4296 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -27,6 +27,7 @@
srcs: [
"AAudioClientTracker.cpp",
+ "AAudioCommandQueue.cpp",
"AAudioEndpointManager.cpp",
"AAudioMixer.cpp",
"AAudioService.cpp",
@@ -68,6 +69,8 @@
"aaudio-aidl-cpp",
"framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
+ "packagemanager_aidl-cpp",
+ "android.media.audio.common.types-V1-cpp",
],
export_shared_lib_headers: [
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index c1d4e16..fd2a454 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -85,9 +85,9 @@
return AAUDIO_OK;
}
-void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
+void SharedRingBuffer::fillParcelable(AudioEndpointParcelable* endpointParcelable,
RingBufferParcelable &ringBufferParcelable) {
- int fdIndex = endpointParcelable.addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
+ int fdIndex = endpointParcelable->addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
ringBufferParcelable.setupMemory(fdIndex,
SHARED_RINGBUFFER_DATA_OFFSET,
mDataMemorySizeInBytes,
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index c3a9bb7..cff1261 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -45,7 +45,7 @@
aaudio_result_t allocate(android::fifo_frames_t bytesPerFrame, android::fifo_frames_t capacityInFrames);
- void fillParcelable(AudioEndpointParcelable &endpointParcelable,
+ void fillParcelable(AudioEndpointParcelable* endpointParcelable,
RingBufferParcelable &ringBufferParcelable);
/**
diff --git a/services/oboeservice/fuzzer/README.md b/services/oboeservice/fuzzer/README.md
index 00b85df..ae7af3eb 100644
--- a/services/oboeservice/fuzzer/README.md
+++ b/services/oboeservice/fuzzer/README.md
@@ -15,7 +15,7 @@
4. InService
5. DeviceId
6. SampleRate
-7. SamplesPerFrame
+7. ChannelMask
8. Direction
9. SharingMode
10. Usage
@@ -31,7 +31,7 @@
| `InService` | `bool` | Value obtained from FuzzedDataProvider |
| `DeviceId` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
| `SampleRate` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
-| `SamplesPerFrame` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
+| `ChannelMask` | `AAUDIO_UNSPECIFIED`, `AAUDIO_CHANNEL_INDEX_MASK_1`, `AAUDIO_CHANNEL_INDEX_MASK_2`, `AAUDIO_CHANNEL_INDEX_MASK_3`, `AAUDIO_CHANNEL_INDEX_MASK_4`, `AAUDIO_CHANNEL_INDEX_MASK_5`, `AAUDIO_CHANNEL_INDEX_MASK_6`, `AAUDIO_CHANNEL_INDEX_MASK_7`, `AAUDIO_CHANNEL_INDEX_MASK_8`, `AAUDIO_CHANNEL_INDEX_MASK_9`, `AAUDIO_CHANNEL_INDEX_MASK_10`, `AAUDIO_CHANNEL_INDEX_MASK_11`, `AAUDIO_CHANNEL_INDEX_MASK_12`, `AAUDIO_CHANNEL_INDEX_MASK_13`, `AAUDIO_CHANNEL_INDEX_MASK_14`, `AAUDIO_CHANNEL_INDEX_MASK_15`, `AAUDIO_CHANNEL_INDEX_MASK_16`, `AAUDIO_CHANNEL_INDEX_MASK_17`, `AAUDIO_CHANNEL_INDEX_MASK_18`, `AAUDIO_CHANNEL_INDEX_MASK_19`, `AAUDIO_CHANNEL_INDEX_MASK_20`, `AAUDIO_CHANNEL_INDEX_MASK_21`, `AAUDIO_CHANNEL_INDEX_MASK_22`, `AAUDIO_CHANNEL_INDEX_MASK_23`, `AAUDIO_CHANNEL_INDEX_MASK_24`, `AAUDIO_CHANNEL_MONO`, `AAUDIO_CHANNEL_STEREO`, `AAUDIO_CHANNEL_FRONT_BACK`, `AAUDIO_CHANNEL_2POINT0POINT2`, `AAUDIO_CHANNEL_2POINT1POINT2`, `AAUDIO_CHANNEL_3POINT0POINT2`, `AAUDIO_CHANNEL_3POINT1POINT2`, `AAUDIO_CHANNEL_5POINT1`, `AAUDIO_CHANNEL_MONO`, `AAUDIO_CHANNEL_STEREO`, `AAUDIO_CHANNEL_2POINT1`, `AAUDIO_CHANNEL_TRI`, `AAUDIO_CHANNEL_TRI_BACK`, `AAUDIO_CHANNEL_3POINT1`, `AAUDIO_CHANNEL_2POINT0POINT2`, `AAUDIO_CHANNEL_2POINT1POINT2`, `AAUDIO_CHANNEL_3POINT0POINT2`, `AAUDIO_CHANNEL_3POINT1POINT2`, `AAUDIO_CHANNEL_QUAD`, `AAUDIO_CHANNEL_QUAD_SIDE`, `AAUDIO_CHANNEL_SURROUND`, `AAUDIO_CHANNEL_PENTA`, `AAUDIO_CHANNEL_5POINT1`, `AAUDIO_CHANNEL_5POINT1_SIDE`, `AAUDIO_CHANNEL_5POINT1POINT2`, `AAUDIO_CHANNEL_5POINT1POINT4`, `AAUDIO_CHANNEL_6POINT1`, `AAUDIO_CHANNEL_7POINT1`, `AAUDIO_CHANNEL_7POINT1POINT2`, `AAUDIO_CHANNEL_7POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT6` | Value obtained from FuzzedDataProvider |
| `Direction` | `AAUDIO_DIRECTION_OUTPUT`, `AAUDIO_DIRECTION_INPUT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
| `SharingMode` | `AAUDIO_SHARING_MODE_EXCLUSIVE`, `AAUDIO_SHARING_MODE_SHARED` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
| `Usage` | `AAUDIO_USAGE_MEDIA`, `AAUDIO_USAGE_VOICE_COMMUNICATION`, `AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING`, `AAUDIO_USAGE_ALARM`, `AAUDIO_USAGE_NOTIFICATION`, `AAUDIO_USAGE_NOTIFICATION_RINGTONE`, `AAUDIO_USAGE_NOTIFICATION_EVENT`, `AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY`, `AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE`, `AAUDIO_USAGE_ASSISTANCE_SONIFICATION`, `AAUDIO_USAGE_GAME`, `AAUDIO_USAGE_ASSISTANT`, `AAUDIO_SYSTEM_USAGE_EMERGENCY`, `AAUDIO_SYSTEM_USAGE_SAFETY`, `AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS`, `AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
diff --git a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
index 4bc661c..17e8d36 100644
--- a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
+++ b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
@@ -68,10 +68,71 @@
AAUDIO_INPUT_PRESET_UNPROCESSED, AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
};
+aaudio_channel_mask_t kAAudioChannelMasks[] = {
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_CHANNEL_INDEX_MASK_1,
+ AAUDIO_CHANNEL_INDEX_MASK_2,
+ AAUDIO_CHANNEL_INDEX_MASK_3,
+ AAUDIO_CHANNEL_INDEX_MASK_4,
+ AAUDIO_CHANNEL_INDEX_MASK_5,
+ AAUDIO_CHANNEL_INDEX_MASK_6,
+ AAUDIO_CHANNEL_INDEX_MASK_7,
+ AAUDIO_CHANNEL_INDEX_MASK_8,
+ AAUDIO_CHANNEL_INDEX_MASK_9,
+ AAUDIO_CHANNEL_INDEX_MASK_10,
+ AAUDIO_CHANNEL_INDEX_MASK_11,
+ AAUDIO_CHANNEL_INDEX_MASK_12,
+ AAUDIO_CHANNEL_INDEX_MASK_13,
+ AAUDIO_CHANNEL_INDEX_MASK_14,
+ AAUDIO_CHANNEL_INDEX_MASK_15,
+ AAUDIO_CHANNEL_INDEX_MASK_16,
+ AAUDIO_CHANNEL_INDEX_MASK_17,
+ AAUDIO_CHANNEL_INDEX_MASK_18,
+ AAUDIO_CHANNEL_INDEX_MASK_19,
+ AAUDIO_CHANNEL_INDEX_MASK_20,
+ AAUDIO_CHANNEL_INDEX_MASK_21,
+ AAUDIO_CHANNEL_INDEX_MASK_22,
+ AAUDIO_CHANNEL_INDEX_MASK_23,
+ AAUDIO_CHANNEL_INDEX_MASK_24,
+ AAUDIO_CHANNEL_MONO,
+ AAUDIO_CHANNEL_STEREO,
+ AAUDIO_CHANNEL_FRONT_BACK,
+ AAUDIO_CHANNEL_2POINT0POINT2,
+ AAUDIO_CHANNEL_2POINT1POINT2,
+ AAUDIO_CHANNEL_3POINT0POINT2,
+ AAUDIO_CHANNEL_3POINT1POINT2,
+ AAUDIO_CHANNEL_5POINT1,
+ AAUDIO_CHANNEL_MONO,
+ AAUDIO_CHANNEL_STEREO,
+ AAUDIO_CHANNEL_2POINT1,
+ AAUDIO_CHANNEL_TRI,
+ AAUDIO_CHANNEL_TRI_BACK,
+ AAUDIO_CHANNEL_3POINT1,
+ AAUDIO_CHANNEL_2POINT0POINT2,
+ AAUDIO_CHANNEL_2POINT1POINT2,
+ AAUDIO_CHANNEL_3POINT0POINT2,
+ AAUDIO_CHANNEL_3POINT1POINT2,
+ AAUDIO_CHANNEL_QUAD,
+ AAUDIO_CHANNEL_QUAD_SIDE,
+ AAUDIO_CHANNEL_SURROUND,
+ AAUDIO_CHANNEL_PENTA,
+ AAUDIO_CHANNEL_5POINT1,
+ AAUDIO_CHANNEL_5POINT1_SIDE,
+ AAUDIO_CHANNEL_5POINT1POINT2,
+ AAUDIO_CHANNEL_5POINT1POINT4,
+ AAUDIO_CHANNEL_6POINT1,
+ AAUDIO_CHANNEL_7POINT1,
+ AAUDIO_CHANNEL_7POINT1POINT2,
+ AAUDIO_CHANNEL_7POINT1POINT4,
+ AAUDIO_CHANNEL_9POINT1POINT4,
+ AAUDIO_CHANNEL_9POINT1POINT6,
+};
+
const size_t kNumAAudioFormats = std::size(kAAudioFormats);
const size_t kNumAAudioUsages = std::size(kAAudioUsages);
const size_t kNumAAudioContentTypes = std::size(kAAudioContentTypes);
const size_t kNumAAudioInputPresets = std::size(kAAudioInputPresets);
+const size_t kNumAAudioChannelMasks = std::size(kAAudioChannelMasks);
class FuzzAAudioClient : public virtual RefBase, public AAudioServiceInterface {
public:
@@ -305,7 +366,11 @@
request.getConfiguration().setDeviceId(fdp.ConsumeIntegral<int32_t>());
request.getConfiguration().setSampleRate(fdp.ConsumeIntegral<int32_t>());
- request.getConfiguration().setSamplesPerFrame(fdp.ConsumeIntegral<int32_t>());
+ request.getConfiguration().setChannelMask((aaudio_channel_mask_t)(
+ fdp.ConsumeBool()
+ ? fdp.ConsumeIntegral<int32_t>()
+ : kAAudioChannelMasks[fdp.ConsumeIntegralInRange<int32_t>(
+ 0, kNumAAudioChannelMasks - 1)]));
request.getConfiguration().setDirection(
fdp.ConsumeBool() ? fdp.ConsumeIntegral<int32_t>()
: (fdp.ConsumeBool() ? AAUDIO_DIRECTION_OUTPUT : AAUDIO_DIRECTION_INPUT));
diff --git a/services/tuner/.clang-format b/services/tuner/.clang-format
new file mode 100644
index 0000000..f14cc88
--- /dev/null
+++ b/services/tuner/.clang-format
@@ -0,0 +1,33 @@
+---
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: true
+AllowShortLoopsOnASingleLine: true
+BinPackArguments: true
+BinPackParameters: true
+CommentPragmas: NOLINT:.*
+ContinuationIndentWidth: 8
+DerivePointerAlignment: false
+IndentWidth: 4
+PointerAlignment: Left
+TabWidth: 4
+
+# Deviations from the above file:
+# "Don't indent the section label"
+AccessModifierOffset: -4
+# "Each line of text in your code should be at most 100 columns long."
+ColumnLimit: 100
+# "Constructor initializer lists can be all on one line or with subsequent
+# lines indented eight spaces.". clang-format does not support having the colon
+# on the same line as the constructor function name, so this is the best
+# approximation of that rule, which makes all entries in the list (except the
+# first one) have an eight space indentation.
+ConstructorInitializerIndentWidth: 6
+# There is nothing in go/droidcppstyle about case labels, but there seems to be
+# more code that does not indent the case labels in frameworks/base.
+IndentCaseLabels: false
+# There have been some bugs in which subsequent formatting operations introduce
+# weird comment jumps.
+ReflowComments: false
+# Android does support C++11 now.
+Standard: Cpp11
\ No newline at end of file
diff --git a/services/tuner/Android.bp b/services/tuner/Android.bp
index cd11c88..ec62d4e 100644
--- a/services/tuner/Android.bp
+++ b/services/tuner/Android.bp
@@ -7,33 +7,15 @@
default_applicable_licenses: ["frameworks_av_license"],
}
-filegroup {
- name: "tv_tuner_aidl",
- srcs: [
- "aidl/android/media/tv/tuner/*.aidl",
- ],
- path: "aidl",
-}
-
-filegroup {
- name: "tv_tuner_frontend_info",
- srcs: [
- "aidl/android/media/tv/tuner/TunerFrontendInfo.aidl",
- "aidl/android/media/tv/tuner/TunerFrontend*Capabilities.aidl",
- ],
- path: "aidl",
-}
-
aidl_interface {
name: "tv_tuner_aidl_interface",
unstable: true,
local_include_dir: "aidl",
- srcs: [
- ":tv_tuner_aidl",
- ],
+ srcs: ["aidl/android/media/tv/tuner/*.aidl"],
imports: [
"android.hardware.common-V2",
"android.hardware.common.fmq-V1",
+ "android.hardware.tv.tuner-V1",
],
backend: {
@@ -49,37 +31,18 @@
},
}
-aidl_interface {
- name: "tv_tuner_frontend_info_aidl_interface",
- unstable: true,
- local_include_dir: "aidl",
- srcs: [
- ":tv_tuner_frontend_info",
- ],
-
- backend: {
- java: {
- enabled: true,
- },
- cpp: {
- enabled: true,
- },
- ndk: {
- enabled: true,
- },
- },
-}
-
cc_library {
name: "libtunerservice",
srcs: [
"Tuner*.cpp",
+ "hidl/Tuner*.cpp",
],
shared_libs: [
"android.hardware.tv.tuner@1.0",
"android.hardware.tv.tuner@1.1",
+ "android.hardware.tv.tuner-V1-ndk",
"libbase",
"libbinder",
"libbinder_ndk",
@@ -89,13 +52,13 @@
"liblog",
"libmedia",
"libutils",
- "tv_tuner_aidl_interface-ndk_platform",
- "tv_tuner_resource_manager_aidl_interface-ndk_platform",
- "tv_tuner_resource_manager_aidl_interface-cpp",
+ "packagemanager_aidl-cpp",
+ "tv_tuner_aidl_interface-ndk",
+ "tv_tuner_resource_manager_aidl_interface-ndk",
],
static_libs: [
- "android.hardware.common.fmq-V1-ndk_platform",
+ "android.hardware.common.fmq-V1-ndk",
"libaidlcommonsupport",
],
@@ -122,18 +85,18 @@
shared_libs: [
"android.hardware.tv.tuner@1.0",
"android.hardware.tv.tuner@1.1",
+ "android.hardware.tv.tuner-V1-ndk",
"libbase",
"libbinder",
"libfmq",
"liblog",
"libtunerservice",
"libutils",
- "tv_tuner_resource_manager_aidl_interface-ndk_platform",
- "tv_tuner_resource_manager_aidl_interface-cpp",
+ "tv_tuner_resource_manager_aidl_interface-ndk",
],
static_libs: [
- "tv_tuner_aidl_interface-ndk_platform",
+ "tv_tuner_aidl_interface-ndk",
],
init_rc: ["mediatuner.rc"],
diff --git a/services/tuner/OWNERS b/services/tuner/OWNERS
index 0ceb8e8..bf9fe34 100644
--- a/services/tuner/OWNERS
+++ b/services/tuner/OWNERS
@@ -1,2 +1,2 @@
-nchalko@google.com
+hgchen@google.com
quxiangfang@google.com
diff --git a/services/tuner/TunerDemux.cpp b/services/tuner/TunerDemux.cpp
index 1122368..a6f3a2c 100644
--- a/services/tuner/TunerDemux.cpp
+++ b/services/tuner/TunerDemux.cpp
@@ -16,23 +16,32 @@
#define LOG_TAG "TunerDemux"
-#include "TunerDvr.h"
#include "TunerDemux.h"
+
+#include <aidl/android/hardware/tv/tuner/IDvr.h>
+#include <aidl/android/hardware/tv/tuner/IDvrCallback.h>
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
+#include <aidl/android/hardware/tv/tuner/IFilterCallback.h>
+#include <aidl/android/hardware/tv/tuner/ITimeFilter.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerDvr.h"
#include "TunerTimeFilter.h"
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::DvrType;
-using ::android::hardware::tv::tuner::V1_0::Result;
+using ::aidl::android::hardware::tv::tuner::IDvr;
+using ::aidl::android::hardware::tv::tuner::IDvrCallback;
+using ::aidl::android::hardware::tv::tuner::IFilter;
+using ::aidl::android::hardware::tv::tuner::IFilterCallback;
+using ::aidl::android::hardware::tv::tuner::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::Result;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-TunerDemux::TunerDemux(sp<IDemux> demux, int id) {
+TunerDemux::TunerDemux(shared_ptr<IDemux> demux, int id) {
mDemux = demux;
mDemuxId = id;
}
@@ -41,192 +50,143 @@
mDemux = nullptr;
}
-Status TunerDemux::setFrontendDataSource(const std::shared_ptr<ITunerFrontend>& frontend) {
+::ndk::ScopedAStatus TunerDemux::setFrontendDataSource(
+ const shared_ptr<ITunerFrontend>& in_frontend) {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
int frontendId;
- frontend->getFrontendId(&frontendId);
- Result res = mDemux->setFrontendDataSource(frontendId);
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ in_frontend->getFrontendId(&frontendId);
+
+ return mDemux->setFrontendDataSource(frontendId);
}
-Status TunerDemux::openFilter(
- int type, int subType, int bufferSize, const std::shared_ptr<ITunerFilterCallback>& cb,
- std::shared_ptr<ITunerFilter>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::setFrontendDataSourceById(int frontendId) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ return mDemux->setFrontendDataSource(frontendId);
+}
+
+::ndk::ScopedAStatus TunerDemux::openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+ const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ shared_ptr<IFilter> filter;
+ shared_ptr<TunerFilter::FilterCallback> filterCb =
+ ::ndk::SharedRefBase::make<TunerFilter::FilterCallback>(in_cb);
+ shared_ptr<IFilterCallback> cb = filterCb;
+ auto status = mDemux->openFilter(in_type, in_bufferSize, cb, &filter);
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filter, filterCb, in_type);
+ }
+
+ return status;
+}
+
+::ndk::ScopedAStatus TunerDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- DemuxFilterMainType mainType = static_cast<DemuxFilterMainType>(type);
- DemuxFilterType filterType {
- .mainType = mainType,
- };
-
- switch(mainType) {
- case DemuxFilterMainType::TS:
- filterType.subType.tsFilterType(static_cast<DemuxTsFilterType>(subType));
- break;
- case DemuxFilterMainType::MMTP:
- filterType.subType.mmtpFilterType(static_cast<DemuxMmtpFilterType>(subType));
- break;
- case DemuxFilterMainType::IP:
- filterType.subType.ipFilterType(static_cast<DemuxIpFilterType>(subType));
- break;
- case DemuxFilterMainType::TLV:
- filterType.subType.tlvFilterType(static_cast<DemuxTlvFilterType>(subType));
- break;
- case DemuxFilterMainType::ALP:
- filterType.subType.alpFilterType(static_cast<DemuxAlpFilterType>(subType));
- break;
- }
- Result status;
- sp<IFilter> filterSp;
- sp<IFilterCallback> cbSp = new TunerFilter::FilterCallback(cb);
- mDemux->openFilter(filterType, bufferSize, cbSp,
- [&](Result r, const sp<IFilter>& filter) {
- filterSp = filter;
- status = r;
- });
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ shared_ptr<ITimeFilter> filter;
+ auto status = mDemux->openTimeFilter(&filter);
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerTimeFilter>(filter);
}
- *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filterSp, type, subType);
- return Status::ok();
+ return status;
}
-Status TunerDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter,
+ int32_t* _aidl_return) {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status;
- sp<ITimeFilter> filterSp;
- mDemux->openTimeFilter([&](Result r, const sp<ITimeFilter>& filter) {
- filterSp = filter;
- status = r;
- });
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
-
- *_aidl_return = ::ndk::SharedRefBase::make<TunerTimeFilter>(filterSp);
- return Status::ok();
+ shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(tunerFilter.get()))->getHalFilter();
+ return mDemux->getAvSyncHwId(halFilter, _aidl_return);
}
-Status TunerDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::getAvSyncTime(int32_t avSyncHwId, int64_t* _aidl_return) {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- uint32_t avSyncHwId;
- Result res;
- sp<IFilter> halFilter = static_cast<TunerFilter*>(tunerFilter.get())->getHalFilter();
- mDemux->getAvSyncHwId(halFilter,
- [&](Result r, uint32_t id) {
- res = r;
- avSyncHwId = id;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
-
- *_aidl_return = (int)avSyncHwId;
- return Status::ok();
+ return mDemux->getAvSyncTime(avSyncHwId, _aidl_return);
}
-Status TunerDemux::getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+ const shared_ptr<ITunerDvrCallback>& in_cb,
+ shared_ptr<ITunerDvr>* _aidl_return) {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- uint64_t time;
- Result res;
- mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId),
- [&](Result r, uint64_t ts) {
- res = r;
- time = ts;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ shared_ptr<IDvrCallback> callback = ::ndk::SharedRefBase::make<TunerDvr::DvrCallback>(in_cb);
+ shared_ptr<IDvr> halDvr;
+ auto res = mDemux->openDvr(in_dvbType, in_bufferSize, callback, &halDvr);
+ if (res.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerDvr>(halDvr, in_dvbType);
}
- *_aidl_return = (int64_t)time;
- return Status::ok();
+ return res;
}
-Status TunerDemux::openDvr(int dvrType, int bufferSize, const shared_ptr<ITunerDvrCallback>& cb,
- shared_ptr<ITunerDvr>* _aidl_return) {
+::ndk::ScopedAStatus TunerDemux::connectCiCam(int32_t ciCamId) {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res;
- sp<IDvrCallback> callback = new TunerDvr::DvrCallback(cb);
- sp<IDvr> hidlDvr;
- mDemux->openDvr(static_cast<DvrType>(dvrType), bufferSize, callback,
- [&](Result r, const sp<IDvr>& dvr) {
- hidlDvr = dvr;
- res = r;
- });
- if (res != Result::SUCCESS) {
- *_aidl_return = NULL;
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
-
- *_aidl_return = ::ndk::SharedRefBase::make<TunerDvr>(hidlDvr, dvrType);
- return Status::ok();
+ return mDemux->connectCiCam(ciCamId);
}
-Status TunerDemux::connectCiCam(int ciCamId) {
+::ndk::ScopedAStatus TunerDemux::disconnectCiCam() {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDemux->connectCiCam(static_cast<uint32_t>(ciCamId));
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return mDemux->disconnectCiCam();
}
-Status TunerDemux::disconnectCiCam() {
+::ndk::ScopedAStatus TunerDemux::close() {
if (mDemux == nullptr) {
ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDemux->disconnectCiCam();
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ auto res = mDemux->close();
+ mDemux = nullptr;
+
+ return res;
}
-Status TunerDemux::close() {
- if (mDemux == nullptr) {
- ALOGE("IDemux is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- Result res = mDemux->close();
- mDemux = NULL;
-
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
-}
+} // namespace tuner
+} // namespace tv
+} // namespace media
} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerDemux.h b/services/tuner/TunerDemux.h
index 2a9836b..cdb3aa0 100644
--- a/services/tuner/TunerDemux.h
+++ b/services/tuner/TunerDemux.h
@@ -17,52 +17,55 @@
#ifndef ANDROID_MEDIA_TUNERDEMUX_H
#define ANDROID_MEDIA_TUNERDEMUX_H
+#include <aidl/android/hardware/tv/tuner/IDemux.h>
#include <aidl/android/media/tv/tuner/BnTunerDemux.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerDvr;
-using ::aidl::android::media::tv::tuner::ITunerDvrCallback;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
-using ::aidl::android::media::tv::tuner::ITunerFrontend;
-using ::aidl::android::media::tv::tuner::ITunerTimeFilter;
-using ::android::hardware::tv::tuner::V1_0::IDemux;
-using ::android::hardware::tv::tuner::V1_0::IDvr;
-using ::android::hardware::tv::tuner::V1_0::IDvrCallback;
-using ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::aidl::android::hardware::tv::tuner::IDemux;
using namespace std;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
class TunerDemux : public BnTunerDemux {
public:
- TunerDemux(sp<IDemux> demux, int demuxId);
+ TunerDemux(shared_ptr<IDemux> demux, int demuxId);
virtual ~TunerDemux();
- Status setFrontendDataSource(const shared_ptr<ITunerFrontend>& frontend) override;
- Status openFilter(
- int mainType, int subtype, int bufferSize, const shared_ptr<ITunerFilterCallback>& cb,
- shared_ptr<ITunerFilter>* _aidl_return) override;
- Status openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
- Status getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) override;
- Status getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) override;
- Status openDvr(
- int dvbType, int bufferSize, const shared_ptr<ITunerDvrCallback>& cb,
- shared_ptr<ITunerDvr>* _aidl_return) override;
- Status connectCiCam(int ciCamId) override;
- Status disconnectCiCam() override;
- Status close() override;
+
+ ::ndk::ScopedAStatus setFrontendDataSource(
+ const shared_ptr<ITunerFrontend>& in_frontend) override;
+ ::ndk::ScopedAStatus setFrontendDataSourceById(int frontendId) override;
+ ::ndk::ScopedAStatus openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+ const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) override;
+ ::ndk::ScopedAStatus openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
+ ::ndk::ScopedAStatus getAvSyncHwId(const shared_ptr<ITunerFilter>& in_tunerFilter,
+ int32_t* _aidl_return) override;
+ ::ndk::ScopedAStatus getAvSyncTime(int32_t in_avSyncHwId, int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+ const shared_ptr<ITunerDvrCallback>& in_cb,
+ shared_ptr<ITunerDvr>* _aidl_return) override;
+ ::ndk::ScopedAStatus connectCiCam(int32_t in_ciCamId) override;
+ ::ndk::ScopedAStatus disconnectCiCam() override;
+ ::ndk::ScopedAStatus close() override;
int getId() { return mDemuxId; }
private:
- sp<IDemux> mDemux;
+ shared_ptr<IDemux> mDemux;
int mDemuxId;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERDEMUX_H
diff --git a/services/tuner/TunerDescrambler.cpp b/services/tuner/TunerDescrambler.cpp
index b7ae167..70aee20 100644
--- a/services/tuner/TunerDescrambler.cpp
+++ b/services/tuner/TunerDescrambler.cpp
@@ -16,17 +16,27 @@
#define LOG_TAG "TunerDescrambler"
-#include "TunerFilter.h"
-#include "TunerDemux.h"
#include "TunerDescrambler.h"
-using ::android::hardware::tv::tuner::V1_0::Result;
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <utils/Log.h>
+
+#include "TunerDemux.h"
+#include "TunerFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::IFilter;
+using ::aidl::android::hardware::tv::tuner::Result;
using namespace std;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-TunerDescrambler::TunerDescrambler(sp<IDescrambler> descrambler) {
+TunerDescrambler::TunerDescrambler(shared_ptr<IDescrambler> descrambler) {
mDescrambler = descrambler;
}
@@ -34,91 +44,74 @@
mDescrambler = nullptr;
}
-Status TunerDescrambler::setDemuxSource(const std::shared_ptr<ITunerDemux>& demux) {
+::ndk::ScopedAStatus TunerDescrambler::setDemuxSource(
+ const shared_ptr<ITunerDemux>& in_tunerDemux) {
if (mDescrambler == nullptr) {
ALOGE("IDescrambler is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDescrambler->setDemuxSource(static_cast<TunerDemux*>(demux.get())->getId());
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return mDescrambler->setDemuxSource((static_cast<TunerDemux*>(in_tunerDemux.get()))->getId());
}
-Status TunerDescrambler::setKeyToken(const vector<uint8_t>& keyToken) {
+::ndk::ScopedAStatus TunerDescrambler::setKeyToken(const vector<uint8_t>& in_keyToken) {
if (mDescrambler == nullptr) {
ALOGE("IDescrambler is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDescrambler->setKeyToken(keyToken);
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return mDescrambler->setKeyToken(in_keyToken);
}
-Status TunerDescrambler::addPid(const TunerDemuxPid& pid,
- const shared_ptr<ITunerFilter>& optionalSourceFilter) {
+::ndk::ScopedAStatus TunerDescrambler::addPid(
+ const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
if (mDescrambler == nullptr) {
ALOGE("IDescrambler is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- sp<IFilter> halFilter = (optionalSourceFilter == NULL)
- ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
- Result res = mDescrambler->addPid(getHidlDemuxPid(pid), halFilter);
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ shared_ptr<IFilter> halFilter =
+ (in_optionalSourceFilter == nullptr)
+ ? nullptr
+ : static_cast<TunerFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+
+ return mDescrambler->addPid(in_pid, halFilter);
}
-Status TunerDescrambler::removePid(const TunerDemuxPid& pid,
- const shared_ptr<ITunerFilter>& optionalSourceFilter) {
+::ndk::ScopedAStatus TunerDescrambler::removePid(
+ const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
if (mDescrambler == nullptr) {
ALOGE("IDescrambler is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- sp<IFilter> halFilter = (optionalSourceFilter == NULL)
- ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
- Result res = mDescrambler->removePid(getHidlDemuxPid(pid), halFilter);
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ shared_ptr<IFilter> halFilter =
+ (in_optionalSourceFilter == nullptr)
+ ? nullptr
+ : static_cast<TunerFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+
+ return mDescrambler->removePid(in_pid, halFilter);
}
-Status TunerDescrambler::close() {
+::ndk::ScopedAStatus TunerDescrambler::close() {
if (mDescrambler == nullptr) {
ALOGE("IDescrambler is not initialized.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDescrambler->close();
- mDescrambler = NULL;
+ auto res = mDescrambler->close();
+ mDescrambler = nullptr;
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return res;
}
-DemuxPid TunerDescrambler::getHidlDemuxPid(const TunerDemuxPid& pid) {
- DemuxPid hidlPid;
- switch (pid.getTag()) {
- case TunerDemuxPid::tPid: {
- hidlPid.tPid((uint16_t)pid.get<TunerDemuxPid::tPid>());
- break;
- }
- case TunerDemuxPid::mmtpPid: {
- hidlPid.mmtpPid((uint16_t)pid.get<TunerDemuxPid::mmtpPid>());
- break;
- }
- }
- return hidlPid;
-}
+} // namespace tuner
+} // namespace tv
+} // namespace media
} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerDescrambler.h b/services/tuner/TunerDescrambler.h
index 1970fb7..b1d5fb9 100644
--- a/services/tuner/TunerDescrambler.h
+++ b/services/tuner/TunerDescrambler.h
@@ -17,38 +17,43 @@
#ifndef ANDROID_MEDIA_TUNERDESCRAMBLER_H
#define ANDROID_MEDIA_TUNERDESCRAMBLER_H
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
#include <aidl/android/media/tv/tuner/BnTunerDescrambler.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerDescrambler;
-using ::aidl::android::media::tv::tuner::ITunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::TunerDemuxPid;
-using ::android::hardware::tv::tuner::V1_0::DemuxPid;
-using ::android::hardware::tv::tuner::V1_0::IDescrambler;
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::aidl::android::hardware::tv::tuner::IDescrambler;
+using namespace std;
+
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
class TunerDescrambler : public BnTunerDescrambler {
public:
- TunerDescrambler(sp<IDescrambler> descrambler);
+ TunerDescrambler(shared_ptr<IDescrambler> descrambler);
virtual ~TunerDescrambler();
- Status setDemuxSource(const shared_ptr<ITunerDemux>& demux) override;
- Status setKeyToken(const vector<uint8_t>& keyToken) override;
- Status addPid(const TunerDemuxPid& pid,
- const shared_ptr<ITunerFilter>& optionalSourceFilter) override;
- Status removePid(const TunerDemuxPid& pid,
- const shared_ptr<ITunerFilter>& optionalSourceFilter) override;
- Status close() override;
+
+ ::ndk::ScopedAStatus setDemuxSource(const shared_ptr<ITunerDemux>& in_tunerDemux) override;
+ ::ndk::ScopedAStatus setKeyToken(const vector<uint8_t>& in_keyToken) override;
+ ::ndk::ScopedAStatus addPid(const DemuxPid& in_pid,
+ const shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+ ::ndk::ScopedAStatus removePid(
+ const DemuxPid& in_pid,
+ const shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+ ::ndk::ScopedAStatus close() override;
private:
- DemuxPid getHidlDemuxPid(const TunerDemuxPid& pid);
-
- sp<IDescrambler> mDescrambler;
+ shared_ptr<IDescrambler> mDescrambler;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERDESCRAMBLER_H
diff --git a/services/tuner/TunerDvr.cpp b/services/tuner/TunerDvr.cpp
index db4e07b..8776f7e 100644
--- a/services/tuner/TunerDvr.cpp
+++ b/services/tuner/TunerDvr.cpp
@@ -16,194 +16,152 @@
#define LOG_TAG "TunerDvr"
-#include <fmq/ConvertMQDescriptors.h>
#include "TunerDvr.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <utils/Log.h>
+
#include "TunerFilter.h"
-using ::android::hardware::tv::tuner::V1_0::DataFormat;
-using ::android::hardware::tv::tuner::V1_0::Result;
+using ::aidl::android::hardware::tv::tuner::Result;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-TunerDvr::TunerDvr(sp<IDvr> dvr, int type) {
+TunerDvr::TunerDvr(shared_ptr<IDvr> dvr, DvrType type) {
mDvr = dvr;
- mType = static_cast<DvrType>(type);
+ mType = type;
}
TunerDvr::~TunerDvr() {
- mDvr = NULL;
+ mDvr = nullptr;
}
-Status TunerDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- MQDesc dvrMQDesc;
- Result res;
- mDvr->getQueueDesc([&](Result r, const MQDesc& desc) {
- dvrMQDesc = desc;
- res = r;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
-
- AidlMQDesc aidlMQDesc;
- unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
- dvrMQDesc, &aidlMQDesc);
- *_aidl_return = move(aidlMQDesc);
- return Status::ok();
+ return mDvr->getQueueDesc(_aidl_return);
}
-Status TunerDvr::configure(const TunerDvrSettings& settings) {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::configure(const DvrSettings& in_settings) {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDvr->configure(getHidlDvrSettingsFromAidl(settings));
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return mDvr->configure(in_settings);
}
-Status TunerDvr::attachFilter(const shared_ptr<ITunerFilter>& filter) {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::attachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- ITunerFilter* tunerFilter = filter.get();
- sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
- if (hidlFilter == NULL) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ if (in_filter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
}
- Result res = mDvr->attachFilter(hidlFilter);
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(in_filter.get()))->getHalFilter();
+ if (halFilter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
}
- return Status::ok();
+
+ return mDvr->attachFilter(halFilter);
}
-Status TunerDvr::detachFilter(const shared_ptr<ITunerFilter>& filter) {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::detachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- ITunerFilter* tunerFilter = filter.get();
- sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
- if (hidlFilter == NULL) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ if (in_filter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
}
- Result res = mDvr->detachFilter(hidlFilter);
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ shared_ptr<IFilter> halFilter = (static_cast<TunerFilter*>(in_filter.get()))->getHalFilter();
+ if (halFilter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
}
- return Status::ok();
+
+ return mDvr->detachFilter(halFilter);
}
-Status TunerDvr::start() {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::start() {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDvr->start();
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return mDvr->start();
}
-Status TunerDvr::stop() {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::stop() {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDvr->stop();
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return mDvr->stop();
}
-Status TunerDvr::flush() {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::flush() {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDvr->flush();
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return mDvr->flush();
}
-Status TunerDvr::close() {
- if (mDvr == NULL) {
+::ndk::ScopedAStatus TunerDvr::close() {
+ if (mDvr == nullptr) {
ALOGE("IDvr is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDvr->close();
- mDvr = NULL;
+ auto status = mDvr->close();
+ mDvr = nullptr;
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
-}
-
-DvrSettings TunerDvr::getHidlDvrSettingsFromAidl(TunerDvrSettings settings) {
- DvrSettings s;
- switch (mType) {
- case DvrType::PLAYBACK: {
- s.playback({
- .statusMask = static_cast<uint8_t>(settings.statusMask),
- .lowThreshold = static_cast<uint32_t>(settings.lowThreshold),
- .highThreshold = static_cast<uint32_t>(settings.highThreshold),
- .dataFormat = static_cast<DataFormat>(settings.dataFormat),
- .packetSize = static_cast<uint8_t>(settings.packetSize),
- });
- return s;
- }
- case DvrType::RECORD: {
- s.record({
- .statusMask = static_cast<uint8_t>(settings.statusMask),
- .lowThreshold = static_cast<uint32_t>(settings.lowThreshold),
- .highThreshold = static_cast<uint32_t>(settings.highThreshold),
- .dataFormat = static_cast<DataFormat>(settings.dataFormat),
- .packetSize = static_cast<uint8_t>(settings.packetSize),
- });
- return s;
- }
- default:
- break;
- }
- return s;
+ return status;
}
/////////////// IDvrCallback ///////////////////////
-
-Return<void> TunerDvr::DvrCallback::onRecordStatus(const RecordStatus status) {
- if (mTunerDvrCallback != NULL) {
- mTunerDvrCallback->onRecordStatus(static_cast<int>(status));
+::ndk::ScopedAStatus TunerDvr::DvrCallback::onRecordStatus(const RecordStatus status) {
+ if (mTunerDvrCallback != nullptr) {
+ mTunerDvrCallback->onRecordStatus(status);
}
- return Void();
+ return ndk::ScopedAStatus::ok();
}
-Return<void> TunerDvr::DvrCallback::onPlaybackStatus(const PlaybackStatus status) {
- if (mTunerDvrCallback != NULL) {
- mTunerDvrCallback->onPlaybackStatus(static_cast<int>(status));
+::ndk::ScopedAStatus TunerDvr::DvrCallback::onPlaybackStatus(const PlaybackStatus status) {
+ if (mTunerDvrCallback != nullptr) {
+ mTunerDvrCallback->onPlaybackStatus(status);
}
- return Void();
+ return ndk::ScopedAStatus::ok();
}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerDvr.h b/services/tuner/TunerDvr.h
index a508e99..1854d08 100644
--- a/services/tuner/TunerDvr.h
+++ b/services/tuner/TunerDvr.h
@@ -17,81 +17,71 @@
#ifndef ANDROID_MEDIA_TUNERDVR_H
#define ANDROID_MEDIA_TUNERDVR_H
+#include <aidl/android/hardware/tv/tuner/BnDvrCallback.h>
+#include <aidl/android/hardware/tv/tuner/DvrSettings.h>
+#include <aidl/android/hardware/tv/tuner/DvrType.h>
+#include <aidl/android/hardware/tv/tuner/IDvr.h>
+#include <aidl/android/hardware/tv/tuner/PlaybackStatus.h>
+#include <aidl/android/hardware/tv/tuner/RecordStatus.h>
#include <aidl/android/media/tv/tuner/BnTunerDvr.h>
#include <aidl/android/media/tv/tuner/ITunerDvrCallback.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <fmq/MessageQueue.h>
-#include <TunerFilter.h>
+#include "TunerFilter.h"
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::hardware::common::fmq::GrantorDescriptor;
using ::aidl::android::hardware::common::fmq::MQDescriptor;
using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
-using ::aidl::android::media::tv::tuner::BnTunerDvr;
-using ::aidl::android::media::tv::tuner::ITunerDvrCallback;
-using ::aidl::android::media::tv::tuner::ITunerFilter;
-using ::aidl::android::media::tv::tuner::TunerDvrSettings;
-
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-
-using ::android::hardware::tv::tuner::V1_0::DvrSettings;
-using ::android::hardware::tv::tuner::V1_0::DvrType;
-using ::android::hardware::tv::tuner::V1_0::IDvr;
-using ::android::hardware::tv::tuner::V1_0::IDvrCallback;
-using ::android::hardware::tv::tuner::V1_0::PlaybackStatus;
-using ::android::hardware::tv::tuner::V1_0::RecordStatus;
+using ::aidl::android::hardware::tv::tuner::BnDvrCallback;
+using ::aidl::android::hardware::tv::tuner::DvrSettings;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::aidl::android::hardware::tv::tuner::IDvr;
+using ::aidl::android::hardware::tv::tuner::PlaybackStatus;
+using ::aidl::android::hardware::tv::tuner::RecordStatus;
using namespace std;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-using MQDesc = MQDescriptorSync<uint8_t>;
using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
class TunerDvr : public BnTunerDvr {
public:
- TunerDvr(sp<IDvr> dvr, int type);
+ TunerDvr(shared_ptr<IDvr> dvr, DvrType type);
~TunerDvr();
- Status getQueueDesc(AidlMQDesc* _aidl_return) override;
+ ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+ ::ndk::ScopedAStatus configure(const DvrSettings& in_settings) override;
+ ::ndk::ScopedAStatus attachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+ ::ndk::ScopedAStatus detachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+ ::ndk::ScopedAStatus start() override;
+ ::ndk::ScopedAStatus stop() override;
+ ::ndk::ScopedAStatus flush() override;
+ ::ndk::ScopedAStatus close() override;
- Status configure(const TunerDvrSettings& settings) override;
-
- Status attachFilter(const shared_ptr<ITunerFilter>& filter) override;
-
- Status detachFilter(const shared_ptr<ITunerFilter>& filter) override;
-
- Status start() override;
-
- Status stop() override;
-
- Status flush() override;
-
- Status close() override;
-
- struct DvrCallback : public IDvrCallback {
+ struct DvrCallback : public BnDvrCallback {
DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)
- : mTunerDvrCallback(tunerDvrCallback) {};
+ : mTunerDvrCallback(tunerDvrCallback){};
- virtual Return<void> onRecordStatus(const RecordStatus status);
- virtual Return<void> onPlaybackStatus(const PlaybackStatus status);
+ ::ndk::ScopedAStatus onRecordStatus(const RecordStatus status) override;
+ ::ndk::ScopedAStatus onPlaybackStatus(const PlaybackStatus status) override;
- private:
- shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
+ private:
+ shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
};
private:
- DvrSettings getHidlDvrSettingsFromAidl(TunerDvrSettings settings);
-
- sp<IDvr> mDvr;
+ shared_ptr<IDvr> mDvr;
DvrType mType;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERDVR_H
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index 039fd31..fb5bfa3 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -18,893 +18,460 @@
#include "TunerFilter.h"
-using ::aidl::android::media::tv::tuner::TunerFilterSectionCondition;
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <binder/IPCThreadState.h>
-using ::android::hardware::hidl_handle;
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpLengthType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpAddress;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpPid;
-using ::android::hardware::tv::tuner::V1_0::DemuxRecordScIndexType;
-using ::android::hardware::tv::tuner::V1_0::DemuxStreamId;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::AudioStreamType;
-using ::android::hardware::tv::tuner::V1_1::Constant;
-using ::android::hardware::tv::tuner::V1_1::VideoStreamType;
+#include "TunerHelper.h"
+#include "TunerService.h"
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using ::android::IPCThreadState;
using namespace std;
-TunerFilter::TunerFilter(
- sp<IFilter> filter, int mainType, int subType) {
- mFilter = filter;
- mFilter_1_1 = ::android::hardware::tv::tuner::V1_1::IFilter::castFrom(filter);
- mMainType = mainType;
- mSubType = subType;
-}
+TunerFilter::TunerFilter(shared_ptr<IFilter> filter, shared_ptr<FilterCallback> cb,
+ DemuxFilterType type)
+ : mFilter(filter),
+ mType(type),
+ mStarted(false),
+ mShared(false),
+ mClientPid(-1),
+ mFilterCallback(cb) {}
TunerFilter::~TunerFilter() {
+ Mutex::Autolock _l(mLock);
mFilter = nullptr;
- mFilter_1_1 = nullptr;
}
-Status TunerFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
- if (mFilter == NULL) {
- ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- MQDesc filterMQDesc;
- Result res;
- mFilter->getQueueDesc([&](Result r, const MQDesc& desc) {
- filterMQDesc = desc;
- res = r;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
-
- AidlMQDesc aidlMQDesc;
- unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
- filterMQDesc, &aidlMQDesc);
- *_aidl_return = move(aidlMQDesc);
- return Status::ok();
-}
-
-Status TunerFilter::getId(int32_t* _aidl_return) {
+::ndk::ScopedAStatus TunerFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
+ Mutex::Autolock _l(mLock);
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res;
- mFilter->getId([&](Result r, uint32_t filterId) {
- res = r;
- mId = filterId;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
}
- *_aidl_return = mId;
- return Status::ok();
+
+ return mFilter->getQueueDesc(_aidl_return);
}
-Status TunerFilter::getId64Bit(int64_t* _aidl_return) {
- if (mFilter_1_1 == nullptr) {
- ALOGE("IFilter_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- Result res;
- mFilter_1_1->getId64Bit([&](Result r, uint64_t filterId) {
- res = r;
- mId64Bit = filterId;
- });
- if (res != Result::SUCCESS) {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- *_aidl_return = mId64Bit;
- return Status::ok();
-}
-
-Status TunerFilter::configure(const TunerFilterConfiguration& config) {
+::ndk::ScopedAStatus TunerFilter::getId(int32_t* _aidl_return) {
+ Mutex::Autolock _l(mLock);
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- DemuxFilterSettings settings;
- switch (config.getTag()) {
- case TunerFilterConfiguration::ts: {
- getHidlTsSettings(config, settings);
- break;
- }
- case TunerFilterConfiguration::mmtp: {
- getHidlMmtpSettings(config, settings);
- break;
- }
- case TunerFilterConfiguration::ip: {
- getHidlIpSettings(config, settings);
- break;
- }
- case TunerFilterConfiguration::tlv: {
- getHidlTlvSettings(config, settings);
- break;
- }
- case TunerFilterConfiguration::alp: {
- getHidlAlpSettings(config, settings);
- break;
- }
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
}
- Result res = mFilter->configure(settings);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ auto status = mFilter->getId(&mId);
+ if (status.isOk()) {
+ *_aidl_return = mId;
}
- return Status::ok();
+ return status;
}
-Status TunerFilter::configureMonitorEvent(int monitorEventType) {
- if (mFilter_1_1 == nullptr) {
- ALOGE("IFilter_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- Result res = mFilter_1_1->configureMonitorEvent(monitorEventType);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
-}
-
-Status TunerFilter::configureIpFilterContextId(int cid) {
- if (mFilter_1_1 == nullptr) {
- ALOGE("IFilter_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- Result res = mFilter_1_1->configureIpCid(cid);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
-}
-
-Status TunerFilter::configureAvStreamType(int avStreamType) {
- if (mFilter_1_1 == nullptr) {
- ALOGE("IFilter_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- AvStreamType type;
- if (!getHidlAvStreamType(avStreamType, type)) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_STATE));
- }
-
- Result res = mFilter_1_1->configureAvStreamType(type);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
-}
-
-Status TunerFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+::ndk::ScopedAStatus TunerFilter::getId64Bit(int64_t* _aidl_return) {
+ Mutex::Autolock _l(mLock);
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- ITunerFilter* tunerFilter = filter.get();
- sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
- Result res = mFilter->setDataSource(hidlFilter);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
-}
-
-void TunerFilter::getHidlTsSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
- auto tsConf = config.get<TunerFilterConfiguration::ts>();
- DemuxTsFilterSettings ts{
- .tpid = static_cast<uint16_t>(tsConf.tpid),
- };
-
- TunerFilterSettings tunerSettings = tsConf.filterSettings;
- switch (tunerSettings.getTag()) {
- case TunerFilterSettings::av: {
- ts.filterSettings.av(getAvSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::section: {
- ts.filterSettings.section(getSectionSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::pesData: {
- ts.filterSettings.pesData(getPesDataSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::record: {
- ts.filterSettings.record(getRecordSettings(tunerSettings));
- break;
- }
- default: {
- ts.filterSettings.noinit();
- break;
- }
- }
- settings.ts(ts);
-}
-
-void TunerFilter::getHidlMmtpSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
- auto mmtpConf = config.get<TunerFilterConfiguration::mmtp>();
- DemuxMmtpFilterSettings mmtp{
- .mmtpPid = static_cast<DemuxMmtpPid>(mmtpConf.mmtpPid),
- };
-
- TunerFilterSettings tunerSettings = mmtpConf.filterSettings;
- switch (tunerSettings.getTag()) {
- case TunerFilterSettings::av: {
- mmtp.filterSettings.av(getAvSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::section: {
- mmtp.filterSettings.section(getSectionSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::pesData: {
- mmtp.filterSettings.pesData(getPesDataSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::record: {
- mmtp.filterSettings.record(getRecordSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::download: {
- mmtp.filterSettings.download(getDownloadSettings(tunerSettings));
- break;
- }
- default: {
- mmtp.filterSettings.noinit();
- break;
- }
- }
- settings.mmtp(mmtp);
-}
-
-void TunerFilter::getHidlIpSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
- auto ipConf = config.get<TunerFilterConfiguration::ip>();
- DemuxIpAddress ipAddr{
- .srcPort = static_cast<uint16_t>(ipConf.ipAddr.srcPort),
- .dstPort = static_cast<uint16_t>(ipConf.ipAddr.dstPort),
- };
-
- ipConf.ipAddr.srcIpAddress.isIpV6
- ? ipAddr.srcIpAddress.v6(getIpV6Address(ipConf.ipAddr.srcIpAddress))
- : ipAddr.srcIpAddress.v4(getIpV4Address(ipConf.ipAddr.srcIpAddress));
- ipConf.ipAddr.dstIpAddress.isIpV6
- ? ipAddr.dstIpAddress.v6(getIpV6Address(ipConf.ipAddr.dstIpAddress))
- : ipAddr.dstIpAddress.v4(getIpV4Address(ipConf.ipAddr.dstIpAddress));
- DemuxIpFilterSettings ip{
- .ipAddr = ipAddr,
- };
-
- TunerFilterSettings tunerSettings = ipConf.filterSettings;
- switch (tunerSettings.getTag()) {
- case TunerFilterSettings::section: {
- ip.filterSettings.section(getSectionSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::isPassthrough: {
- ip.filterSettings.bPassthrough(tunerSettings.isPassthrough);
- break;
- }
- default: {
- ip.filterSettings.noinit();
- break;
- }
- }
- settings.ip(ip);
-}
-
-hidl_array<uint8_t, IP_V6_LENGTH> TunerFilter::getIpV6Address(TunerDemuxIpAddress addr) {
- hidl_array<uint8_t, IP_V6_LENGTH> ip;
- if (addr.addr.size() != IP_V6_LENGTH) {
- return ip;
- }
- copy(addr.addr.begin(), addr.addr.end(), ip.data());
- return ip;
-}
-
-hidl_array<uint8_t, IP_V4_LENGTH> TunerFilter::getIpV4Address(TunerDemuxIpAddress addr) {
- hidl_array<uint8_t, IP_V4_LENGTH> ip;
- if (addr.addr.size() != IP_V4_LENGTH) {
- return ip;
- }
- copy(addr.addr.begin(), addr.addr.end(), ip.data());
- return ip;
-}
-
-void TunerFilter::getHidlTlvSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
- auto tlvConf = config.get<TunerFilterConfiguration::tlv>();
- DemuxTlvFilterSettings tlv{
- .packetType = static_cast<uint8_t>(tlvConf.packetType),
- .isCompressedIpPacket = tlvConf.isCompressedIpPacket,
- };
-
- TunerFilterSettings tunerSettings = tlvConf.filterSettings;
- switch (tunerSettings.getTag()) {
- case TunerFilterSettings::section: {
- tlv.filterSettings.section(getSectionSettings(tunerSettings));
- break;
- }
- case TunerFilterSettings::isPassthrough: {
- tlv.filterSettings.bPassthrough(tunerSettings.isPassthrough);
- break;
- }
- default: {
- tlv.filterSettings.noinit();
- break;
- }
- }
- settings.tlv(tlv);
-}
-
-void TunerFilter::getHidlAlpSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
- auto alpConf = config.get<TunerFilterConfiguration::alp>();
- DemuxAlpFilterSettings alp{
- .packetType = static_cast<uint8_t>(alpConf.packetType),
- .lengthType = static_cast<DemuxAlpLengthType>(alpConf.lengthType),
- };
-
- TunerFilterSettings tunerSettings = alpConf.filterSettings;
- switch (tunerSettings.getTag()) {
- case TunerFilterSettings::section: {
- alp.filterSettings.section(getSectionSettings(tunerSettings));
- break;
- }
- default: {
- alp.filterSettings.noinit();
- break;
- }
- }
- settings.alp(alp);
-}
-
-DemuxFilterAvSettings TunerFilter::getAvSettings(const TunerFilterSettings& settings) {
- DemuxFilterAvSettings av {
- .isPassthrough = settings.get<TunerFilterSettings::av>().isPassthrough,
- };
- return av;
-}
-
-DemuxFilterSectionSettings TunerFilter::getSectionSettings(const TunerFilterSettings& settings) {
- auto s = settings.get<TunerFilterSettings::section>();
- DemuxFilterSectionSettings section{
- .isCheckCrc = s.isCheckCrc,
- .isRepeat = s.isRepeat,
- .isRaw = s.isRaw,
- };
-
- switch (s.condition.getTag()) {
- case TunerFilterSectionCondition::sectionBits: {
- auto sectionBits = s.condition.get<TunerFilterSectionCondition::sectionBits>();
- vector<uint8_t> filter(sectionBits.filter.begin(), sectionBits.filter.end());
- vector<uint8_t> mask(sectionBits.mask.begin(), sectionBits.mask.end());
- vector<uint8_t> mode(sectionBits.mode.begin(), sectionBits.mode.end());
- section.condition.sectionBits({
- .filter = filter,
- .mask = mask,
- .mode = mode,
- });
- break;
- }
- case TunerFilterSectionCondition::tableInfo: {
- auto tableInfo = s.condition.get<TunerFilterSectionCondition::tableInfo>();
- section.condition.tableInfo({
- .tableId = static_cast<uint16_t>(tableInfo.tableId),
- .version = static_cast<uint16_t>(tableInfo.version),
- });
- break;
- }
- default: {
- break;
- }
- }
- return section;
-}
-
-DemuxFilterPesDataSettings TunerFilter::getPesDataSettings(const TunerFilterSettings& settings) {
- DemuxFilterPesDataSettings pes{
- .streamId = static_cast<DemuxStreamId>(
- settings.get<TunerFilterSettings::pesData>().streamId),
- .isRaw = settings.get<TunerFilterSettings::pesData>().isRaw,
- };
- return pes;
-}
-
-DemuxFilterRecordSettings TunerFilter::getRecordSettings(const TunerFilterSettings& settings) {
- auto r = settings.get<TunerFilterSettings::record>();
- DemuxFilterRecordSettings record{
- .tsIndexMask = static_cast<uint32_t>(r.tsIndexMask),
- .scIndexType = static_cast<DemuxRecordScIndexType>(r.scIndexType),
- };
-
- switch (r.scIndexMask.getTag()) {
- case TunerFilterScIndexMask::sc: {
- record.scIndexMask.sc(static_cast<uint32_t>(
- r.scIndexMask.get<TunerFilterScIndexMask::sc>()));
- break;
- }
- case TunerFilterScIndexMask::scHevc: {
- record.scIndexMask.scHevc(static_cast<uint32_t>(
- r.scIndexMask.get<TunerFilterScIndexMask::scHevc>()));
- break;
- }
- }
- return record;
-}
-
-DemuxFilterDownloadSettings TunerFilter::getDownloadSettings(const TunerFilterSettings& settings) {
- DemuxFilterDownloadSettings download {
- .downloadId = static_cast<uint32_t>(
- settings.get<TunerFilterSettings::download>().downloadId),
- };
- return download;
-}
-
-Status TunerFilter::getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) {
- if (mFilter_1_1 == nullptr) {
- ALOGE("IFilter_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
}
- Result res;
- mFilter_1_1->getAvSharedHandle([&](Result r, hidl_handle avMemory, uint64_t avMemSize) {
- res = r;
- if (res == Result::SUCCESS) {
- TunerFilterSharedHandleInfo info{
- .handle = dupToAidl(avMemory),
- .size = static_cast<int64_t>(avMemSize),
- };
- *_aidl_return = move(info);
+ auto status = mFilter->getId64Bit(&mId64Bit);
+ if (status.isOk()) {
+ *_aidl_return = mId64Bit;
+ }
+ return status;
+}
+
+::ndk::ScopedAStatus TunerFilter::configure(const DemuxFilterSettings& in_settings) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ return mFilter->configure(in_settings);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureMonitorEvent(int32_t monitorEventType) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ return mFilter->configureMonitorEvent(monitorEventType);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureIpFilterContextId(int32_t cid) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ return mFilter->configureIpCid(cid);
+}
+
+::ndk::ScopedAStatus TunerFilter::configureAvStreamType(const AvStreamType& in_avStreamType) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ return mFilter->configureAvStreamType(in_avStreamType);
+}
+
+::ndk::ScopedAStatus TunerFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (filter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ shared_ptr<IFilter> halFilter = static_cast<TunerFilter*>(filter.get())->getHalFilter();
+ return mFilter->setDataSource(halFilter);
+}
+
+::ndk::ScopedAStatus TunerFilter::getAvSharedHandle(NativeHandle* out_avMemory,
+ int64_t* _aidl_return) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ return mFilter->getAvSharedHandle(out_avMemory, _aidl_return);
+}
+
+::ndk::ScopedAStatus TunerFilter::releaseAvHandle(const NativeHandle& in_handle,
+ int64_t in_avDataId) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ return mFilter->releaseAvHandle(in_handle, in_avDataId);
+}
+
+::ndk::ScopedAStatus TunerFilter::start() {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+ }
+
+ auto res = mFilter->start();
+ if (res.isOk()) {
+ mStarted = true;
+ }
+ return res;
+}
+
+::ndk::ScopedAStatus TunerFilter::stop() {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+ }
+
+ auto res = mFilter->stop();
+ mStarted = false;
+
+ return res;
+}
+
+::ndk::ScopedAStatus TunerFilter::flush() {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+ }
+
+ return mFilter->flush();
+}
+
+::ndk::ScopedAStatus TunerFilter::close() {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ if (mFilterCallback != nullptr) {
+ mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+ mFilterCallback->detachSharedFilterCallback();
+ }
+ TunerService::getTunerService()->removeSharedFilter(this->ref<TunerFilter>());
} else {
- _aidl_return = NULL;
+ // Calling from shared process, do not really close this filter.
+ if (mFilterCallback != nullptr) {
+ mFilterCallback->detachSharedFilterCallback();
+ }
+ mStarted = false;
+ return ::ndk::ScopedAStatus::ok();
}
- });
-
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
}
- return Status::ok();
+
+ auto res = mFilter->close();
+ mFilter = nullptr;
+ mStarted = false;
+ mShared = false;
+ mClientPid = -1;
+
+ return res;
}
-Status TunerFilter::releaseAvHandle(
- const ::aidl::android::hardware::common::NativeHandle& handle, int64_t avDataId) {
+::ndk::ScopedAStatus TunerFilter::acquireSharedFilterToken(string* _aidl_return) {
+ Mutex::Autolock _l(mLock);
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mFilter->releaseAvHandle(hidl_handle(makeFromAidl(handle)), avDataId);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ if (mShared || mStarted) {
+ ALOGD("create SharedFilter in wrong state");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
}
- return Status::ok();
+
+ IPCThreadState* ipc = IPCThreadState::self();
+ mClientPid = ipc->getCallingPid();
+ string token = TunerService::getTunerService()->addFilterToShared(this->ref<TunerFilter>());
+ _aidl_return->assign(token);
+ mShared = true;
+
+ return ::ndk::ScopedAStatus::ok();
}
-Status TunerFilter::start() {
+::ndk::ScopedAStatus TunerFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
+ Mutex::Autolock _l(mLock);
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mFilter->start();
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+
+ if (!mShared) {
+ // The filter is not shared or the shared filter has been closed.
+ return ::ndk::ScopedAStatus::ok();
}
- return Status::ok();
+
+ if (mFilterCallback != nullptr) {
+ mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+ mFilterCallback->detachSharedFilterCallback();
+ }
+
+ TunerService::getTunerService()->removeSharedFilter(this->ref<TunerFilter>());
+ mShared = false;
+
+ return ::ndk::ScopedAStatus::ok();
}
-Status TunerFilter::stop() {
+::ndk::ScopedAStatus TunerFilter::getFilterType(DemuxFilterType* _aidl_return) {
+ Mutex::Autolock _l(mLock);
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mFilter->stop();
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+
+ *_aidl_return = mType;
+ return ::ndk::ScopedAStatus::ok();
}
-Status TunerFilter::flush() {
+::ndk::ScopedAStatus TunerFilter::setDelayHint(const FilterDelayHint& in_hint) {
+ Mutex::Autolock _l(mLock);
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mFilter->flush();
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+
+ return mFilter->setDelayHint(in_hint);
}
-Status TunerFilter::close() {
- if (mFilter == nullptr) {
- ALOGE("IFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
- Result res = mFilter->close();
- mFilter = NULL;
- mFilter_1_1 = NULL;
-
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+bool TunerFilter::isSharedFilterAllowed(int callingPid) {
+ return mShared && mClientPid != callingPid;
}
-sp<IFilter> TunerFilter::getHalFilter() {
+void TunerFilter::attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb) {
+ if (mFilterCallback != nullptr) {
+ mFilterCallback->attachSharedFilterCallback(in_cb);
+ }
+}
+
+shared_ptr<IFilter> TunerFilter::getHalFilter() {
return mFilter;
}
-bool TunerFilter::isAudioFilter() {
- return (mMainType == (int)DemuxFilterMainType::TS
- && mSubType == (int)DemuxTsFilterType::AUDIO)
- || (mMainType == (int)DemuxFilterMainType::MMTP
- && mSubType == (int)DemuxMmtpFilterType::AUDIO);
-}
-
-bool TunerFilter::isVideoFilter() {
- return (mMainType == (int)DemuxFilterMainType::TS
- && mSubType == (int)DemuxTsFilterType::VIDEO)
- || (mMainType == (int)DemuxFilterMainType::MMTP
- && mSubType == (int)DemuxMmtpFilterType::VIDEO);
-}
-
-bool TunerFilter::getHidlAvStreamType(int avStreamType, AvStreamType& type) {
- if (isAudioFilter()) {
- type.audio(static_cast<AudioStreamType>(avStreamType));
- return true;
- }
-
- if (isVideoFilter()) {
- type.video(static_cast<VideoStreamType>(avStreamType));
- return true;
- }
-
- return false;
-}
-
/////////////// FilterCallback ///////////////////////
-
-Return<void> TunerFilter::FilterCallback::onFilterStatus(DemuxFilterStatus status) {
- if (mTunerFilterCallback != NULL) {
- mTunerFilterCallback->onFilterStatus((int)status);
+::ndk::ScopedAStatus TunerFilter::FilterCallback::onFilterStatus(DemuxFilterStatus status) {
+ Mutex::Autolock _l(mCallbackLock);
+ if (mTunerFilterCallback != nullptr) {
+ mTunerFilterCallback->onFilterStatus(status);
}
- return Void();
+ return ::ndk::ScopedAStatus::ok();
}
-Return<void> TunerFilter::FilterCallback::onFilterEvent(const DemuxFilterEvent& filterEvent) {
- vector<DemuxFilterEventExt::Event> emptyEventsExt;
- DemuxFilterEventExt emptyFilterEventExt {
- .events = emptyEventsExt,
- };
- onFilterEvent_1_1(filterEvent, emptyFilterEventExt);
- return Void();
+::ndk::ScopedAStatus TunerFilter::FilterCallback::onFilterEvent(
+ const vector<DemuxFilterEvent>& events) {
+ Mutex::Autolock _l(mCallbackLock);
+ if (mTunerFilterCallback != nullptr) {
+ mTunerFilterCallback->onFilterEvent(events);
+ }
+ return ::ndk::ScopedAStatus::ok();
}
-Return<void> TunerFilter::FilterCallback::onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
- const DemuxFilterEventExt& filterEventExt) {
- if (mTunerFilterCallback != NULL) {
- vector<DemuxFilterEvent::Event> events = filterEvent.events;
- vector<DemuxFilterEventExt::Event> eventsExt = filterEventExt.events;
- vector<TunerFilterEvent> tunerEvent;
-
- getAidlFilterEvent(events, eventsExt, tunerEvent);
- mTunerFilterCallback->onFilterEvent(tunerEvent);
- }
- return Void();
-}
-
-/////////////// FilterCallback Helper Methods ///////////////////////
-
-void TunerFilter::FilterCallback::getAidlFilterEvent(vector<DemuxFilterEvent::Event>& events,
- vector<DemuxFilterEventExt::Event>& eventsExt,
- vector<TunerFilterEvent>& tunerEvent) {
- if (events.empty() && !eventsExt.empty()) {
- auto eventExt = eventsExt[0];
- switch (eventExt.getDiscriminator()) {
- case DemuxFilterEventExt::Event::hidl_discriminator::monitorEvent: {
- getMonitorEvent(eventsExt, tunerEvent);
- return;
- }
- case DemuxFilterEventExt::Event::hidl_discriminator::startId: {
- getRestartEvent(eventsExt, tunerEvent);
- return;
- }
- default: {
- break;
- }
- }
- return;
- }
-
- if (!events.empty()) {
- auto event = events[0];
- switch (event.getDiscriminator()) {
- case DemuxFilterEvent::Event::hidl_discriminator::media: {
- getMediaEvent(events, tunerEvent);
- break;
- }
- case DemuxFilterEvent::Event::hidl_discriminator::section: {
- getSectionEvent(events, tunerEvent);
- break;
- }
- case DemuxFilterEvent::Event::hidl_discriminator::pes: {
- getPesEvent(events, tunerEvent);
- break;
- }
- case DemuxFilterEvent::Event::hidl_discriminator::tsRecord: {
- getTsRecordEvent(events, eventsExt, tunerEvent);
- break;
- }
- case DemuxFilterEvent::Event::hidl_discriminator::mmtpRecord: {
- getMmtpRecordEvent(events, eventsExt, tunerEvent);
- break;
- }
- case DemuxFilterEvent::Event::hidl_discriminator::download: {
- getDownloadEvent(events, tunerEvent);
- break;
- }
- case DemuxFilterEvent::Event::hidl_discriminator::ipPayload: {
- getIpPayloadEvent(events, tunerEvent);
- break;
- }
- case DemuxFilterEvent::Event::hidl_discriminator::temi: {
- getTemiEvent(events, tunerEvent);
- break;
- }
- default: {
- break;
- }
- }
+void TunerFilter::FilterCallback::sendSharedFilterStatus(int32_t status) {
+ Mutex::Autolock _l(mCallbackLock);
+ if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+ mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
}
}
-void TunerFilter::FilterCallback::getMediaEvent(
- vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
- for (DemuxFilterEvent::Event e : events) {
- DemuxFilterMediaEvent mediaEvent = e.media();
- TunerFilterMediaEvent tunerMedia;
+void TunerFilter::FilterCallback::attachSharedFilterCallback(
+ const shared_ptr<ITunerFilterCallback>& in_cb) {
+ Mutex::Autolock _l(mCallbackLock);
+ mOriginalCallback = mTunerFilterCallback;
+ mTunerFilterCallback = in_cb;
+}
- tunerMedia.streamId = static_cast<char16_t>(mediaEvent.streamId);
- tunerMedia.isPtsPresent = mediaEvent.isPtsPresent;
- tunerMedia.pts = static_cast<long>(mediaEvent.pts);
- tunerMedia.dataLength = static_cast<int>(mediaEvent.dataLength);
- tunerMedia.offset = static_cast<int>(mediaEvent.offset);
- tunerMedia.isSecureMemory = mediaEvent.isSecureMemory;
- tunerMedia.avDataId = static_cast<long>(mediaEvent.avDataId);
- tunerMedia.mpuSequenceNumber = static_cast<int>(mediaEvent.mpuSequenceNumber);
- tunerMedia.isPesPrivateData = mediaEvent.isPesPrivateData;
-
- if (mediaEvent.extraMetaData.getDiscriminator() ==
- DemuxFilterMediaEvent::ExtraMetaData::hidl_discriminator::audio) {
- tunerMedia.isAudioExtraMetaData = true;
- tunerMedia.audio = {
- .adFade = static_cast<int8_t>(
- mediaEvent.extraMetaData.audio().adFade),
- .adPan = static_cast<int8_t>(
- mediaEvent.extraMetaData.audio().adPan),
- .versionTextTag = static_cast<int8_t>(
- mediaEvent.extraMetaData.audio().versionTextTag),
- .adGainCenter = static_cast<int8_t>(
- mediaEvent.extraMetaData.audio().adGainCenter),
- .adGainFront = static_cast<int8_t>(
- mediaEvent.extraMetaData.audio().adGainFront),
- .adGainSurround = static_cast<int8_t>(
- mediaEvent.extraMetaData.audio().adGainSurround),
- };
- } else {
- tunerMedia.isAudioExtraMetaData = false;
- }
-
- if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
- tunerMedia.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
- }
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::media>(move(tunerMedia));
- res.push_back(move(tunerEvent));
+void TunerFilter::FilterCallback::detachSharedFilterCallback() {
+ Mutex::Autolock _l(mCallbackLock);
+ if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+ mTunerFilterCallback = mOriginalCallback;
+ mOriginalCallback = nullptr;
}
}
-void TunerFilter::FilterCallback::getSectionEvent(
- vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
- for (DemuxFilterEvent::Event e : events) {
- DemuxFilterSectionEvent sectionEvent = e.section();
- TunerFilterSectionEvent tunerSection;
-
- tunerSection.tableId = static_cast<char16_t>(sectionEvent.tableId);
- tunerSection.version = static_cast<char16_t>(sectionEvent.version);
- tunerSection.sectionNum = static_cast<char16_t>(sectionEvent.sectionNum);
- tunerSection.dataLength = static_cast<char16_t>(sectionEvent.dataLength);
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::section>(move(tunerSection));
- res.push_back(move(tunerEvent));
- }
-}
-
-void TunerFilter::FilterCallback::getPesEvent(
- vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
- for (DemuxFilterEvent::Event e : events) {
- DemuxFilterPesEvent pesEvent = e.pes();
- TunerFilterPesEvent tunerPes;
-
- tunerPes.streamId = static_cast<char16_t>(pesEvent.streamId);
- tunerPes.dataLength = static_cast<char16_t>(pesEvent.dataLength);
- tunerPes.mpuSequenceNumber = static_cast<int>(pesEvent.mpuSequenceNumber);
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::pes>(move(tunerPes));
- res.push_back(move(tunerEvent));
- }
-}
-
-void TunerFilter::FilterCallback::getTsRecordEvent(vector<DemuxFilterEvent::Event>& events,
- vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
- for (int i = 0; i < events.size(); i++) {
- TunerFilterTsRecordEvent tunerTsRecord;
- DemuxFilterTsRecordEvent tsRecordEvent = events[i].tsRecord();
-
- TunerFilterScIndexMask scIndexMask;
- if (tsRecordEvent.scIndexMask.getDiscriminator()
- == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
- scIndexMask.set<TunerFilterScIndexMask::sc>(
- static_cast<int>(tsRecordEvent.scIndexMask.sc()));
- } else if (tsRecordEvent.scIndexMask.getDiscriminator()
- == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
- scIndexMask.set<TunerFilterScIndexMask::scHevc>(
- static_cast<int>(tsRecordEvent.scIndexMask.scHevc()));
- }
-
- if (tsRecordEvent.pid.getDiscriminator() == DemuxPid::hidl_discriminator::tPid) {
- tunerTsRecord.pid = static_cast<char16_t>(tsRecordEvent.pid.tPid());
- } else {
- tunerTsRecord.pid = static_cast<char16_t>(Constant::INVALID_TS_PID);
- }
-
- tunerTsRecord.scIndexMask = scIndexMask;
- tunerTsRecord.tsIndexMask = static_cast<int>(tsRecordEvent.tsIndexMask);
- tunerTsRecord.byteNumber = static_cast<long>(tsRecordEvent.byteNumber);
-
- if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
- DemuxFilterEventExt::Event::hidl_discriminator::tsRecord) {
- tunerTsRecord.isExtended = true;
- tunerTsRecord.pts = static_cast<long>(eventsExt[i].tsRecord().pts);
- tunerTsRecord.firstMbInSlice = static_cast<int>(eventsExt[i].tsRecord().firstMbInSlice);
- } else {
- tunerTsRecord.isExtended = false;
- }
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::tsRecord>(move(tunerTsRecord));
- res.push_back(move(tunerEvent));
- }
-}
-
-void TunerFilter::FilterCallback::getMmtpRecordEvent(vector<DemuxFilterEvent::Event>& events,
- vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
- for (int i = 0; i < events.size(); i++) {
- TunerFilterMmtpRecordEvent tunerMmtpRecord;
- DemuxFilterMmtpRecordEvent mmtpRecordEvent = events[i].mmtpRecord();
-
- tunerMmtpRecord.scHevcIndexMask = static_cast<int>(mmtpRecordEvent.scHevcIndexMask);
- tunerMmtpRecord.byteNumber = static_cast<long>(mmtpRecordEvent.byteNumber);
-
- if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
- DemuxFilterEventExt::Event::hidl_discriminator::mmtpRecord) {
- tunerMmtpRecord.isExtended = true;
- tunerMmtpRecord.pts = static_cast<long>(eventsExt[i].mmtpRecord().pts);
- tunerMmtpRecord.mpuSequenceNumber =
- static_cast<int>(eventsExt[i].mmtpRecord().mpuSequenceNumber);
- tunerMmtpRecord.firstMbInSlice =
- static_cast<int>(eventsExt[i].mmtpRecord().firstMbInSlice);
- tunerMmtpRecord.tsIndexMask = static_cast<int>(eventsExt[i].mmtpRecord().tsIndexMask);
- } else {
- tunerMmtpRecord.isExtended = false;
- }
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::mmtpRecord>(move(tunerMmtpRecord));
- res.push_back(move(tunerEvent));
- }
-}
-
-void TunerFilter::FilterCallback::getDownloadEvent(
- vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
- for (DemuxFilterEvent::Event e : events) {
- DemuxFilterDownloadEvent downloadEvent = e.download();
- TunerFilterDownloadEvent tunerDownload;
-
- tunerDownload.itemId = static_cast<int>(downloadEvent.itemId);
- tunerDownload.itemFragmentIndex = static_cast<int>(downloadEvent.itemFragmentIndex);
- tunerDownload.mpuSequenceNumber = static_cast<int>(downloadEvent.mpuSequenceNumber);
- tunerDownload.lastItemFragmentIndex = static_cast<int>(downloadEvent.lastItemFragmentIndex);
- tunerDownload.dataLength = static_cast<char16_t>(downloadEvent.dataLength);
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::download>(move(tunerDownload));
- res.push_back(move(tunerEvent));
- }
-}
-
-void TunerFilter::FilterCallback::getIpPayloadEvent(
- vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
- for (DemuxFilterEvent::Event e : events) {
- DemuxFilterIpPayloadEvent ipPayloadEvent = e.ipPayload();
- TunerFilterIpPayloadEvent tunerIpPayload;
-
- tunerIpPayload.dataLength = static_cast<char16_t>(ipPayloadEvent.dataLength);
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::ipPayload>(move(tunerIpPayload));
- res.push_back(move(tunerEvent));
- }
-}
-
-void TunerFilter::FilterCallback::getTemiEvent(
- vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
- for (DemuxFilterEvent::Event e : events) {
- DemuxFilterTemiEvent temiEvent = e.temi();
- TunerFilterTemiEvent tunerTemi;
-
- tunerTemi.pts = static_cast<long>(temiEvent.pts);
- tunerTemi.descrTag = static_cast<int8_t>(temiEvent.descrTag);
- vector<uint8_t> descrData = temiEvent.descrData;
- tunerTemi.descrData.resize(descrData.size());
- copy(descrData.begin(), descrData.end(), tunerTemi.descrData.begin());
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::temi>(move(tunerTemi));
- res.push_back(move(tunerEvent));
- }
-}
-
-void TunerFilter::FilterCallback::getMonitorEvent(
- vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
- DemuxFilterMonitorEvent monitorEvent = eventsExt[0].monitorEvent();
- TunerFilterMonitorEvent tunerMonitor;
-
- switch (monitorEvent.getDiscriminator()) {
- case DemuxFilterMonitorEvent::hidl_discriminator::scramblingStatus: {
- tunerMonitor.set<TunerFilterMonitorEvent::scramblingStatus>(
- static_cast<int>(monitorEvent.scramblingStatus()));
- break;
- }
- case DemuxFilterMonitorEvent::hidl_discriminator::cid: {
- tunerMonitor.set<TunerFilterMonitorEvent::cid>(static_cast<int>(monitorEvent.cid()));
- break;
- }
- }
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::monitor>(move(tunerMonitor));
- res.push_back(move(tunerEvent));
-}
-
-void TunerFilter::FilterCallback::getRestartEvent(
- vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::startId>(static_cast<int>(eventsExt[0].startId()));
- res.push_back(move(tunerEvent));
-}
+} // namespace tuner
+} // namespace tv
+} // namespace media
} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerFilter.h b/services/tuner/TunerFilter.h
index ff4728c..529c191 100644
--- a/services/tuner/TunerFilter.h
+++ b/services/tuner/TunerFilter.h
@@ -17,176 +17,107 @@
#ifndef ANDROID_MEDIA_TUNERFILTER_H
#define ANDROID_MEDIA_TUNERFILTER_H
+#include <aidl/android/hardware/tv/tuner/AvStreamType.h>
+#include <aidl/android/hardware/tv/tuner/BnFilterCallback.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterType.h>
+#include <aidl/android/hardware/tv/tuner/FilterDelayHint.h>
+#include <aidl/android/hardware/tv/tuner/IFilter.h>
#include <aidl/android/media/tv/tuner/BnTunerFilter.h>
#include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
-#include <aidlcommonsupport/NativeHandle.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <android/hardware/tv/tuner/1.1/IFilter.h>
-#include <android/hardware/tv/tuner/1.1/IFilterCallback.h>
-#include <android/hardware/tv/tuner/1.1/types.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <fmq/ConvertMQDescriptors.h>
-#include <fmq/MessageQueue.h>
+#include <utils/Mutex.h>
-using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::hardware::common::NativeHandle;
using ::aidl::android::hardware::common::fmq::MQDescriptor;
using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::AvStreamType;
+using ::aidl::android::hardware::tv::tuner::BnFilterCallback;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
+using ::aidl::android::hardware::tv::tuner::IFilter;
using ::aidl::android::media::tv::tuner::BnTunerFilter;
-using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
-using ::aidl::android::media::tv::tuner::TunerDemuxIpAddress;
-using ::aidl::android::media::tv::tuner::TunerFilterConfiguration;
-using ::aidl::android::media::tv::tuner::TunerFilterDownloadEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterIpPayloadEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMediaEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMmtpRecordEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterMonitorEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterPesEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterScIndexMask;
-using ::aidl::android::media::tv::tuner::TunerFilterSectionEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterSharedHandleInfo;
-using ::aidl::android::media::tv::tuner::TunerFilterSettings;
-using ::aidl::android::media::tv::tuner::TunerFilterTemiEvent;
-using ::aidl::android::media::tv::tuner::TunerFilterTsRecordEvent;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_array;
-using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterIpPayloadEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMediaEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMmtpRecordEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterRecordSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterTemiEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterTsRecordEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxIpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxPid;
-using ::android::hardware::tv::tuner::V1_0::IFilter;
-using ::android::hardware::tv::tuner::V1_1::AvStreamType;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
-using ::android::hardware::tv::tuner::V1_1::DemuxFilterTsRecordEventExt;
-using ::android::hardware::tv::tuner::V1_1::IFilterCallback;
+using ::android::Mutex;
+using namespace std;
+
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-using MQDesc = MQDescriptorSync<uint8_t>;
using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
-const static int IP_V4_LENGTH = 4;
-const static int IP_V6_LENGTH = 16;
-
class TunerFilter : public BnTunerFilter {
public:
- TunerFilter(sp<IFilter> filter, int mainType, int subTyp);
- virtual ~TunerFilter();
- Status getId(int32_t* _aidl_return) override;
- Status getId64Bit(int64_t* _aidl_return) override;
- Status getQueueDesc(AidlMQDesc* _aidl_return) override;
- Status configure(const TunerFilterConfiguration& config) override;
- Status configureMonitorEvent(int monitorEventType) override;
- Status configureIpFilterContextId(int cid) override;
- Status configureAvStreamType(int avStreamType) override;
- Status getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) override;
- Status releaseAvHandle(const ::aidl::android::hardware::common::NativeHandle& handle,
- int64_t avDataId) override;
- Status setDataSource(const std::shared_ptr<ITunerFilter>& filter) override;
- Status start() override;
- Status stop() override;
- Status flush() override;
- Status close() override;
- sp<IFilter> getHalFilter();
+ class FilterCallback : public BnFilterCallback {
+ public:
+ FilterCallback(const shared_ptr<ITunerFilterCallback>& tunerFilterCallback)
+ : mTunerFilterCallback(tunerFilterCallback), mOriginalCallback(nullptr){};
- struct FilterCallback : public IFilterCallback {
- FilterCallback(const std::shared_ptr<ITunerFilterCallback> tunerFilterCallback)
- : mTunerFilterCallback(tunerFilterCallback) {};
+ ::ndk::ScopedAStatus onFilterEvent(const vector<DemuxFilterEvent>& events) override;
+ ::ndk::ScopedAStatus onFilterStatus(DemuxFilterStatus status) override;
- virtual Return<void> onFilterEvent(const DemuxFilterEvent& filterEvent);
- virtual Return<void> onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
- const DemuxFilterEventExt& filterEventExt);
- virtual Return<void> onFilterStatus(DemuxFilterStatus status);
+ void sendSharedFilterStatus(int32_t status);
+ void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+ void detachSharedFilterCallback();
- void getAidlFilterEvent(std::vector<DemuxFilterEvent::Event>& events,
- std::vector<DemuxFilterEventExt::Event>& eventsExt,
- std::vector<TunerFilterEvent>& tunerEvent);
-
- void getMediaEvent(
- std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
- void getSectionEvent(
- std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
- void getPesEvent(
- std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
- void getTsRecordEvent(
- std::vector<DemuxFilterEvent::Event>& events,
- std::vector<DemuxFilterEventExt::Event>& eventsExt,
- std::vector<TunerFilterEvent>& res);
- void getMmtpRecordEvent(
- std::vector<DemuxFilterEvent::Event>& events,
- std::vector<DemuxFilterEventExt::Event>& eventsExt,
- std::vector<TunerFilterEvent>& res);
- void getDownloadEvent(
- std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
- void getIpPayloadEvent(
- std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
- void getTemiEvent(
- std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
- void getMonitorEvent(
- std::vector<DemuxFilterEventExt::Event>& eventsExt,
- std::vector<TunerFilterEvent>& res);
- void getRestartEvent(
- std::vector<DemuxFilterEventExt::Event>& eventsExt,
- std::vector<TunerFilterEvent>& res);
-
- std::shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+ private:
+ shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
+ shared_ptr<ITunerFilterCallback> mOriginalCallback;
+ Mutex mCallbackLock;
};
+ TunerFilter(shared_ptr<IFilter> filter, shared_ptr<FilterCallback> cb, DemuxFilterType type);
+ virtual ~TunerFilter();
+
+ ::ndk::ScopedAStatus getId(int32_t* _aidl_return) override;
+ ::ndk::ScopedAStatus getId64Bit(int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+ ::ndk::ScopedAStatus configure(const DemuxFilterSettings& in_settings) override;
+ ::ndk::ScopedAStatus configureMonitorEvent(int32_t in_monitorEventTypes) override;
+ ::ndk::ScopedAStatus configureIpFilterContextId(int32_t in_cid) override;
+ ::ndk::ScopedAStatus configureAvStreamType(const AvStreamType& in_avStreamType) override;
+ ::ndk::ScopedAStatus getAvSharedHandle(NativeHandle* out_avMemory,
+ int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus releaseAvHandle(const NativeHandle& in_handle,
+ int64_t in_avDataId) override;
+ ::ndk::ScopedAStatus setDataSource(const shared_ptr<ITunerFilter>& in_filter) override;
+ ::ndk::ScopedAStatus start() override;
+ ::ndk::ScopedAStatus stop() override;
+ ::ndk::ScopedAStatus flush() override;
+ ::ndk::ScopedAStatus close() override;
+ ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
+ ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
+ ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
+ ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;
+
+ bool isSharedFilterAllowed(int32_t pid);
+ void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
+ shared_ptr<IFilter> getHalFilter();
+
private:
- DemuxFilterAvSettings getAvSettings(const TunerFilterSettings& settings);
- DemuxFilterSectionSettings getSectionSettings(const TunerFilterSettings& settings);
- DemuxFilterPesDataSettings getPesDataSettings(const TunerFilterSettings& settings);
- DemuxFilterRecordSettings getRecordSettings(const TunerFilterSettings& settings);
- DemuxFilterDownloadSettings getDownloadSettings(const TunerFilterSettings& settings);
-
- bool isAudioFilter();
- bool isVideoFilter();
- bool getHidlAvStreamType(int avStreamType, AvStreamType& type);
-
- void getHidlTsSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
- void getHidlMmtpSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
- void getHidlIpSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
- void getHidlTlvSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
- void getHidlAlpSettings(
- const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
-
- hidl_array<uint8_t, IP_V4_LENGTH> getIpV4Address(TunerDemuxIpAddress addr);
- hidl_array<uint8_t, IP_V6_LENGTH> getIpV6Address(TunerDemuxIpAddress addr);
-
- sp<IFilter> mFilter;
- sp<::android::hardware::tv::tuner::V1_1::IFilter> mFilter_1_1;
+ shared_ptr<IFilter> mFilter;
int32_t mId;
int64_t mId64Bit;
- int mMainType;
- int mSubType;
+ DemuxFilterType mType;
+ bool mStarted;
+ bool mShared;
+ int32_t mClientPid;
+ shared_ptr<FilterCallback> mFilterCallback;
+ Mutex mLock;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERFILTER_H
diff --git a/services/tuner/TunerFrontend.cpp b/services/tuner/TunerFrontend.cpp
index 74b5519..e86e8e1 100644
--- a/services/tuner/TunerFrontend.cpp
+++ b/services/tuner/TunerFrontend.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,1081 +14,186 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
#define LOG_TAG "TunerFrontend"
#include "TunerFrontend.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
#include "TunerLnb.h"
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3PlpSettings;
-using ::aidl::android::media::tv::tuner::TunerFrontendScanAtsc3PlpInfo;
-using ::aidl::android::media::tv::tuner::TunerFrontendStatusAtsc3PlpInfo;
-using ::aidl::android::media::tv::tuner::TunerFrontendUnionSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendAnalogSifStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendAnalogType;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Bandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3CodeRate;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3DemodOutputFormat;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Fec;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Modulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3TimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcAnnex;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcOuterFec;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbcSpectralInversion;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsPilot;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsRolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsVcmMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtBandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendInnerFec;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Coderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Modulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Rolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Settings;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsRolloff;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbsStreamIdType;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtBandwidth;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtCoderate;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtGuardInterval;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtMode;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtModulation;
-using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendModulationStatus;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanAtsc3PlpInfo;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanType;
-using ::android::hardware::tv::tuner::V1_0::FrontendStatusType;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::FrontendAnalogAftFlag;
-using ::android::hardware::tv::tuner::V1_1::FrontendBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendCableTimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDvbcBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbBandwidth;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbCodeRate;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbGuardInterval;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbModulation;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTimeInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTransmissionMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendDvbsScanType;
-using ::android::hardware::tv::tuner::V1_1::FrontendGuardInterval;
-using ::android::hardware::tv::tuner::V1_1::FrontendInterleaveMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendModulation;
-using ::android::hardware::tv::tuner::V1_1::FrontendRollOff;
-using ::android::hardware::tv::tuner::V1_1::FrontendTransmissionMode;
-using ::android::hardware::tv::tuner::V1_1::FrontendSpectralInversion;
-using ::android::hardware::tv::tuner::V1_1::FrontendStatusTypeExt1_1;
+using ::aidl::android::hardware::tv::tuner::Result;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-TunerFrontend::TunerFrontend(sp<IFrontend> frontend, int id) {
+TunerFrontend::TunerFrontend(shared_ptr<IFrontend> frontend, int id) {
mFrontend = frontend;
- mFrontend_1_1 = ::android::hardware::tv::tuner::V1_1::IFrontend::castFrom(mFrontend);
mId = id;
}
TunerFrontend::~TunerFrontend() {
- mFrontend = NULL;
- mFrontend_1_1 = NULL;
+ mFrontend = nullptr;
mId = -1;
}
-Status TunerFrontend::setCallback(
+::ndk::ScopedAStatus TunerFrontend::setCallback(
const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) {
- if (mFrontend == NULL) {
+ if (mFrontend == nullptr) {
ALOGE("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- if (tunerFrontendCallback == NULL) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ if (tunerFrontendCallback == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
}
- sp<IFrontendCallback> frontendCallback = new FrontendCallback(tunerFrontendCallback);
- Result status = mFrontend->setCallback(frontendCallback);
- if (status == Result::SUCCESS) {
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ shared_ptr<IFrontendCallback> frontendCallback =
+ ::ndk::SharedRefBase::make<FrontendCallback>(tunerFrontendCallback);
+ return mFrontend->setCallback(frontendCallback);
}
-Status TunerFrontend::tune(const TunerFrontendSettings& settings) {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::tune(const FrontendSettings& settings) {
+ if (mFrontend == nullptr) {
ALOGE("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status;
- FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
- if (settings.isExtended) {
- if (mFrontend_1_1 == NULL) {
- ALOGE("IFrontend_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
- FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
- status = mFrontend_1_1->tune_1_1(frontendSettings, frontendSettingsExt);
- } else {
- status = mFrontend->tune(frontendSettings);
- }
-
- if (status == Result::SUCCESS) {
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return mFrontend->tune(settings);
}
-Status TunerFrontend::stopTune() {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::stopTune() {
+ if (mFrontend == nullptr) {
ALOGD("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mFrontend->stopTune();
- if (status == Result::SUCCESS) {
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return mFrontend->stopTune();
}
-Status TunerFrontend::scan(const TunerFrontendSettings& settings, int frontendScanType) {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::scan(const FrontendSettings& settings,
+ FrontendScanType frontendScanType) {
+ if (mFrontend == nullptr) {
ALOGD("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status;
- FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
- if (settings.isExtended) {
- if (mFrontend_1_1 == NULL) {
- ALOGE("IFrontend_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
- FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
- status = mFrontend_1_1->scan_1_1(frontendSettings,
- static_cast<FrontendScanType>(frontendScanType), frontendSettingsExt);
- } else {
- status = mFrontend->scan(
- frontendSettings, static_cast<FrontendScanType>(frontendScanType));
- }
-
- if (status == Result::SUCCESS) {
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return mFrontend->scan(settings, frontendScanType);
}
-Status TunerFrontend::stopScan() {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::stopScan() {
+ if (mFrontend == nullptr) {
ALOGD("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mFrontend->stopScan();
- if (status == Result::SUCCESS) {
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return mFrontend->stopScan();
}
-Status TunerFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
+ if (mFrontend == nullptr) {
ALOGD("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mFrontend->setLnb(static_cast<TunerLnb*>(lnb.get())->getId());
- if (status == Result::SUCCESS) {
- return Status::ok();
+ if (lnb == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
}
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return mFrontend->setLnb(static_cast<TunerLnb*>(lnb.get())->getId());
}
-Status TunerFrontend::setLna(bool bEnable) {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::setLna(bool bEnable) {
+ if (mFrontend == nullptr) {
ALOGD("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mFrontend->setLna(bEnable);
- if (status == Result::SUCCESS) {
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return mFrontend->setLna(bEnable);
}
-Status TunerFrontend::linkCiCamToFrontend(int ciCamId, int32_t* _aidl_return) {
- if (mFrontend_1_1 == NULL) {
- ALOGD("IFrontend_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- int ltsId;
- Result status;
- mFrontend_1_1->linkCiCam(static_cast<uint32_t>(ciCamId),
- [&](Result r, uint32_t id) {
- status = r;
- ltsId = id;
- });
-
- if (status == Result::SUCCESS) {
- *_aidl_return = ltsId;
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-}
-
-Status TunerFrontend::unlinkCiCamToFrontend(int ciCamId) {
- if (mFrontend_1_1 == NULL) {
- ALOGD("IFrontend_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- Result status = mFrontend_1_1->unlinkCiCam(ciCamId);
- if (status == Result::SUCCESS) {
- return Status::ok();
- }
-
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
-}
-
-Status TunerFrontend::close() {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::linkCiCamToFrontend(int32_t ciCamId, int32_t* _aidl_return) {
+ if (mFrontend == nullptr) {
ALOGD("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mFrontend->close();
- mFrontend = NULL;
- mFrontend_1_1 = NULL;
-
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ return mFrontend->linkCiCam(ciCamId, _aidl_return);
}
-Status TunerFrontend::getStatus(const vector<int32_t>& statusTypes,
- vector<TunerFrontendStatus>* _aidl_return) {
- if (mFrontend == NULL) {
+::ndk::ScopedAStatus TunerFrontend::unlinkCiCamToFrontend(int32_t ciCamId) {
+ if (mFrontend == nullptr) {
ALOGD("IFrontend is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res;
- vector<FrontendStatus> status;
- vector<FrontendStatusType> types;
- for (auto s : statusTypes) {
- types.push_back(static_cast<FrontendStatusType>(s));
- }
-
- mFrontend->getStatus(types, [&](Result r, const hidl_vec<FrontendStatus>& s) {
- res = r;
- status = s;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
-
- getAidlFrontendStatus(status, *_aidl_return);
- return Status::ok();
+ return mFrontend->unlinkCiCam(ciCamId);
}
-Status TunerFrontend::getStatusExtended_1_1(const vector<int32_t>& statusTypes,
- vector<TunerFrontendStatus>* _aidl_return) {
- if (mFrontend_1_1 == NULL) {
- ALOGD("IFrontend_1_1 is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+::ndk::ScopedAStatus TunerFrontend::close() {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res;
- vector<FrontendStatusExt1_1> status;
- vector<FrontendStatusTypeExt1_1> types;
- for (auto s : statusTypes) {
- types.push_back(static_cast<FrontendStatusTypeExt1_1>(s));
- }
+ auto res = mFrontend->close();
+ mFrontend = nullptr;
- mFrontend_1_1->getStatusExt1_1(types, [&](Result r, const hidl_vec<FrontendStatusExt1_1>& s) {
- res = r;
- status = s;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
-
- getAidlFrontendStatusExt(status, *_aidl_return);
- return Status::ok();
+ return res;
}
-Status TunerFrontend::getFrontendId(int* _aidl_return) {
+::ndk::ScopedAStatus TunerFrontend::getStatus(const vector<FrontendStatusType>& in_statusTypes,
+ vector<FrontendStatus>* _aidl_return) {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ return mFrontend->getStatus(in_statusTypes, _aidl_return);
+}
+
+::ndk::ScopedAStatus TunerFrontend::getFrontendId(int32_t* _aidl_return) {
*_aidl_return = mId;
- return Status::ok();
+ return ::ndk::ScopedAStatus::ok();
}
/////////////// FrontendCallback ///////////////////////
-
-Return<void> TunerFrontend::FrontendCallback::onEvent(FrontendEventType frontendEventType) {
- ALOGD("FrontendCallback::onEvent, type=%d", frontendEventType);
- mTunerFrontendCallback->onEvent((int)frontendEventType);
- return Void();
+::ndk::ScopedAStatus TunerFrontend::FrontendCallback::onEvent(FrontendEventType frontendEventType) {
+ ALOGV("FrontendCallback::onEvent, type=%d", frontendEventType);
+ if (mTunerFrontendCallback != nullptr) {
+ mTunerFrontendCallback->onEvent(frontendEventType);
+ }
+ return ndk::ScopedAStatus::ok();
}
-Return<void> TunerFrontend::FrontendCallback::onScanMessage(
+::ndk::ScopedAStatus TunerFrontend::FrontendCallback::onScanMessage(
FrontendScanMessageType type, const FrontendScanMessage& message) {
- ALOGD("FrontendCallback::onScanMessage, type=%d", type);
- TunerFrontendScanMessage scanMessage;
- switch(type) {
- case FrontendScanMessageType::LOCKED: {
- scanMessage.set<TunerFrontendScanMessage::isLocked>(message.isLocked());
- break;
- }
- case FrontendScanMessageType::END: {
- scanMessage.set<TunerFrontendScanMessage::isEnd>(message.isEnd());
- break;
- }
- case FrontendScanMessageType::PROGRESS_PERCENT: {
- scanMessage.set<TunerFrontendScanMessage::progressPercent>(message.progressPercent());
- break;
- }
- case FrontendScanMessageType::FREQUENCY: {
- auto f = message.frequencies();
- vector<int> frequencies(begin(f), end(f));
- scanMessage.set<TunerFrontendScanMessage::frequencies>(frequencies);
- break;
- }
- case FrontendScanMessageType::SYMBOL_RATE: {
- auto s = message.symbolRates();
- vector<int> symbolRates(begin(s), end(s));
- scanMessage.set<TunerFrontendScanMessage::symbolRates>(symbolRates);
- break;
- }
- case FrontendScanMessageType::HIERARCHY: {
- scanMessage.set<TunerFrontendScanMessage::hierarchy>((int)message.hierarchy());
- break;
- }
- case FrontendScanMessageType::ANALOG_TYPE: {
- scanMessage.set<TunerFrontendScanMessage::analogType>((int)message.analogType());
- break;
- }
- case FrontendScanMessageType::PLP_IDS: {
- auto p = message.plpIds();
- vector<uint8_t> plpIds(begin(p), end(p));
- scanMessage.set<TunerFrontendScanMessage::plpIds>(plpIds);
- break;
- }
- case FrontendScanMessageType::GROUP_IDS: {
- auto g = message.groupIds();
- vector<uint8_t> groupIds(begin(g), end(g));
- scanMessage.set<TunerFrontendScanMessage::groupIds>(groupIds);
- break;
- }
- case FrontendScanMessageType::INPUT_STREAM_IDS: {
- auto i = message.inputStreamIds();
- vector<char16_t> streamIds(begin(i), end(i));
- scanMessage.set<TunerFrontendScanMessage::inputStreamIds>(streamIds);
- break;
- }
- case FrontendScanMessageType::STANDARD: {
- FrontendScanMessage::Standard std = message.std();
- int standard;
- if (std.getDiscriminator() == FrontendScanMessage::Standard::hidl_discriminator::sStd) {
- standard = (int) std.sStd();
- } else if (std.getDiscriminator() ==
- FrontendScanMessage::Standard::hidl_discriminator::tStd) {
- standard = (int) std.tStd();
- } else if (std.getDiscriminator() ==
- FrontendScanMessage::Standard::hidl_discriminator::sifStd) {
- standard = (int) std.sifStd();
- }
- scanMessage.set<TunerFrontendScanMessage::std>(standard);
- break;
- }
- case FrontendScanMessageType::ATSC3_PLP_INFO: {
- vector<FrontendScanAtsc3PlpInfo> plpInfos = message.atsc3PlpInfos();
- vector<TunerFrontendScanAtsc3PlpInfo> tunerPlpInfos;
- for (int i = 0; i < plpInfos.size(); i++) {
- auto info = plpInfos[i];
- int8_t plpId = (int8_t) info.plpId;
- bool lls = (bool) info.bLlsFlag;
- TunerFrontendScanAtsc3PlpInfo plpInfo{
- .plpId = plpId,
- .llsFlag = lls,
- };
- tunerPlpInfos.push_back(plpInfo);
- }
- scanMessage.set<TunerFrontendScanMessage::atsc3PlpInfos>(tunerPlpInfos);
- break;
- }
- default:
- break;
+ ALOGV("FrontendCallback::onScanMessage, type=%d", type);
+ if (mTunerFrontendCallback != nullptr) {
+ mTunerFrontendCallback->onScanMessage(type, message);
}
- mTunerFrontendCallback->onScanMessage((int)type, scanMessage);
- return Void();
+ return ndk::ScopedAStatus::ok();
}
-Return<void> TunerFrontend::FrontendCallback::onScanMessageExt1_1(
- FrontendScanMessageTypeExt1_1 type, const FrontendScanMessageExt1_1& message) {
- ALOGD("onScanMessageExt1_1::onScanMessage, type=%d", type);
- TunerFrontendScanMessage scanMessage;
- switch(type) {
- case FrontendScanMessageTypeExt1_1::MODULATION: {
- FrontendModulation m = message.modulation();
- int modulation;
- switch (m.getDiscriminator()) {
- case FrontendModulation::hidl_discriminator::dvbc:
- modulation = (int) m.dvbc();
- break;
- case FrontendModulation::hidl_discriminator::dvbt:
- modulation = (int) m.dvbt();
- break;
- case FrontendModulation::hidl_discriminator::dvbs:
- modulation = (int) m.dvbs();
- break;
- case FrontendModulation::hidl_discriminator::isdbs:
- modulation = (int) m.isdbs();
- break;
- case FrontendModulation::hidl_discriminator::isdbs3:
- modulation = (int) m.isdbs3();
- break;
- case FrontendModulation::hidl_discriminator::isdbt:
- modulation = (int) m.isdbt();
- break;
- case FrontendModulation::hidl_discriminator::atsc:
- modulation = (int) m.atsc();
- break;
- case FrontendModulation::hidl_discriminator::atsc3:
- modulation = (int) m.atsc3();
- break;
- case FrontendModulation::hidl_discriminator::dtmb:
- modulation = (int) m.dtmb();
- break;
- }
- scanMessage.set<TunerFrontendScanMessage::modulation>(modulation);
- break;
- }
- case FrontendScanMessageTypeExt1_1::DVBC_ANNEX: {
- scanMessage.set<TunerFrontendScanMessage::annex>((int)message.annex());
- break;
- }
- case FrontendScanMessageTypeExt1_1::HIGH_PRIORITY: {
- scanMessage.set<TunerFrontendScanMessage::isHighPriority>(message.isHighPriority());
- break;
- }
- default:
- break;
- }
- mTunerFrontendCallback->onScanMessage((int)type, scanMessage);
- return Void();
-}
-
-/////////////// TunerFrontend Helper Methods ///////////////////////
-
-void TunerFrontend::getAidlFrontendStatus(
- vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
- for (FrontendStatus s : hidlStatus) {
- TunerFrontendStatus status;
- switch (s.getDiscriminator()) {
- case FrontendStatus::hidl_discriminator::isDemodLocked: {
- status.set<TunerFrontendStatus::isDemodLocked>(s.isDemodLocked());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::snr: {
- status.set<TunerFrontendStatus::snr>((int)s.snr());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::ber: {
- status.set<TunerFrontendStatus::ber>((int)s.ber());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::per: {
- status.set<TunerFrontendStatus::per>((int)s.per());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::preBer: {
- status.set<TunerFrontendStatus::preBer>((int)s.preBer());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::signalQuality: {
- status.set<TunerFrontendStatus::signalQuality>((int)s.signalQuality());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::signalStrength: {
- status.set<TunerFrontendStatus::signalStrength>((int)s.signalStrength());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::symbolRate: {
- status.set<TunerFrontendStatus::symbolRate>((int)s.symbolRate());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::innerFec: {
- status.set<TunerFrontendStatus::innerFec>((long)s.innerFec());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::modulation: {
- switch (s.modulation().getDiscriminator()) {
- case FrontendModulationStatus::hidl_discriminator::dvbc:
- status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbc());
- aidlStatus.push_back(status);
- break;
- case FrontendModulationStatus::hidl_discriminator::dvbs:
- status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbs());
- aidlStatus.push_back(status);
- break;
- case FrontendModulationStatus::hidl_discriminator::isdbs:
- status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs());
- aidlStatus.push_back(status);
- break;
- case FrontendModulationStatus::hidl_discriminator::isdbs3:
- status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs3());
- aidlStatus.push_back(status);
- break;
- case FrontendModulationStatus::hidl_discriminator::isdbt:
- status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbt());
- aidlStatus.push_back(status);
- break;
- }
- break;
- }
- case FrontendStatus::hidl_discriminator::inversion: {
- status.set<TunerFrontendStatus::inversion>((int)s.inversion());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::lnbVoltage: {
- status.set<TunerFrontendStatus::lnbVoltage>((int)s.lnbVoltage());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::plpId: {
- status.set<TunerFrontendStatus::plpId>((int8_t)s.plpId());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::isEWBS: {
- status.set<TunerFrontendStatus::isEWBS>(s.isEWBS());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::agc: {
- status.set<TunerFrontendStatus::agc>((int8_t)s.agc());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::isLnaOn: {
- status.set<TunerFrontendStatus::isLnaOn>(s.isLnaOn());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::isLayerError: {
- vector<bool> e(s.isLayerError().begin(), s.isLayerError().end());
- status.set<TunerFrontendStatus::isLayerError>(e);
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::mer: {
- status.set<TunerFrontendStatus::mer>((int)s.mer());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::freqOffset: {
- status.set<TunerFrontendStatus::freqOffset>((int)s.freqOffset());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::hierarchy: {
- status.set<TunerFrontendStatus::hierarchy>((int)s.hierarchy());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::isRfLocked: {
- status.set<TunerFrontendStatus::isRfLocked>(s.isRfLocked());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatus::hidl_discriminator::plpInfo: {
- vector<TunerFrontendStatusAtsc3PlpInfo> info;
- for (auto i : s.plpInfo()) {
- info.push_back({
- .plpId = (int8_t)i.plpId,
- .isLocked = i.isLocked,
- .uec = (int)i.uec,
- });
- }
- status.set<TunerFrontendStatus::plpInfo>(info);
- aidlStatus.push_back(status);
- break;
- }
- }
- }
-}
-
-void TunerFrontend::getAidlFrontendStatusExt(
- vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
- for (FrontendStatusExt1_1 s : hidlStatus) {
- TunerFrontendStatus status;
- switch (s.getDiscriminator()) {
- case FrontendStatusExt1_1::hidl_discriminator::modulations: {
- vector<int> aidlMod;
- for (auto m : s.modulations()) {
- switch (m.getDiscriminator()) {
- case FrontendModulation::hidl_discriminator::dvbc:
- aidlMod.push_back((int)m.dvbc());
- break;
- case FrontendModulation::hidl_discriminator::dvbs:
- aidlMod.push_back((int)m.dvbs());
- break;
- case FrontendModulation::hidl_discriminator::dvbt:
- aidlMod.push_back((int)m.dvbt());
- break;
- case FrontendModulation::hidl_discriminator::isdbs:
- aidlMod.push_back((int)m.isdbs());
- break;
- case FrontendModulation::hidl_discriminator::isdbs3:
- aidlMod.push_back((int)m.isdbs3());
- break;
- case FrontendModulation::hidl_discriminator::isdbt:
- aidlMod.push_back((int)m.isdbt());
- break;
- case FrontendModulation::hidl_discriminator::atsc:
- aidlMod.push_back((int)m.atsc());
- break;
- case FrontendModulation::hidl_discriminator::atsc3:
- aidlMod.push_back((int)m.atsc3());
- break;
- case FrontendModulation::hidl_discriminator::dtmb:
- aidlMod.push_back((int)m.dtmb());
- break;
- }
- }
- status.set<TunerFrontendStatus::modulations>(aidlMod);
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::bers: {
- vector<int> b(s.bers().begin(), s.bers().end());
- status.set<TunerFrontendStatus::bers>(b);
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::codeRates: {
- vector<int64_t> codeRates;
- for (auto c : s.codeRates()) {
- codeRates.push_back((long)c);
- }
- status.set<TunerFrontendStatus::codeRates>(codeRates);
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::bandwidth: {
- switch (s.bandwidth().getDiscriminator()) {
- case FrontendBandwidth::hidl_discriminator::atsc3:
- status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().atsc3());
- break;
- case FrontendBandwidth::hidl_discriminator::dvbc:
- status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbc());
- break;
- case FrontendBandwidth::hidl_discriminator::dvbt:
- status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbt());
- break;
- case FrontendBandwidth::hidl_discriminator::isdbt:
- status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().isdbt());
- break;
- case FrontendBandwidth::hidl_discriminator::dtmb:
- status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dtmb());
- break;
- }
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::interval: {
- switch (s.interval().getDiscriminator()) {
- case FrontendGuardInterval::hidl_discriminator::dvbt:
- status.set<TunerFrontendStatus::interval>((int)s.interval().dvbt());
- break;
- case FrontendGuardInterval::hidl_discriminator::isdbt:
- status.set<TunerFrontendStatus::interval>((int)s.interval().isdbt());
- break;
- case FrontendGuardInterval::hidl_discriminator::dtmb:
- status.set<TunerFrontendStatus::interval>((int)s.interval().dtmb());
- break;
- }
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::transmissionMode: {
- switch (s.transmissionMode().getDiscriminator()) {
- case FrontendTransmissionMode::hidl_discriminator::dvbt:
- status.set<TunerFrontendStatus::transmissionMode>(
- (int)s.transmissionMode().dvbt());
- break;
- case FrontendTransmissionMode::hidl_discriminator::isdbt:
- status.set<TunerFrontendStatus::transmissionMode>(
- (int)s.transmissionMode().isdbt());
- break;
- case FrontendTransmissionMode::hidl_discriminator::dtmb:
- status.set<TunerFrontendStatus::transmissionMode>(
- (int)s.transmissionMode().dtmb());
- break;
- }
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::uec: {
- status.set<TunerFrontendStatus::uec>((int)s.uec());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::systemId: {
- status.set<TunerFrontendStatus::systemId>((char16_t)s.systemId());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::interleaving: {
- vector<int> aidlInter;
- for (auto i : s.interleaving()) {
- switch (i.getDiscriminator()) {
- case FrontendInterleaveMode::hidl_discriminator::atsc3:
- aidlInter.push_back((int)i.atsc3());
- break;
- case FrontendInterleaveMode::hidl_discriminator::dvbc:
- aidlInter.push_back((int)i.dvbc());
- break;
- case FrontendInterleaveMode::hidl_discriminator::dtmb:
- aidlInter.push_back((int)i.dtmb());
- break;
- }
- }
- status.set<TunerFrontendStatus::interleaving>(aidlInter);
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::isdbtSegment: {
- auto seg = s.isdbtSegment();
- vector<uint8_t> i(seg.begin(), seg.end());
- status.set<TunerFrontendStatus::isdbtSegment>(i);
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::tsDataRate: {
- vector<int> ts(s.tsDataRate().begin(), s.tsDataRate().end());
- status.set<TunerFrontendStatus::tsDataRate>(ts);
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::rollOff: {
- switch (s.rollOff().getDiscriminator()) {
- case FrontendRollOff::hidl_discriminator::dvbs:
- status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().dvbs());
- break;
- case FrontendRollOff::hidl_discriminator::isdbs:
- status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs());
- break;
- case FrontendRollOff::hidl_discriminator::isdbs3:
- status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs3());
- break;
- }
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::isMiso: {
- status.set<TunerFrontendStatus::isMiso>(s.isMiso());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::isLinear: {
- status.set<TunerFrontendStatus::isLinear>(s.isLinear());
- aidlStatus.push_back(status);
- break;
- }
- case FrontendStatusExt1_1::hidl_discriminator::isShortFrames: {
- status.set<TunerFrontendStatus::isShortFrames>(s.isShortFrames());
- aidlStatus.push_back(status);
- break;
- }
- }
- }
-}
-
-hidl_vec<FrontendAtsc3PlpSettings> TunerFrontend::getAtsc3PlpSettings(
- const TunerFrontendAtsc3Settings& settings) {
- int len = settings.plpSettings.size();
- hidl_vec<FrontendAtsc3PlpSettings> plps = hidl_vec<FrontendAtsc3PlpSettings>(len);
- // parse PLP settings
- for (int i = 0; i < len; i++) {
- uint8_t plpId = static_cast<uint8_t>(settings.plpSettings[i].plpId);
- FrontendAtsc3Modulation modulation =
- static_cast<FrontendAtsc3Modulation>(settings.plpSettings[i].modulation);
- FrontendAtsc3TimeInterleaveMode interleaveMode =
- static_cast<FrontendAtsc3TimeInterleaveMode>(
- settings.plpSettings[i].interleaveMode);
- FrontendAtsc3CodeRate codeRate =
- static_cast<FrontendAtsc3CodeRate>(settings.plpSettings[i].codeRate);
- FrontendAtsc3Fec fec =
- static_cast<FrontendAtsc3Fec>(settings.plpSettings[i].fec);
- FrontendAtsc3PlpSettings frontendAtsc3PlpSettings {
- .plpId = plpId,
- .modulation = modulation,
- .interleaveMode = interleaveMode,
- .codeRate = codeRate,
- .fec = fec,
- };
- plps[i] = frontendAtsc3PlpSettings;
- }
- return plps;
-}
-
-FrontendDvbsCodeRate TunerFrontend::getDvbsCodeRate(const TunerFrontendDvbsCodeRate& codeRate) {
- FrontendInnerFec innerFec = static_cast<FrontendInnerFec>(codeRate.fec);
- bool isLinear = codeRate.isLinear;
- bool isShortFrames = codeRate.isShortFrames;
- uint32_t bitsPer1000Symbol = static_cast<uint32_t>(codeRate.bitsPer1000Symbol);
- FrontendDvbsCodeRate coderate {
- .fec = innerFec,
- .isLinear = isLinear,
- .isShortFrames = isShortFrames,
- .bitsPer1000Symbol = bitsPer1000Symbol,
- };
- return coderate;
-}
-
-FrontendSettings TunerFrontend::getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings) {
- auto settings = aidlSettings.settings;
- FrontendSettings frontendSettings;
-
- switch (settings.getTag()) {
- case TunerFrontendUnionSettings::analog: {
- auto analog = settings.get<TunerFrontendUnionSettings::analog>();
- frontendSettings.analog({
- .frequency = static_cast<uint32_t>(analog.frequency),
- .type = static_cast<FrontendAnalogType>(analog.signalType),
- .sifStandard = static_cast<FrontendAnalogSifStandard>(analog.sifStandard),
- });
- break;
- }
- case TunerFrontendUnionSettings::atsc: {
- auto atsc = settings.get<TunerFrontendUnionSettings::atsc>();
- frontendSettings.atsc({
- .frequency = static_cast<uint32_t>(atsc.frequency),
- .modulation = static_cast<FrontendAtscModulation>(atsc.modulation),
- });
- break;
- }
- case TunerFrontendUnionSettings::atsc3: {
- auto atsc3 = settings.get<TunerFrontendUnionSettings::atsc3>();
- frontendSettings.atsc3({
- .frequency = static_cast<uint32_t>(atsc3.frequency),
- .bandwidth = static_cast<FrontendAtsc3Bandwidth>(atsc3.bandwidth),
- .demodOutputFormat = static_cast<FrontendAtsc3DemodOutputFormat>(
- atsc3.demodOutputFormat),
- .plpSettings = getAtsc3PlpSettings(atsc3),
- });
- break;
- }
- case TunerFrontendUnionSettings::cable: {
- auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
- frontendSettings.dvbc({
- .frequency = static_cast<uint32_t>(dvbc.frequency),
- .modulation = static_cast<FrontendDvbcModulation>(dvbc.modulation),
- .fec = static_cast<FrontendInnerFec>(dvbc.innerFec),
- .symbolRate = static_cast<uint32_t>(dvbc.symbolRate),
- .outerFec = static_cast<FrontendDvbcOuterFec>(dvbc.outerFec),
- .annex = static_cast<FrontendDvbcAnnex>(dvbc.annex),
- .spectralInversion = static_cast<FrontendDvbcSpectralInversion>(
- dvbc.spectralInversion),
- });
- break;
- }
- case TunerFrontendUnionSettings::dvbs: {
- auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
- frontendSettings.dvbs({
- .frequency = static_cast<uint32_t>(dvbs.frequency),
- .modulation = static_cast<FrontendDvbsModulation>(dvbs.modulation),
- .coderate = getDvbsCodeRate(dvbs.codeRate),
- .symbolRate = static_cast<uint32_t>(dvbs.symbolRate),
- .rolloff = static_cast<FrontendDvbsRolloff>(dvbs.rolloff),
- .pilot = static_cast<FrontendDvbsPilot>(dvbs.pilot),
- .inputStreamId = static_cast<uint32_t>(dvbs.inputStreamId),
- .standard = static_cast<FrontendDvbsStandard>(dvbs.standard),
- .vcmMode = static_cast<FrontendDvbsVcmMode>(dvbs.vcm),
- });
- break;
- }
- case TunerFrontendUnionSettings::dvbt: {
- auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
- frontendSettings.dvbt({
- .frequency = static_cast<uint32_t>(dvbt.frequency),
- .transmissionMode = static_cast<FrontendDvbtTransmissionMode>(
- dvbt.transmissionMode),
- .bandwidth = static_cast<FrontendDvbtBandwidth>(dvbt.bandwidth),
- .constellation = static_cast<FrontendDvbtConstellation>(dvbt.constellation),
- .hierarchy = static_cast<FrontendDvbtHierarchy>(dvbt.hierarchy),
- .hpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.hpCodeRate),
- .lpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.lpCodeRate),
- .guardInterval = static_cast<FrontendDvbtGuardInterval>(dvbt.guardInterval),
- .isHighPriority = dvbt.isHighPriority,
- .standard = static_cast<FrontendDvbtStandard>(dvbt.standard),
- .isMiso = dvbt.isMiso,
- .plpMode = static_cast<FrontendDvbtPlpMode>(dvbt.plpMode),
- .plpId = static_cast<uint8_t>(dvbt.plpId),
- .plpGroupId = static_cast<uint8_t>(dvbt.plpGroupId),
- });
- break;
- }
- case TunerFrontendUnionSettings::isdbs: {
- auto isdbs = settings.get<TunerFrontendUnionSettings::isdbs>();
- frontendSettings.isdbs({
- .frequency = static_cast<uint32_t>(isdbs.frequency),
- .streamId = static_cast<uint16_t>(isdbs.streamId),
- .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs.streamIdType),
- .modulation = static_cast<FrontendIsdbsModulation>(isdbs.modulation),
- .coderate = static_cast<FrontendIsdbsCoderate>(isdbs.codeRate),
- .symbolRate = static_cast<uint32_t>(isdbs.symbolRate),
- .rolloff = static_cast<FrontendIsdbsRolloff>(isdbs.rolloff),
- });
- break;
- }
- case TunerFrontendUnionSettings::isdbs3: {
- auto isdbs3 = settings.get<TunerFrontendUnionSettings::isdbs3>();
- frontendSettings.isdbs3({
- .frequency = static_cast<uint32_t>(isdbs3.frequency),
- .streamId = static_cast<uint16_t>(isdbs3.streamId),
- .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs3.streamIdType),
- .modulation = static_cast<FrontendIsdbs3Modulation>(isdbs3.modulation),
- .coderate = static_cast<FrontendIsdbs3Coderate>(isdbs3.codeRate),
- .symbolRate = static_cast<uint32_t>(isdbs3.symbolRate),
- .rolloff = static_cast<FrontendIsdbs3Rolloff>(isdbs3.rolloff),
- });
- break;
- }
- case TunerFrontendUnionSettings::isdbt: {
- auto isdbt = settings.get<TunerFrontendUnionSettings::isdbt>();
- frontendSettings.isdbt({
- .frequency = static_cast<uint32_t>(isdbt.frequency),
- .modulation = static_cast<FrontendIsdbtModulation>(isdbt.modulation),
- .bandwidth = static_cast<FrontendIsdbtBandwidth>(isdbt.bandwidth),
- .mode = static_cast<FrontendIsdbtMode>(isdbt.mode),
- .coderate = static_cast<FrontendIsdbtCoderate>(isdbt.codeRate),
- .guardInterval = static_cast<FrontendIsdbtGuardInterval>(isdbt.guardInterval),
- .serviceAreaId = static_cast<uint32_t>(isdbt.serviceAreaId),
- });
- break;
- }
- default:
- break;
- }
-
- return frontendSettings;
-}
-
-FrontendSettingsExt1_1 TunerFrontend::getHidlFrontendSettingsExt(
- const TunerFrontendSettings& aidlSettings) {
- FrontendSettingsExt1_1 frontendSettingsExt{
- .endFrequency = static_cast<uint32_t>(aidlSettings.endFrequency),
- .inversion = static_cast<FrontendSpectralInversion>(aidlSettings.inversion),
- };
-
- auto settings = aidlSettings.settings;
- switch (settings.getTag()) {
- case TunerFrontendUnionSettings::analog: {
- auto analog = settings.get<TunerFrontendUnionSettings::analog>();
- if (analog.isExtended) {
- frontendSettingsExt.settingExt.analog({
- .aftFlag = static_cast<FrontendAnalogAftFlag>(analog.aftFlag),
- });
- } else {
- frontendSettingsExt.settingExt.noinit();
- }
- break;
- }
- case TunerFrontendUnionSettings::cable: {
- auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
- if (dvbc.isExtended) {
- frontendSettingsExt.settingExt.dvbc({
- .interleaveMode = static_cast<FrontendCableTimeInterleaveMode>(
- dvbc.interleaveMode),
- .bandwidth = static_cast<FrontendDvbcBandwidth>(
- dvbc.bandwidth),
- });
- } else {
- frontendSettingsExt.settingExt.noinit();
- }
- break;
- }
- case TunerFrontendUnionSettings::dvbs: {
- auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
- if (dvbs.isExtended) {
- frontendSettingsExt.settingExt.dvbs({
- .scanType = static_cast<FrontendDvbsScanType>(dvbs.scanType),
- .isDiseqcRxMessage = dvbs.isDiseqcRxMessage,
- });
- } else {
- frontendSettingsExt.settingExt.noinit();
- }
- break;
- }
- case TunerFrontendUnionSettings::dvbt: {
- auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
- if (dvbt.isExtended) {
- frontendSettingsExt.settingExt.dvbt({
- .constellation =
- static_cast<hardware::tv::tuner::V1_1::FrontendDvbtConstellation>(
- dvbt.constellation),
- .transmissionMode =
- static_cast<hardware::tv::tuner::V1_1::FrontendDvbtTransmissionMode>(
- dvbt.transmissionMode),
- });
- } else {
- frontendSettingsExt.settingExt.noinit();
- }
- break;
- }
- case TunerFrontendUnionSettings::dtmb: {
- auto dtmb = settings.get<TunerFrontendUnionSettings::dtmb>();
- frontendSettingsExt.settingExt.dtmb({
- .frequency = static_cast<uint32_t>(dtmb.frequency),
- .transmissionMode = static_cast<FrontendDtmbTransmissionMode>(
- dtmb.transmissionMode),
- .bandwidth = static_cast<FrontendDtmbBandwidth>(dtmb.bandwidth),
- .modulation = static_cast<FrontendDtmbModulation>(dtmb.modulation),
- .codeRate = static_cast<FrontendDtmbCodeRate>(dtmb.codeRate),
- .guardInterval = static_cast<FrontendDtmbGuardInterval>(dtmb.guardInterval),
- .interleaveMode = static_cast<FrontendDtmbTimeInterleaveMode>(dtmb.interleaveMode),
- });
- break;
- }
- default:
- frontendSettingsExt.settingExt.noinit();
- break;
- }
-
- return frontendSettingsExt;
-}
+} // namespace tuner
+} // namespace tv
+} // namespace media
} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerFrontend.h b/services/tuner/TunerFrontend.h
index 22fd509..417d969 100644
--- a/services/tuner/TunerFrontend.h
+++ b/services/tuner/TunerFrontend.h
@@ -1,5 +1,5 @@
/**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,95 +17,73 @@
#ifndef ANDROID_MEDIA_TUNERFRONTEND_H
#define ANDROID_MEDIA_TUNERFRONTEND_H
+#include <aidl/android/hardware/tv/tuner/BnFrontendCallback.h>
+#include <aidl/android/hardware/tv/tuner/IFrontend.h>
+#include <aidl/android/hardware/tv/tuner/IFrontendCallback.h>
#include <aidl/android/media/tv/tuner/BnTunerFrontend.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
-#include <android/hardware/tv/tuner/1.1/IFrontend.h>
-#include <android/hardware/tv/tuner/1.1/IFrontendCallback.h>
-#include <media/stagefright/foundation/ADebug.h>
#include <utils/Log.h>
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerFrontend;
-using ::aidl::android::media::tv::tuner::ITunerFrontendCallback;
-using ::aidl::android::media::tv::tuner::ITunerLnb;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3Settings;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbsCodeRate;
-using ::aidl::android::media::tv::tuner::TunerFrontendScanMessage;
-using ::aidl::android::media::tv::tuner::TunerFrontendSettings;
-using ::aidl::android::media::tv::tuner::TunerFrontendStatus;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::FrontendAtsc3PlpSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendDvbsCodeRate;
-using ::android::hardware::tv::tuner::V1_0::FrontendEventType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanMessage;
-using ::android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
-using ::android::hardware::tv::tuner::V1_0::FrontendSettings;
-using ::android::hardware::tv::tuner::V1_0::FrontendStatus;
-using ::android::hardware::tv::tuner::V1_0::IFrontend;
-using ::android::hardware::tv::tuner::V1_1::IFrontendCallback;
-using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageTypeExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendSettingsExt1_1;
-using ::android::hardware::tv::tuner::V1_1::FrontendStatusExt1_1;
+using ::aidl::android::hardware::tv::tuner::BnFrontendCallback;
+using ::aidl::android::hardware::tv::tuner::FrontendEventType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessage;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanType;
+using ::aidl::android::hardware::tv::tuner::FrontendSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusType;
+using ::aidl::android::hardware::tv::tuner::IFrontend;
+using ::aidl::android::hardware::tv::tuner::IFrontendCallback;
using namespace std;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
class TunerFrontend : public BnTunerFrontend {
public:
- TunerFrontend(sp<IFrontend> frontend, int id);
+ TunerFrontend(shared_ptr<IFrontend> frontend, int id);
virtual ~TunerFrontend();
- Status setCallback(
- const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) override;
- Status tune(const TunerFrontendSettings& settings) override;
- Status stopTune() override;
- Status scan(const TunerFrontendSettings& settings, int frontendScanType) override;
- Status stopScan() override;
- Status setLnb(const shared_ptr<ITunerLnb>& lnb) override;
- Status setLna(bool bEnable) override;
- Status linkCiCamToFrontend(int ciCamId, int32_t* _aidl_return) override;
- Status unlinkCiCamToFrontend(int ciCamId) override;
- Status close() override;
- Status getStatus(const vector<int32_t>& statusTypes,
- vector<TunerFrontendStatus>* _aidl_return) override;
- Status getStatusExtended_1_1(const vector<int32_t>& statusTypes,
- vector<TunerFrontendStatus>* _aidl_return) override;
- Status getFrontendId(int* _aidl_return) override;
- struct FrontendCallback : public IFrontendCallback {
+ ::ndk::ScopedAStatus setCallback(
+ const shared_ptr<ITunerFrontendCallback>& in_tunerFrontendCallback) override;
+ ::ndk::ScopedAStatus tune(const FrontendSettings& in_settings) override;
+ ::ndk::ScopedAStatus stopTune() override;
+ ::ndk::ScopedAStatus scan(const FrontendSettings& in_settings,
+ FrontendScanType in_frontendScanType) override;
+ ::ndk::ScopedAStatus stopScan() override;
+ ::ndk::ScopedAStatus setLnb(const shared_ptr<ITunerLnb>& in_lnb) override;
+ ::ndk::ScopedAStatus setLna(bool in_bEnable) override;
+ ::ndk::ScopedAStatus linkCiCamToFrontend(int32_t in_ciCamId, int32_t* _aidl_return) override;
+ ::ndk::ScopedAStatus unlinkCiCamToFrontend(int32_t in_ciCamId) override;
+ ::ndk::ScopedAStatus close() override;
+ ::ndk::ScopedAStatus getStatus(const vector<FrontendStatusType>& in_statusTypes,
+ vector<FrontendStatus>* _aidl_return) override;
+ ::ndk::ScopedAStatus getFrontendId(int32_t* _aidl_return) override;
+
+ struct FrontendCallback : public BnFrontendCallback {
FrontendCallback(const shared_ptr<ITunerFrontendCallback> tunerFrontendCallback)
- : mTunerFrontendCallback(tunerFrontendCallback) {};
+ : mTunerFrontendCallback(tunerFrontendCallback){};
- virtual Return<void> onEvent(FrontendEventType frontendEventType);
- virtual Return<void> onScanMessage(
- FrontendScanMessageType type, const FrontendScanMessage& message);
- virtual Return<void> onScanMessageExt1_1(
- FrontendScanMessageTypeExt1_1 type, const FrontendScanMessageExt1_1& message);
+ ::ndk::ScopedAStatus onEvent(FrontendEventType frontendEventType) override;
+ ::ndk::ScopedAStatus onScanMessage(FrontendScanMessageType type,
+ const FrontendScanMessage& message) override;
shared_ptr<ITunerFrontendCallback> mTunerFrontendCallback;
};
private:
- hidl_vec<FrontendAtsc3PlpSettings> getAtsc3PlpSettings(
- const TunerFrontendAtsc3Settings& settings);
- FrontendDvbsCodeRate getDvbsCodeRate(const TunerFrontendDvbsCodeRate& codeRate);
- FrontendSettings getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings);
- FrontendSettingsExt1_1 getHidlFrontendSettingsExt(const TunerFrontendSettings& aidlSettings);
- void getAidlFrontendStatus(
- vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
- void getAidlFrontendStatusExt(
- vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
-
int mId;
- sp<IFrontend> mFrontend;
- sp<::android::hardware::tv::tuner::V1_1::IFrontend> mFrontend_1_1;
+ shared_ptr<IFrontend> mFrontend;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERFRONTEND_H
diff --git a/services/tuner/TunerHelper.cpp b/services/tuner/TunerHelper.cpp
new file mode 100644
index 0000000..dc67110
--- /dev/null
+++ b/services/tuner/TunerHelper.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TunerHelper.h"
+
+#include <aidl/android/media/tv/tunerresourcemanager/ITunerResourceManager.h>
+#include <android/binder_manager.h>
+#include <android/content/pm/IPackageManagerNative.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+using ::aidl::android::media::tv::tunerresourcemanager::ITunerResourceManager;
+using ::android::defaultServiceManager;
+using ::android::IBinder;
+using ::android::interface_cast;
+using ::android::IServiceManager;
+using ::android::sp;
+using ::android::binder::Status;
+using ::android::content::pm::IPackageManagerNative;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+// System Feature defined in PackageManager
+static const ::android::String16 FEATURE_TUNER(::android::String16("android.hardware.tv.tuner"));
+
+int32_t TunerHelper::sResourceRequestCount = 0;
+
+bool TunerHelper::checkTunerFeature() {
+ sp<IServiceManager> serviceMgr = defaultServiceManager();
+ sp<IPackageManagerNative> packageMgr;
+ if (serviceMgr.get() == nullptr) {
+ ALOGE("%s: Cannot find service manager", __func__);
+ return false;
+ }
+
+ sp<IBinder> binder = serviceMgr->waitForService(String16("package_native"));
+ packageMgr = interface_cast<IPackageManagerNative>(binder);
+ if (packageMgr != nullptr) {
+ bool hasFeature = false;
+ Status status = packageMgr->hasSystemFeature(FEATURE_TUNER, 0, &hasFeature);
+ if (!status.isOk()) {
+ ALOGE("%s: hasSystemFeature failed: %s", __func__, status.exceptionMessage().c_str());
+ return false;
+ }
+ if (!hasFeature) {
+ ALOGD("Current device does not support tuner feaure.");
+ return false;
+ }
+ } else {
+ ALOGD("%s: Cannot find package manager.", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+// TODO: update Demux, Descrambler.
+void TunerHelper::updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+ const vector<int32_t>& lnbHandles) {
+ ::ndk::SpAIBinder binder(AServiceManager_waitForService("tv_tuner_resource_mgr"));
+ shared_ptr<ITunerResourceManager> tunerRM = ITunerResourceManager::fromBinder(binder);
+ if (tunerRM == nullptr) {
+ return;
+ }
+
+ tunerRM->setFrontendInfoList(feInfos);
+ tunerRM->setLnbInfoList(lnbHandles);
+}
+
+// TODO: create a map between resource id and handles.
+int TunerHelper::getResourceIdFromHandle(int resourceHandle, int /*type*/) {
+ return (resourceHandle & 0x00ff0000) >> 16;
+}
+
+int TunerHelper::getResourceHandleFromId(int id, int resourceType) {
+ // TODO: build up randomly generated id to handle mapping
+ return (resourceType & 0x000000ff) << 24 | (id << 16) | (sResourceRequestCount++ & 0xffff);
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerHelper.h b/services/tuner/TunerHelper.h
new file mode 100644
index 0000000..755df57
--- /dev/null
+++ b/services/tuner/TunerHelper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERDVRHELPER_H
+#define ANDROID_MEDIA_TUNERDVRHELPER_H
+
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <utils/String16.h>
+
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::String16;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+const static int TUNER_HAL_VERSION_UNKNOWN = 0;
+const static int TUNER_HAL_VERSION_1_0 = 1 << 16;
+const static int TUNER_HAL_VERSION_1_1 = (1 << 16) | 1;
+const static int TUNER_HAL_VERSION_2_0 = 2 << 16;
+
+// Keep syncing with ShareFilter.java
+const static int STATUS_INACCESSIBLE = 1 << 7;
+
+const static String16 sSharedFilterPermission("android.permission.ACCESS_TV_SHARED_FILTER");
+
+typedef enum {
+ FRONTEND,
+ DEMUX,
+ DESCRAMBLER,
+ LNB
+} TunerResourceType;
+
+class TunerHelper {
+public:
+ static bool checkTunerFeature();
+
+ // TODO: update Demux, Descrambler.
+ static void updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+ const vector<int32_t>& lnbHandles);
+ // TODO: create a map between resource id and handles.
+ static int getResourceIdFromHandle(int resourceHandle, int type);
+ static int getResourceHandleFromId(int id, int resourceType);
+
+private:
+ static int32_t sResourceRequestCount;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERDVRHELPER_H
diff --git a/services/tuner/TunerLnb.cpp b/services/tuner/TunerLnb.cpp
index 77248d4..1e143c3 100644
--- a/services/tuner/TunerLnb.cpp
+++ b/services/tuner/TunerLnb.cpp
@@ -18,123 +18,116 @@
#include "TunerLnb.h"
-using ::android::hardware::tv::tuner::V1_0::LnbPosition;
-using ::android::hardware::tv::tuner::V1_0::LnbTone;
-using ::android::hardware::tv::tuner::V1_0::LnbVoltage;
-using ::android::hardware::tv::tuner::V1_0::Result;
+#include <aidl/android/hardware/tv/tuner/ILnbCallback.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+using ::aidl::android::hardware::tv::tuner::ILnbCallback;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-TunerLnb::TunerLnb(sp<ILnb> lnb, int id) {
+TunerLnb::TunerLnb(shared_ptr<ILnb> lnb, int id) {
mLnb = lnb;
mId = id;
}
TunerLnb::~TunerLnb() {
- mLnb = NULL;
+ mLnb = nullptr;
mId = -1;
}
-Status TunerLnb::setCallback(
- const shared_ptr<ITunerLnbCallback>& tunerLnbCallback) {
- if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setCallback(
+ const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) {
+ if (mLnb == nullptr) {
ALOGE("ILnb is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- if (tunerLnbCallback == NULL) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ if (in_tunerLnbCallback == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
}
- sp<ILnbCallback> lnbCallback = new LnbCallback(tunerLnbCallback);
- Result status = mLnb->setCallback(lnbCallback);
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ shared_ptr<ILnbCallback> lnbCallback =
+ ::ndk::SharedRefBase::make<LnbCallback>(in_tunerLnbCallback);
+ return mLnb->setCallback(lnbCallback);
}
-Status TunerLnb::setVoltage(int voltage) {
- if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setVoltage(LnbVoltage in_voltage) {
+ if (mLnb == nullptr) {
ALOGE("ILnb is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mLnb->setVoltage(static_cast<LnbVoltage>(voltage));
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ return mLnb->setVoltage(in_voltage);
}
-Status TunerLnb::setTone(int tone) {
- if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setTone(LnbTone in_tone) {
+ if (mLnb == nullptr) {
ALOGE("ILnb is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mLnb->setTone(static_cast<LnbTone>(tone));
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ return mLnb->setTone(in_tone);
}
-Status TunerLnb::setSatellitePosition(int position) {
- if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::setSatellitePosition(LnbPosition in_position) {
+ if (mLnb == nullptr) {
ALOGE("ILnb is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mLnb->setSatellitePosition(static_cast<LnbPosition>(position));
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ return mLnb->setSatellitePosition(in_position);
}
-Status TunerLnb::sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
- if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) {
+ if (mLnb == nullptr) {
ALOGE("ILnb is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mLnb->sendDiseqcMessage(diseqcMessage);
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ return mLnb->sendDiseqcMessage(in_diseqcMessage);
}
-Status TunerLnb::close() {
- if (mLnb == NULL) {
+::ndk::ScopedAStatus TunerLnb::close() {
+ if (mLnb == nullptr) {
ALOGE("ILnb is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mLnb->close();
- mLnb = NULL;
+ auto res = mLnb->close();
+ mLnb = nullptr;
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return res;
}
/////////////// ILnbCallback ///////////////////////
-
-Return<void> TunerLnb::LnbCallback::onEvent(const LnbEventType lnbEventType) {
- if (mTunerLnbCallback != NULL) {
- mTunerLnbCallback->onEvent((int)lnbEventType);
+::ndk::ScopedAStatus TunerLnb::LnbCallback::onEvent(const LnbEventType lnbEventType) {
+ if (mTunerLnbCallback != nullptr) {
+ mTunerLnbCallback->onEvent(lnbEventType);
}
- return Void();
+ return ndk::ScopedAStatus::ok();
}
-Return<void> TunerLnb::LnbCallback::onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
- if (mTunerLnbCallback != NULL && diseqcMessage != NULL) {
- vector<uint8_t> msg(begin(diseqcMessage), end(diseqcMessage));
- mTunerLnbCallback->onDiseqcMessage(msg);
+::ndk::ScopedAStatus TunerLnb::LnbCallback::onDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
+ if (mTunerLnbCallback != nullptr) {
+ mTunerLnbCallback->onDiseqcMessage(diseqcMessage);
}
- return Void();
+ return ndk::ScopedAStatus::ok();
}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerLnb.h b/services/tuner/TunerLnb.h
index 500d072..72988a6 100644
--- a/services/tuner/TunerLnb.h
+++ b/services/tuner/TunerLnb.h
@@ -17,55 +17,61 @@
#ifndef ANDROID_MEDIA_TUNERFLNB_H
#define ANDROID_MEDIA_TUNERFLNB_H
+#include <aidl/android/hardware/tv/tuner/BnLnbCallback.h>
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
#include <aidl/android/media/tv/tuner/BnTunerLnb.h>
-#include <android/hardware/tv/tuner/1.0/ILnb.h>
-#include <android/hardware/tv/tuner/1.0/ILnbCallback.h>
-#include <media/stagefright/foundation/ADebug.h>
#include <utils/Log.h>
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerLnb;
-using ::aidl::android::media::tv::tuner::ITunerLnbCallback;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::ILnb;
-using ::android::hardware::tv::tuner::V1_0::ILnbCallback;
-using ::android::hardware::tv::tuner::V1_0::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::BnLnbCallback;
+using ::aidl::android::hardware::tv::tuner::ILnb;
+using ::aidl::android::hardware::tv::tuner::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::LnbPosition;
+using ::aidl::android::hardware::tv::tuner::LnbTone;
+using ::aidl::android::hardware::tv::tuner::LnbVoltage;
using namespace std;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
class TunerLnb : public BnTunerLnb {
public:
- TunerLnb(sp<ILnb> lnb, int id);
+ TunerLnb(shared_ptr<ILnb> lnb, int id);
virtual ~TunerLnb();
- Status setCallback(const shared_ptr<ITunerLnbCallback>& tunerLnbCallback) override;
- Status setVoltage(int voltage) override;
- Status setTone(int tone) override;
- Status setSatellitePosition(int position) override;
- Status sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) override;
- Status close() override;
+
+ ::ndk::ScopedAStatus setCallback(
+ const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) override;
+ ::ndk::ScopedAStatus setVoltage(LnbVoltage in_voltage) override;
+ ::ndk::ScopedAStatus setTone(LnbTone in_tone) override;
+ ::ndk::ScopedAStatus setSatellitePosition(LnbPosition in_position) override;
+ ::ndk::ScopedAStatus sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) override;
+ ::ndk::ScopedAStatus close() override;
int getId() { return mId; }
- struct LnbCallback : public ILnbCallback {
+ struct LnbCallback : public BnLnbCallback {
LnbCallback(const shared_ptr<ITunerLnbCallback> tunerLnbCallback)
- : mTunerLnbCallback(tunerLnbCallback) {};
+ : mTunerLnbCallback(tunerLnbCallback){};
- virtual Return<void> onEvent(const LnbEventType lnbEventType);
- virtual Return<void> onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage);
+ ::ndk::ScopedAStatus onEvent(const LnbEventType lnbEventType) override;
+ ::ndk::ScopedAStatus onDiseqcMessage(const vector<uint8_t>& diseqcMessage) override;
shared_ptr<ITunerLnbCallback> mTunerLnbCallback;
};
private:
int mId;
- sp<ILnb> mLnb;
+ shared_ptr<ILnb> mLnb;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERFLNB_H
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index 5b4129a..36e4cd1 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,369 +14,310 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
#define LOG_TAG "TunerService"
-#include <android/binder_manager.h>
-#include <android/content/pm/IPackageManagerNative.h>
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
#include "TunerService.h"
-#include "TunerFrontend.h"
-#include "TunerLnb.h"
+
+#include <aidl/android/hardware/tv/tuner/IDemux.h>
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
+#include <aidl/android/hardware/tv/tuner/IFrontend.h>
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <android/binder_manager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/PermissionCache.h>
+#include <utils/Log.h>
+
+#include <string>
+
#include "TunerDemux.h"
#include "TunerDescrambler.h"
+#include "TunerFrontend.h"
+#include "TunerHelper.h"
+#include "TunerLnb.h"
-using ::aidl::android::media::tv::tuner::TunerFrontendAnalogCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3Capabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendAtscCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendCableCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbsCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDvbtCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbs3Capabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbsCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendIsdbtCapabilities;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendType;
-using ::android::hardware::tv::tuner::V1_0::IFrontend;
-using ::android::hardware::tv::tuner::V1_0::ILnb;
-using ::android::hardware::tv::tuner::V1_0::LnbId;
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::FrontendDtmbCapabilities;
+using ::aidl::android::hardware::tv::tuner::IDemux;
+using ::aidl::android::hardware::tv::tuner::IDescrambler;
+using ::aidl::android::hardware::tv::tuner::IFrontend;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::android::IPCThreadState;
+using ::android::PermissionCache;
+using ::android::sp;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+shared_ptr<TunerService> TunerService::sTunerService = nullptr;
TunerService::TunerService() {
- sp<IServiceManager> serviceMgr = defaultServiceManager();
- sp<content::pm::IPackageManagerNative> packageMgr;
- if (serviceMgr.get() == nullptr) {
- ALOGE("%s: Cannot find service manager", __func__);
- return;
- } else {
- sp<IBinder> binder = serviceMgr->waitForService(String16("package_native"));
- packageMgr = interface_cast<content::pm::IPackageManagerNative>(binder);
- }
-
- bool hasFeature = false;
- if (packageMgr != nullptr) {
- binder::Status status = packageMgr->hasSystemFeature(FEATURE_TUNER, 0, &hasFeature);
- if (!status.isOk()) {
- ALOGE("%s: hasSystemFeature failed: %s",
- __func__, status.exceptionMessage().c_str());
- return;
- }
- if (!hasFeature) {
- ALOGD("Current device does not support tuner feaure.");
- return;
- }
- } else {
- ALOGD("%s: Cannot find package manager.", __func__);
+ if (!TunerHelper::checkTunerFeature()) {
+ ALOGD("Device doesn't have tuner hardware.");
return;
}
- ::ndk::SpAIBinder binder(AServiceManager_waitForService("tv_tuner_resource_mgr"));
- mTunerResourceManager = ITunerResourceManager::fromBinder(binder);
updateTunerResources();
}
TunerService::~TunerService() {}
binder_status_t TunerService::instantiate() {
- shared_ptr<TunerService> service =
- ::ndk::SharedRefBase::make<TunerService>();
- return AServiceManager_addService(service->asBinder().get(), getServiceName());
+ sTunerService = ::ndk::SharedRefBase::make<TunerService>();
+ return AServiceManager_addService(sTunerService->asBinder().get(), getServiceName());
+}
+
+shared_ptr<TunerService> TunerService::getTunerService() {
+ return sTunerService;
}
bool TunerService::hasITuner() {
- ALOGD("hasITuner");
+ ALOGV("hasITuner");
if (mTuner != nullptr) {
return true;
}
- mTuner = ITuner::getService();
- if (mTuner == nullptr) {
- ALOGE("Failed to get ITuner service");
+ const string statsServiceName = string() + ITuner::descriptor + "/default";
+ if (AServiceManager_isDeclared(statsServiceName.c_str())) {
+ ::ndk::SpAIBinder binder(AServiceManager_waitForService(statsServiceName.c_str()));
+ mTuner = ITuner::fromBinder(binder);
+ } else {
+ mTuner = nullptr;
+ ALOGE("Failed to get Tuner HAL Service");
return false;
}
- mTunerVersion = TUNER_HAL_VERSION_1_0;
- mTuner_1_1 = ::android::hardware::tv::tuner::V1_1::ITuner::castFrom(mTuner);
- if (mTuner_1_1 != nullptr) {
- mTunerVersion = TUNER_HAL_VERSION_1_1;
- } else {
- ALOGE("Failed to get ITuner_1_1 service");
- }
+
+ mTunerVersion = TUNER_HAL_VERSION_2_0;
+ // TODO: Enable this after Tuner HAL is frozen.
+ // if (mTuner->getInterfaceVersion(&mTunerVersion).isOk()) {
+ // // Tuner AIDL HAL version 1 will be Tuner HAL 2.0
+ // mTunerVersion = (mTunerVersion + 1) << 16;
+ //}
+
return true;
}
-bool TunerService::hasITuner_1_1() {
- ALOGD("hasITuner_1_1");
- hasITuner();
- return (mTunerVersion == TUNER_HAL_VERSION_1_1);
-}
-
-Status TunerService::openDemux(
- int /* demuxHandle */, std::shared_ptr<ITunerDemux>* _aidl_return) {
- ALOGD("openDemux");
+::ndk::ScopedAStatus TunerService::openDemux(int32_t /* in_demuxHandle */,
+ shared_ptr<ITunerDemux>* _aidl_return) {
+ ALOGV("openDemux");
if (!hasITuner()) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::NOT_INITIALIZED));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res;
- uint32_t id;
- sp<IDemux> demuxSp = nullptr;
- shared_ptr<ITunerDemux> tunerDemux = nullptr;
- mTuner->openDemux([&](Result r, uint32_t demuxId, const sp<IDemux>& demux) {
- demuxSp = demux;
- id = demuxId;
- res = r;
- ALOGD("open demux, id = %d", demuxId);
- });
- if (res == Result::SUCCESS) {
- tunerDemux = ::ndk::SharedRefBase::make<TunerDemux>(demuxSp, id);
- *_aidl_return = tunerDemux->ref<ITunerDemux>();
- return Status::ok();
+ vector<int32_t> id;
+ shared_ptr<IDemux> demux;
+ auto status = mTuner->openDemux(&id, &demux);
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerDemux>(demux, id[0]);
}
- ALOGW("open demux failed, res = %d", res);
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ return status;
}
-Status TunerService::getDemuxCaps(TunerDemuxCapabilities* _aidl_return) {
- ALOGD("getDemuxCaps");
+::ndk::ScopedAStatus TunerService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
+ ALOGV("getDemuxCaps");
if (!hasITuner()) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::NOT_INITIALIZED));
- }
- Result res;
- DemuxCapabilities caps;
- mTuner->getDemuxCaps([&](Result r, const DemuxCapabilities& demuxCaps) {
- caps = demuxCaps;
- res = r;
- });
- if (res == Result::SUCCESS) {
- *_aidl_return = getAidlDemuxCaps(caps);
- return Status::ok();
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- ALOGW("Get demux caps failed, res = %d", res);
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ return mTuner->getDemuxCaps(_aidl_return);
}
-Status TunerService::getFrontendIds(vector<int32_t>* ids) {
+::ndk::ScopedAStatus TunerService::getFrontendIds(vector<int32_t>* ids) {
if (!hasITuner()) {
- return Status::fromServiceSpecificError(
- static_cast<int32_t>(Result::NOT_INITIALIZED));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- hidl_vec<FrontendId> feIds;
- Result res = getHidlFrontendIds(feIds);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- ids->resize(feIds.size());
- copy(feIds.begin(), feIds.end(), ids->begin());
- return Status::ok();
+ return mTuner->getFrontendIds(ids);
}
-Status TunerService::getFrontendInfo(int32_t id, TunerFrontendInfo* _aidl_return) {
+::ndk::ScopedAStatus TunerService::getFrontendInfo(int32_t id, FrontendInfo* _aidl_return) {
if (!hasITuner()) {
ALOGE("ITuner service is not init.");
return ::ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(Result::UNAVAILABLE));
}
- FrontendInfo info;
- Result res = getHidlFrontendInfo(id, info);
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
-
- TunerFrontendInfo tunerInfo = convertToAidlFrontendInfo(info);
- *_aidl_return = tunerInfo;
- return Status::ok();
+ return mTuner->getFrontendInfo(id, _aidl_return);
}
-Status TunerService::getFrontendDtmbCapabilities(
- int32_t id, TunerFrontendDtmbCapabilities* _aidl_return) {
- if (!hasITuner_1_1()) {
- ALOGE("ITuner_1_1 service is not init.");
+::ndk::ScopedAStatus TunerService::openFrontend(int32_t frontendHandle,
+ shared_ptr<ITunerFrontend>* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGE("ITuner service is not init.");
return ::ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res;
- FrontendDtmbCapabilities dtmbCaps;
- mTuner_1_1->getFrontendDtmbCapabilities(id,
- [&](Result r, const FrontendDtmbCapabilities& caps) {
- dtmbCaps = caps;
- res = r;
- });
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ int id = TunerHelper::getResourceIdFromHandle(frontendHandle, FRONTEND);
+ shared_ptr<IFrontend> frontend;
+ auto status = mTuner->openFrontendById(id, &frontend);
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerFrontend>(frontend, id);
}
- TunerFrontendDtmbCapabilities aidlDtmbCaps{
- .transmissionModeCap = (int)dtmbCaps.transmissionModeCap,
- .bandwidthCap = (int)dtmbCaps.bandwidthCap,
- .modulationCap = (int)dtmbCaps.modulationCap,
- .codeRateCap = (int)dtmbCaps.codeRateCap,
- .guardIntervalCap = (int)dtmbCaps.guardIntervalCap,
- .interleaveModeCap = (int)dtmbCaps.interleaveModeCap,
- };
-
- *_aidl_return = aidlDtmbCaps;
- return Status::ok();
+ return status;
}
-Status TunerService::openFrontend(
- int32_t frontendHandle, shared_ptr<ITunerFrontend>* _aidl_return) {
- if (!hasITuner()) {
- ALOGE("ITuner service is not init.");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
- }
-
- Result status;
- sp<IFrontend> frontend;
- int id = getResourceIdFromHandle(frontendHandle, FRONTEND);
- mTuner->openFrontendById(id, [&](Result result, const sp<IFrontend>& fe) {
- frontend = fe;
- status = result;
- });
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- *_aidl_return = ::ndk::SharedRefBase::make<TunerFrontend>(frontend, id);
- return Status::ok();
-}
-
-Status TunerService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
if (!hasITuner()) {
ALOGD("get ITuner failed");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status;
- sp<ILnb> lnb;
- int id = getResourceIdFromHandle(lnbHandle, LNB);
- mTuner->openLnbById(id, [&](Result result, const sp<ILnb>& lnbSp){
- lnb = lnbSp;
- status = result;
- });
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ shared_ptr<ILnb> lnb;
+ int id = TunerHelper::getResourceIdFromHandle(lnbHandle, LNB);
+ auto status = mTuner->openLnbById(id, &lnb);
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id);
}
- *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id);
- return Status::ok();
+ return status;
}
-Status TunerService::openLnbByName(const string& lnbName, shared_ptr<ITunerLnb>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openLnbByName(const string& lnbName,
+ shared_ptr<ITunerLnb>* _aidl_return) {
if (!hasITuner()) {
ALOGE("get ITuner failed");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- int lnbId;
- Result status;
- sp<ILnb> lnb;
- mTuner->openLnbByName(lnbName, [&](Result r, LnbId id, const sp<ILnb>& lnbSp) {
- status = r;
- lnb = lnbSp;
- lnbId = (int)id;
- });
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ vector<int32_t> id;
+ shared_ptr<ILnb> lnb;
+ auto status = mTuner->openLnbByName(lnbName, &id, &lnb);
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, id[0]);
}
- *_aidl_return = ::ndk::SharedRefBase::make<TunerLnb>(lnb, lnbId);
- return Status::ok();
+ return ::ndk::ScopedAStatus::ok();
}
-Status TunerService::openDescrambler(int32_t /*descramblerHandle*/,
- std::shared_ptr<ITunerDescrambler>* _aidl_return) {
+::ndk::ScopedAStatus TunerService::openDescrambler(int32_t /*descramblerHandle*/,
+ shared_ptr<ITunerDescrambler>* _aidl_return) {
if (!hasITuner()) {
ALOGD("get ITuner failed");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status;
- sp<IDescrambler> descrambler;
- //int id = getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
- mTuner->openDescrambler([&](Result r, const sp<IDescrambler>& descramblerSp) {
- status = r;
- descrambler = descramblerSp;
- });
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ shared_ptr<IDescrambler> descrambler;
+ // int id = TunerHelper::getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
+ auto status = mTuner->openDescrambler(&descrambler);
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerDescrambler>(descrambler);
}
- *_aidl_return = ::ndk::SharedRefBase::make<TunerDescrambler>(descrambler);
- return Status::ok();
+ return status;
+}
+
+::ndk::ScopedAStatus TunerService::getTunerHalVersion(int* _aidl_return) {
+ hasITuner();
+ *_aidl_return = mTunerVersion;
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerService::openSharedFilter(const string& in_filterToken,
+ const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGE("get ITuner failed");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (!PermissionCache::checkCallingPermission(sSharedFilterPermission)) {
+ ALOGE("Request requires android.permission.ACCESS_TV_SHARED_FILTER");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Mutex::Autolock _l(mSharedFiltersLock);
+ if (mSharedFilters.find(in_filterToken) == mSharedFilters.end()) {
+ *_aidl_return = nullptr;
+ ALOGD("fail to find %s", in_filterToken.c_str());
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ shared_ptr<TunerFilter> filter = mSharedFilters.at(in_filterToken);
+ IPCThreadState* ipc = IPCThreadState::self();
+ const int pid = ipc->getCallingPid();
+ if (!filter->isSharedFilterAllowed(pid)) {
+ *_aidl_return = nullptr;
+ ALOGD("shared filter %s is opened in the same process", in_filterToken.c_str());
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ filter->attachSharedFilterCallback(in_cb);
+
+ *_aidl_return = filter;
+ return ::ndk::ScopedAStatus::ok();
+}
+
+string TunerService::addFilterToShared(const shared_ptr<TunerFilter>& sharedFilter) {
+ Mutex::Autolock _l(mSharedFiltersLock);
+
+ // Use sharedFilter address as token.
+ string token = to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get()));
+ mSharedFilters[token] = sharedFilter;
+ return token;
+}
+
+void TunerService::removeSharedFilter(const shared_ptr<TunerFilter>& sharedFilter) {
+ Mutex::Autolock _l(mSharedFiltersLock);
+
+ // Use sharedFilter address as token.
+ mSharedFilters.erase(to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get())));
}
void TunerService::updateTunerResources() {
- if (!hasITuner() || mTunerResourceManager == NULL) {
+ if (!hasITuner()) {
ALOGE("Failed to updateTunerResources");
return;
}
- updateFrontendResources();
- updateLnbResources();
- // TODO: update Demux, Descrambler.
+ TunerHelper::updateTunerResources(getTRMFrontendInfos(), getTRMLnbHandles());
}
-Status TunerService::getTunerHalVersion(int* _aidl_return) {
- hasITuner();
- *_aidl_return = mTunerVersion;
- return Status::ok();
-}
-
-void TunerService::updateFrontendResources() {
- hidl_vec<FrontendId> ids;
- Result res = getHidlFrontendIds(ids);
- if (res != Result::SUCCESS) {
- return;
- }
+vector<TunerFrontendInfo> TunerService::getTRMFrontendInfos() {
vector<TunerFrontendInfo> infos;
+ vector<int32_t> ids;
+ auto status = mTuner->getFrontendIds(&ids);
+ if (!status.isOk()) {
+ return infos;
+ }
+
for (int i = 0; i < ids.size(); i++) {
FrontendInfo frontendInfo;
- Result res = getHidlFrontendInfo((int)ids[i], frontendInfo);
- if (res != Result::SUCCESS) {
+ auto res = mTuner->getFrontendInfo(ids[i], &frontendInfo);
+ if (!res.isOk()) {
continue;
}
TunerFrontendInfo tunerFrontendInfo{
- .handle = getResourceHandleFromId((int)ids[i], FRONTEND),
- .type = static_cast<int>(frontendInfo.type),
- .exclusiveGroupId = static_cast<int>(frontendInfo.exclusiveGroupId),
+ .handle = TunerHelper::getResourceHandleFromId((int)ids[i], FRONTEND),
+ .type = static_cast<int>(frontendInfo.type),
+ .exclusiveGroupId = frontendInfo.exclusiveGroupId,
};
infos.push_back(tunerFrontendInfo);
}
- mTunerResourceManager->setFrontendInfoList(infos);
+
+ return infos;
}
-void TunerService::updateLnbResources() {
- vector<int> handles = getLnbHandles();
- if (handles.size() == 0) {
- return;
- }
- mTunerResourceManager->setLnbInfoList(handles);
-}
-
-vector<int> TunerService::getLnbHandles() {
- vector<int> lnbHandles;
- if (mTuner != NULL) {
- Result res;
- vector<LnbId> lnbIds;
- mTuner->getLnbIds([&](Result r, const hardware::hidl_vec<LnbId>& ids) {
- lnbIds = ids;
- res = r;
- });
- if (res != Result::SUCCESS || lnbIds.size() == 0) {
- } else {
+vector<int32_t> TunerService::getTRMLnbHandles() {
+ vector<int32_t> lnbHandles;
+ if (mTuner != nullptr) {
+ vector<int32_t> lnbIds;
+ auto res = mTuner->getLnbIds(&lnbIds);
+ if (res.isOk()) {
for (int i = 0; i < lnbIds.size(); i++) {
- lnbHandles.push_back(getResourceHandleFromId((int)lnbIds[i], LNB));
+ lnbHandles.push_back(TunerHelper::getResourceHandleFromId(lnbIds[i], LNB));
}
}
}
@@ -384,186 +325,8 @@
return lnbHandles;
}
-Result TunerService::getHidlFrontendIds(hidl_vec<FrontendId>& ids) {
- if (mTuner == NULL) {
- return Result::NOT_INITIALIZED;
- }
- Result res;
- mTuner->getFrontendIds([&](Result r, const hidl_vec<FrontendId>& frontendIds) {
- ids = frontendIds;
- res = r;
- });
- return res;
-}
-
-Result TunerService::getHidlFrontendInfo(int id, FrontendInfo& info) {
- if (mTuner == NULL) {
- return Result::NOT_INITIALIZED;
- }
- Result res;
- mTuner->getFrontendInfo(id, [&](Result r, const FrontendInfo& feInfo) {
- info = feInfo;
- res = r;
- });
- return res;
-}
-
-TunerDemuxCapabilities TunerService::getAidlDemuxCaps(DemuxCapabilities caps) {
- TunerDemuxCapabilities aidlCaps{
- .numDemux = (int)caps.numDemux,
- .numRecord = (int)caps.numRecord,
- .numPlayback = (int)caps.numPlayback,
- .numTsFilter = (int)caps.numTsFilter,
- .numSectionFilter = (int)caps.numSectionFilter,
- .numAudioFilter = (int)caps.numAudioFilter,
- .numVideoFilter = (int)caps.numVideoFilter,
- .numPesFilter = (int)caps.numPesFilter,
- .numPcrFilter = (int)caps.numPcrFilter,
- .numBytesInSectionFilter = (int)caps.numBytesInSectionFilter,
- .filterCaps = (int)caps.filterCaps,
- .bTimeFilter = caps.bTimeFilter,
- };
- aidlCaps.linkCaps.resize(caps.linkCaps.size());
- copy(caps.linkCaps.begin(), caps.linkCaps.end(), aidlCaps.linkCaps.begin());
- return aidlCaps;
-}
-
-TunerFrontendInfo TunerService::convertToAidlFrontendInfo(FrontendInfo halInfo) {
- TunerFrontendInfo info{
- .type = (int)halInfo.type,
- .minFrequency = (int)halInfo.minFrequency,
- .maxFrequency = (int)halInfo.maxFrequency,
- .minSymbolRate = (int)halInfo.minSymbolRate,
- .maxSymbolRate = (int)halInfo.maxSymbolRate,
- .acquireRange = (int)halInfo.acquireRange,
- .exclusiveGroupId = (int)halInfo.exclusiveGroupId,
- };
- for (int i = 0; i < halInfo.statusCaps.size(); i++) {
- info.statusCaps.push_back((int)halInfo.statusCaps[i]);
- }
-
- TunerFrontendCapabilities caps;
- switch (halInfo.type) {
- case FrontendType::ANALOG: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendAnalogCapabilities analogCaps{
- .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
- .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
- };
- caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
- }
- break;
- }
- case FrontendType::ATSC: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendAtscCapabilities atscCaps{
- .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
- };
- caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
- }
- break;
- }
- case FrontendType::ATSC3: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendAtsc3Capabilities atsc3Caps{
- .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
- .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
- .timeInterleaveModeCap =
- (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
- .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
- .demodOutputFormatCap
- = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
- .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
- };
- caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
- }
- break;
- }
- case FrontendType::DVBC: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendCableCapabilities cableCaps{
- .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
- .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
- .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
- };
- caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
- }
- break;
- }
- case FrontendType::DVBS: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendDvbsCapabilities dvbsCaps{
- .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
- .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
- .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
- };
- caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
- }
- break;
- }
- case FrontendType::DVBT: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendDvbtCapabilities dvbtCaps{
- .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
- .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
- .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
- .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
- .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
- .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
- .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
- .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
- };
- caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
- }
- break;
- }
- case FrontendType::ISDBS: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendIsdbsCapabilities isdbsCaps{
- .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
- };
- caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
- }
- break;
- }
- case FrontendType::ISDBS3: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendIsdbs3Capabilities isdbs3Caps{
- .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
- };
- caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
- }
- break;
- }
- case FrontendType::ISDBT: {
- if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps
- == halInfo.frontendCaps.getDiscriminator()) {
- TunerFrontendIsdbtCapabilities isdbtCaps{
- .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
- .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
- .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
- .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
- };
- caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
- }
- break;
- }
- default:
- break;
- }
-
- info.caps = caps;
- return info;
-}
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerService.h b/services/tuner/TunerService.h
index f8e2ee6..7bf50b6 100644
--- a/services/tuner/TunerService.h
+++ b/services/tuner/TunerService.h
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,141 +17,89 @@
#ifndef ANDROID_MEDIA_TUNERSERVICE_H
#define ANDROID_MEDIA_TUNERSERVICE_H
-#include <aidl/android/media/tv/tunerresourcemanager/ITunerResourceManager.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/ITuner.h>
#include <aidl/android/media/tv/tuner/BnTunerService.h>
-#include <android/hardware/tv/tuner/1.1/ITuner.h>
-#include <fmq/AidlMessageQueue.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <utils/Mutex.h>
-using ::aidl::android::hardware::common::fmq::GrantorDescriptor;
-using ::aidl::android::hardware::common::fmq::MQDescriptor;
-using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+#include <map>
+
+#include "TunerFilter.h"
+#include "TunerHelper.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendInfo;
+using ::aidl::android::hardware::tv::tuner::ITuner;
using ::aidl::android::media::tv::tuner::BnTunerService;
using ::aidl::android::media::tv::tuner::ITunerDemux;
-using ::aidl::android::media::tv::tuner::ITunerDescrambler;
+using ::aidl::android::media::tv::tuner::ITunerFilter;
+using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
using ::aidl::android::media::tv::tuner::ITunerFrontend;
using ::aidl::android::media::tv::tuner::ITunerLnb;
-using ::aidl::android::media::tv::tuner::TunerDemuxCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendDtmbCapabilities;
-using ::aidl::android::media::tv::tuner::TunerFrontendInfo;
-using ::aidl::android::media::tv::tunerresourcemanager::ITunerResourceManager;
-
-using ::android::hardware::details::logError;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::kSynchronizedReadWrite;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
-using ::android::hardware::tv::tuner::V1_0::FrontendId;
-using ::android::hardware::tv::tuner::V1_0::FrontendInfo;
-using ::android::hardware::tv::tuner::V1_0::IDemux;
-using ::android::hardware::tv::tuner::V1_0::IDescrambler;
-using ::android::hardware::tv::tuner::V1_0::IFilter;
-using ::android::hardware::tv::tuner::V1_0::IFilterCallback;
-using ::android::hardware::tv::tuner::V1_0::ITuner;
-using ::android::hardware::tv::tuner::V1_0::Result;
-
-using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::Mutex;
using namespace std;
+namespace aidl {
namespace android {
-
-const static int TUNER_HAL_VERSION_UNKNOWN = 0;
-const static int TUNER_HAL_VERSION_1_0 = 1 << 16;
-const static int TUNER_HAL_VERSION_1_1 = (1 << 16) | 1;
-// System Feature defined in PackageManager
-static const ::android::String16 FEATURE_TUNER(::android::String16("android.hardware.tv.tuner"));
-
-typedef enum {
- FRONTEND,
- LNB,
- DEMUX,
- DESCRAMBLER,
-} TunerResourceType;
-
-struct FilterCallback : public IFilterCallback {
- ~FilterCallback() {}
- Return<void> onFilterEvent(const DemuxFilterEvent&) {
- return Void();
- }
- Return<void> onFilterStatus(const DemuxFilterStatus) {
- return Void();
- }
-};
+namespace media {
+namespace tv {
+namespace tuner {
class TunerService : public BnTunerService {
- typedef AidlMessageQueue<int8_t, SynchronizedReadWrite> AidlMessageQueue;
- typedef MessageQueue<uint8_t, kSynchronizedReadWrite> HidlMessageQueue;
- typedef MQDescriptor<int8_t, SynchronizedReadWrite> AidlMQDesc;
-
public:
static char const *getServiceName() { return "media.tuner"; }
static binder_status_t instantiate();
TunerService();
virtual ~TunerService();
- Status getFrontendIds(vector<int32_t>* ids) override;
- Status getFrontendInfo(int32_t id, TunerFrontendInfo* _aidl_return) override;
- Status getFrontendDtmbCapabilities(
- int32_t id, TunerFrontendDtmbCapabilities* _aidl_return) override;
- Status openFrontend(
- int32_t frontendHandle, shared_ptr<ITunerFrontend>* _aidl_return) override;
- Status openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) override;
- Status openLnbByName(const string& lnbName, shared_ptr<ITunerLnb>* _aidl_return) override;
- Status openDemux(int32_t demuxHandle, std::shared_ptr<ITunerDemux>* _aidl_return) override;
- Status getDemuxCaps(TunerDemuxCapabilities* _aidl_return) override;
- Status openDescrambler(int32_t descramblerHandle,
- std::shared_ptr<ITunerDescrambler>* _aidl_return) override;
- Status getTunerHalVersion(int* _aidl_return) override;
+ ::ndk::ScopedAStatus getFrontendIds(vector<int32_t>* out_ids) override;
+ ::ndk::ScopedAStatus getFrontendInfo(int32_t in_frontendHandle,
+ FrontendInfo* _aidl_return) override;
+ ::ndk::ScopedAStatus openFrontend(int32_t in_frontendHandle,
+ shared_ptr<ITunerFrontend>* _aidl_return) override;
+ ::ndk::ScopedAStatus openLnb(int32_t in_lnbHandle,
+ shared_ptr<ITunerLnb>* _aidl_return) override;
+ ::ndk::ScopedAStatus openLnbByName(const string& in_lnbName,
+ shared_ptr<ITunerLnb>* _aidl_return) override;
+ ::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
+ shared_ptr<ITunerDemux>* _aidl_return) override;
+ ::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+ ::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
+ shared_ptr<ITunerDescrambler>* _aidl_return) override;
+ ::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
+ ::ndk::ScopedAStatus openSharedFilter(const string& in_filterToken,
+ const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) override;
- // TODO: create a map between resource id and handles.
- static int getResourceIdFromHandle(int resourceHandle, int /*type*/) {
- return (resourceHandle & 0x00ff0000) >> 16;
- }
+ string addFilterToShared(const shared_ptr<TunerFilter>& sharedFilter);
+ void removeSharedFilter(const shared_ptr<TunerFilter>& sharedFilter);
- int getResourceHandleFromId(int id, int resourceType) {
- // TODO: build up randomly generated id to handle mapping
- return (resourceType & 0x000000ff) << 24
- | (id << 16)
- | (mResourceRequestCount++ & 0xffff);
- }
+ static shared_ptr<TunerService> getTunerService();
private:
bool hasITuner();
- bool hasITuner_1_1();
void updateTunerResources();
+ vector<TunerFrontendInfo> getTRMFrontendInfos();
+ vector<int32_t> getTRMLnbHandles();
- void updateFrontendResources();
- void updateLnbResources();
- Result getHidlFrontendIds(hidl_vec<FrontendId>& ids);
- Result getHidlFrontendInfo(int id, FrontendInfo& info);
- vector<int> getLnbHandles();
-
- TunerDemuxCapabilities getAidlDemuxCaps(DemuxCapabilities caps);
- TunerFrontendInfo convertToAidlFrontendInfo(FrontendInfo halInfo);
-
- sp<ITuner> mTuner;
- sp<::android::hardware::tv::tuner::V1_1::ITuner> mTuner_1_1;
-
- shared_ptr<ITunerResourceManager> mTunerResourceManager;
- int mResourceRequestCount = 0;
-
+ shared_ptr<ITuner> mTuner;
int mTunerVersion = TUNER_HAL_VERSION_UNKNOWN;
+ Mutex mSharedFiltersLock;
+ map<string, shared_ptr<TunerFilter>> mSharedFilters;
+
+ static shared_ptr<TunerService> sTunerService;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERSERVICE_H
diff --git a/services/tuner/TunerTimeFilter.cpp b/services/tuner/TunerTimeFilter.cpp
index ea9da30..73cd6b4 100644
--- a/services/tuner/TunerTimeFilter.cpp
+++ b/services/tuner/TunerTimeFilter.cpp
@@ -18,97 +18,91 @@
#include "TunerTimeFilter.h"
-using ::android::hardware::tv::tuner::V1_0::Result;
-using ::android::hardware::tv::tuner::V1_1::Constant64Bit;
+#include <aidl/android/hardware/tv/tuner/Constant64Bit.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+using ::aidl::android::hardware::tv::tuner::Constant64Bit;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
-TunerTimeFilter::TunerTimeFilter(sp<ITimeFilter> timeFilter) {
+TunerTimeFilter::TunerTimeFilter(shared_ptr<ITimeFilter> timeFilter) {
mTimeFilter = timeFilter;
}
TunerTimeFilter::~TunerTimeFilter() {
- mTimeFilter = NULL;
+ mTimeFilter = nullptr;
}
-Status TunerTimeFilter::setTimeStamp(int64_t timeStamp) {
- if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::setTimeStamp(int64_t timeStamp) {
+ if (mTimeFilter == nullptr) {
ALOGE("ITimeFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mTimeFilter->setTimeStamp(timeStamp);
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ return mTimeFilter->setTimeStamp(timeStamp);
}
-Status TunerTimeFilter::clearTimeStamp() {
- if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::clearTimeStamp() {
+ if (mTimeFilter == nullptr) {
ALOGE("ITimeFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status = mTimeFilter->clearTimeStamp();
- if (status != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
- }
- return Status::ok();
+ return mTimeFilter->clearTimeStamp();
}
-Status TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
- if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
+ if (mTimeFilter == nullptr) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
ALOGE("ITimeFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status;
- mTimeFilter->getSourceTime(
- [&](Result r, uint64_t t) {
- status = r;
- *_aidl_return = t;
- });
- if (status != Result::SUCCESS) {
+ auto status = mTimeFilter->getSourceTime(_aidl_return);
+ if (!status.isOk()) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
- return Status::ok();
+ return status;
}
-Status TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
- if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
+ if (mTimeFilter == nullptr) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
ALOGE("ITimeFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result status;
- mTimeFilter->getTimeStamp(
- [&](Result r, uint64_t t) {
- status = r;
- *_aidl_return = t;
- });
- if (status != Result::SUCCESS) {
+ auto status = mTimeFilter->getTimeStamp(_aidl_return);
+ if (!status.isOk()) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
- return Status::ok();
+ return status;
}
-Status TunerTimeFilter::close() {
- if (mTimeFilter == NULL) {
+::ndk::ScopedAStatus TunerTimeFilter::close() {
+ if (mTimeFilter == nullptr) {
ALOGE("ITimeFilter is not initialized");
- return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mTimeFilter->close();
- mTimeFilter = NULL;
+ auto status = mTimeFilter->close();
+ mTimeFilter = nullptr;
- if (res != Result::SUCCESS) {
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
- }
- return Status::ok();
+ return status;
}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
} // namespace android
+} // namespace aidl
diff --git a/services/tuner/TunerTimeFilter.h b/services/tuner/TunerTimeFilter.h
index d675319..31a47cd 100644
--- a/services/tuner/TunerTimeFilter.h
+++ b/services/tuner/TunerTimeFilter.h
@@ -17,38 +17,40 @@
#ifndef ANDROID_MEDIA_TUNERFTIMEFILTER_H
#define ANDROID_MEDIA_TUNERFTIMEFILTER_H
+#include <aidl/android/hardware/tv/tuner/ITimeFilter.h>
#include <aidl/android/media/tv/tuner/BnTunerTimeFilter.h>
-#include <android/hardware/tv/tuner/1.0/ITimeFilter.h>
-#include <android/hardware/tv/tuner/1.1/types.h>
-#include <media/stagefright/foundation/ADebug.h>
#include <utils/Log.h>
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::tv::tuner::BnTunerTimeFilter;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+using ::aidl::android::hardware::tv::tuner::ITimeFilter;
using namespace std;
+namespace aidl {
namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
class TunerTimeFilter : public BnTunerTimeFilter {
public:
- TunerTimeFilter(sp<ITimeFilter> timeFilter);
+ TunerTimeFilter(shared_ptr<ITimeFilter> timeFilter);
virtual ~TunerTimeFilter();
- Status setTimeStamp(int64_t timeStamp) override;
- Status clearTimeStamp() override;
- Status getSourceTime(int64_t* _aidl_return) override;
- Status getTimeStamp(int64_t* _aidl_return) override;
- Status close() override;
+
+ ::ndk::ScopedAStatus setTimeStamp(int64_t in_timeStamp) override;
+ ::ndk::ScopedAStatus clearTimeStamp() override;
+ ::ndk::ScopedAStatus getSourceTime(int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus getTimeStamp(int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus close() override;
private:
- sp<ITimeFilter> mTimeFilter;
+ shared_ptr<ITimeFilter> mTimeFilter;
};
-} // namespace android
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
#endif // ANDROID_MEDIA_TUNERFTIMEFILTER_H
diff --git a/services/tuner/aidl/android/media/tv/OWNERS b/services/tuner/aidl/android/media/tv/OWNERS
index 0ceb8e8..bf9fe34 100644
--- a/services/tuner/aidl/android/media/tv/OWNERS
+++ b/services/tuner/aidl/android/media/tv/OWNERS
@@ -1,2 +1,2 @@
-nchalko@google.com
+hgchen@google.com
quxiangfang@google.com
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
index 73b00ae..fa326b2 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
@@ -16,6 +16,8 @@
package android.media.tv.tuner;
+import android.hardware.tv.tuner.DemuxFilterType;
+import android.hardware.tv.tuner.DvrType;
import android.media.tv.tuner.ITunerDvr;
import android.media.tv.tuner.ITunerDvrCallback;
import android.media.tv.tuner.ITunerFilter;
@@ -36,10 +38,15 @@
void setFrontendDataSource(in ITunerFrontend frontend);
/**
+ * Set a frontend resource by ID as data input of the demux
+ */
+ void setFrontendDataSourceById(in int frontendId);
+
+ /**
* Open a new filter in the demux
*/
- ITunerFilter openFilter(
- in int mainType, in int subtype, in int bufferSize, in ITunerFilterCallback cb);
+ ITunerFilter openFilter(in DemuxFilterType type, in int bufferSize,
+ in ITunerFilterCallback cb);
/**
* Open time filter of the demux.
@@ -59,7 +66,7 @@
/**
* Open a DVR (Digital Video Record) instance in the demux.
*/
- ITunerDvr openDvr(in int dvbType, in int bufferSize, in ITunerDvrCallback cb);
+    ITunerDvr openDvr(in DvrType dvrType, in int bufferSize, in ITunerDvrCallback cb);
/**
* Connect Conditional Access Modules (CAM) through Common Interface (CI).
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
index 7370eee..39d193c 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDescrambler.aidl
@@ -16,9 +16,9 @@
package android.media.tv.tuner;
+import android.hardware.tv.tuner.DemuxPid;
import android.media.tv.tuner.ITunerDemux;
import android.media.tv.tuner.ITunerFilter;
-import android.media.tv.tuner.TunerDemuxPid;
/**
* Tuner Demux interface handles tuner related operations.
@@ -39,12 +39,12 @@
/**
* Add packets' PID to the descrambler for descrambling.
*/
- void addPid(in TunerDemuxPid pid, in ITunerFilter optionalSourceFilter);
+ void addPid(in DemuxPid pid, in ITunerFilter optionalSourceFilter);
/**
* Remove packets' PID from the descrambler.
*/
- void removePid(in TunerDemuxPid pid, in ITunerFilter optionalSourceFilter);
+ void removePid(in DemuxPid pid, in ITunerFilter optionalSourceFilter);
/**
* Close a new interface of ITunerDescrambler.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
index 8f1601b..2c01c4e 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDvr.aidl
@@ -18,8 +18,8 @@
import android.hardware.common.fmq.MQDescriptor;
import android.hardware.common.fmq.SynchronizedReadWrite;
+import android.hardware.tv.tuner.DvrSettings;
import android.media.tv.tuner.ITunerFilter;
-import android.media.tv.tuner.TunerDvrSettings;
/**
* Tuner Dvr interface handles tuner related operations.
@@ -35,7 +35,7 @@
/**
* Configure the DVR.
*/
- void configure(in TunerDvrSettings settings);
+ void configure(in DvrSettings settings);
/**
* Attach one filter to DVR interface for recording.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
index e234fe5..3043d24 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDvrCallback.aidl
@@ -16,6 +16,9 @@
package android.media.tv.tuner;
+import android.hardware.tv.tuner.PlaybackStatus;
+import android.hardware.tv.tuner.RecordStatus;
+
/**
* TunerDvrCallback interface handles tuner dvr related callbacks.
*
@@ -25,10 +28,10 @@
/**
* Notify the client a new status of the demux's record.
*/
- void onRecordStatus(in int status);
+ void onRecordStatus(in RecordStatus status);
/**
* Notify the client a new status of the demux's playback.
*/
- void onPlaybackStatus(in int status);
+ void onPlaybackStatus(in PlaybackStatus status);
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
index 10d4c3b..dc40f03 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
@@ -19,8 +19,11 @@
import android.hardware.common.fmq.MQDescriptor;
import android.hardware.common.fmq.SynchronizedReadWrite;
import android.hardware.common.NativeHandle;
-import android.media.tv.tuner.TunerFilterConfiguration;
-import android.media.tv.tuner.TunerFilterSharedHandleInfo;
+import android.hardware.tv.tuner.DemuxFilterSettings;
+import android.hardware.tv.tuner.DemuxFilterType;
+import android.hardware.tv.tuner.AvStreamType;
+import android.hardware.tv.tuner.DemuxFilterMonitorEventType;
+import android.hardware.tv.tuner.FilterDelayHint;
/**
* Tuner Filter interface handles tuner related operations.
@@ -46,12 +49,12 @@
/**
* Configure the filter.
*/
- void configure(in TunerFilterConfiguration config);
+ void configure(in DemuxFilterSettings settings);
/**
* Configure the monitor event of the Filter.
*/
- void configureMonitorEvent(in int monitorEventType);
+ void configureMonitorEvent(in int monitorEventTypes);
/**
* Configure the context id of the IP Filter.
@@ -61,12 +64,12 @@
/**
* Configure the stream type of the media Filter.
*/
- void configureAvStreamType(in int avStreamType);
+ void configureAvStreamType(in AvStreamType avStreamType);
/**
* Get the a/v shared memory handle
*/
- TunerFilterSharedHandleInfo getAvSharedHandleInfo();
+ long getAvSharedHandle(out NativeHandle avMemory);
/**
* Release the handle reported by the HAL for AV memory.
@@ -97,4 +100,28 @@
* Close the filter.
*/
void close();
+
+ /**
+ * Acquire a new SharedFilter token.
+ *
+ * @return a token of the newly created SharedFilter instance.
+ */
+ String acquireSharedFilterToken();
+
+ /**
+ * Free a SharedFilter token.
+ *
+     * @param filterToken the SharedFilter token to be released; the token
+     *        becomes invalid after this call.
+ */
+ void freeSharedFilterToken(in String filterToken);
+
+ /**
+ * Get filter type.
+ *
+ * @return filter type.
+ */
+ DemuxFilterType getFilterType();
+
+ void setDelayHint(in FilterDelayHint hint);
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
index e7a52a7..6c53042 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
@@ -16,7 +16,8 @@
package android.media.tv.tuner;
-import android.media.tv.tuner.TunerFilterEvent;
+import android.hardware.tv.tuner.DemuxFilterEvent;
+import android.hardware.tv.tuner.DemuxFilterStatus;
/**
* TunerFilterCallback interface handles tuner filter related callbacks.
@@ -27,10 +28,10 @@
/**
* Notify the client a new status of a filter.
*/
- void onFilterStatus(int status);
+ void onFilterStatus(in DemuxFilterStatus status);
/**
* Notify the client that a new filter event happened.
*/
- void onFilterEvent(in TunerFilterEvent[] filterEvent);
+ void onFilterEvent(in DemuxFilterEvent[] events);
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
index ef0255a..771a647 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
@@ -1,5 +1,5 @@
/**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,10 +16,12 @@
package android.media.tv.tuner;
+import android.hardware.tv.tuner.FrontendScanType;
+import android.hardware.tv.tuner.FrontendSettings;
+import android.hardware.tv.tuner.FrontendStatus;
+import android.hardware.tv.tuner.FrontendStatusType;
import android.media.tv.tuner.ITunerFrontendCallback;
import android.media.tv.tuner.ITunerLnb;
-import android.media.tv.tuner.TunerFrontendSettings;
-import android.media.tv.tuner.TunerFrontendStatus;
/**
* Tuner Frontend interface handles frontend related operations.
@@ -39,7 +41,7 @@
*
* @param settings the settings to tune with.
*/
- void tune(in TunerFrontendSettings settings);
+ void tune(in FrontendSettings settings);
/**
* Stop the previous tuning.
@@ -52,7 +54,7 @@
* @param settings the settings to scan with.
* @param frontendScanType scan with given type.
*/
- void scan(in TunerFrontendSettings settings, in int frontendScanType);
+ void scan(in FrontendSettings settings, in FrontendScanType frontendScanType);
/**
* Stop the previous scanning.
@@ -93,12 +95,7 @@
/**
* Gets the statuses of the frontend.
*/
- TunerFrontendStatus[] getStatus(in int[] statusTypes);
-
- /**
- * Gets the 1.1 extended statuses of the frontend.
- */
- TunerFrontendStatus[] getStatusExtended_1_1(in int[] statusTypes);
+ FrontendStatus[] getStatus(in FrontendStatusType[] statusTypes);
/**
* Gets the id of the frontend.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
index c92f5ee..d0ab11d 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontendCallback.aidl
@@ -1,5 +1,5 @@
/**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,7 +16,9 @@
package android.media.tv.tuner;
-import android.media.tv.tuner.TunerFrontendScanMessage;
+import android.hardware.tv.tuner.FrontendEventType;
+import android.hardware.tv.tuner.FrontendScanMessage;
+import android.hardware.tv.tuner.FrontendScanMessageType;
/**
* TunerFrontendCallback interface handles tuner frontend related callbacks.
@@ -24,13 +26,14 @@
* {@hide}
*/
interface ITunerFrontendCallback {
- /**
+ /**
* Notify the client that a new event happened on the frontend.
*/
- void onEvent(in int frontendEventType);
+ void onEvent(in FrontendEventType frontendEventType);
/**
* notify the client of scan messages.
*/
- void onScanMessage(in int messageType, in TunerFrontendScanMessage message);
+ void onScanMessage(in FrontendScanMessageType messageType,
+ in FrontendScanMessage message);
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
index d62145e..79f0761 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerLnb.aidl
@@ -16,6 +16,9 @@
package android.media.tv.tuner;
+import android.hardware.tv.tuner.LnbPosition;
+import android.hardware.tv.tuner.LnbTone;
+import android.hardware.tv.tuner.LnbVoltage;
import android.media.tv.tuner.ITunerLnbCallback;
/**
@@ -32,17 +35,17 @@
/**
* Set the lnb's power voltage.
*/
- void setVoltage(in int voltage);
+ void setVoltage(in LnbVoltage voltage);
/**
* Set the lnb's tone mode.
*/
- void setTone(in int tone);
+ void setTone(in LnbTone tone);
/**
* Select the lnb's position.
*/
- void setSatellitePosition(in int position);
+ void setSatellitePosition(in LnbPosition position);
/**
* Sends DiSEqC (Digital Satellite Equipment Control) message.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
index 117352f..2b6eb5f 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerLnbCallback.aidl
@@ -16,6 +16,8 @@
package android.media.tv.tuner;
+import android.hardware.tv.tuner.LnbEventType;
+
/**
* TuneLnbCallback interface handles tuner lnb related callbacks.
*
@@ -25,7 +27,7 @@
/**
* Notify the client that a new event happened on the Lnb.
*/
- void onEvent(in int lnbEventType);
+ void onEvent(in LnbEventType lnbEventType);
/**
* notify the client of new DiSEqC message.
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
index 755b152..e6a1a5c 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,16 +16,14 @@
package android.media.tv.tuner;
-import android.hardware.common.fmq.MQDescriptor;
-import android.hardware.common.fmq.SynchronizedReadWrite;
-import android.hardware.common.fmq.UnsynchronizedWrite;
+import android.hardware.tv.tuner.DemuxCapabilities;
+import android.hardware.tv.tuner.FrontendInfo;
import android.media.tv.tuner.ITunerDemux;
import android.media.tv.tuner.ITunerDescrambler;
+import android.media.tv.tuner.ITunerFilter;
+import android.media.tv.tuner.ITunerFilterCallback;
import android.media.tv.tuner.ITunerFrontend;
import android.media.tv.tuner.ITunerLnb;
-import android.media.tv.tuner.TunerDemuxCapabilities;
-import android.media.tv.tuner.TunerFrontendDtmbCapabilities;
-import android.media.tv.tuner.TunerFrontendInfo;
/**
* TunerService interface handles tuner related operations.
@@ -33,8 +31,8 @@
* {@hide}
*/
//@VintfStability
+@SuppressWarnings(value={"out-array"})
interface ITunerService {
-
/**
* Gets frontend IDs.
*/
@@ -43,15 +41,10 @@
/**
* Retrieve the frontend's information.
*
- * @param frontendHandle the handle of the frontend granted by TRM.
+ * @param frontendId the ID of the frontend.
* @return the information of the frontend.
*/
- TunerFrontendInfo getFrontendInfo(in int frontendHandle);
-
- /**
- * Get Dtmb Frontend Capabilities.
- */
- TunerFrontendDtmbCapabilities getFrontendDtmbCapabilities(in int id);
+ FrontendInfo getFrontendInfo(in int frontendId);
/**
* Open a Tuner Frontend interface.
@@ -87,7 +80,7 @@
*
* @return the demux’s capabilities.
*/
- TunerDemuxCapabilities getDemuxCaps();
+ DemuxCapabilities getDemuxCaps();
/* Open a new interface of ITunerDescrambler given a descramblerHandle.
*
@@ -102,4 +95,13 @@
* value is unknown version 0.
*/
int getTunerHalVersion();
+
+ /**
+ * Open a new SharedFilter instance of ITunerFilter.
+ *
+ * @param filterToken the SharedFilter token created by ITunerFilter.
+     * @param cb the ITunerFilterCallback used to receive callback events.
+ * @return a newly created ITunerFilter interface.
+ */
+ ITunerFilter openSharedFilter(in String filterToken, in ITunerFilterCallback cb);
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
deleted file mode 100644
index df3374a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Extra Meta Data from AD (Audio Descriptor) according to ETSI TS 101 154 V2.1.1.
- *
- * {@hide}
- */
-parcelable TunerAudioExtraMetaData {
- byte adFade;
-
- byte adPan;
-
- byte versionTextTag;
-
- byte adGainCenter;
-
- byte adGainFront;
-
- byte adGainSurround;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
deleted file mode 100644
index 71ab151..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Tuner Demux capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerDemuxCapabilities {
- int numDemux;
-
- int numRecord;
-
- int numPlayback;
-
- int numTsFilter;
-
- int numSectionFilter;
-
- int numAudioFilter;
-
- int numVideoFilter;
-
- int numPesFilter;
-
- int numPcrFilter;
-
- int numBytesInSectionFilter;
-
- int filterCaps;
-
- int[] linkCaps;
-
- boolean bTimeFilter;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
deleted file mode 100644
index b65f404..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Demux ip address configure.
- *
- * {@hide}
- */
-parcelable TunerDemuxIpAddress {
- boolean isIpV6;
-
- byte[] addr;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
deleted file mode 100644
index b244388..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerDemuxIpAddress;
-
-/**
- * Filter Settings for an Ip filter.
- *
- * {@hide}
- */
-parcelable TunerDemuxIpAddressSettings {
- TunerDemuxIpAddress srcIpAddress;
-
- TunerDemuxIpAddress dstIpAddress;
-
- char srcPort;
-
- char dstPort;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
deleted file mode 100644
index 8b238b6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Demux PID interface.
- *
- * {@hide}
- */
-union TunerDemuxPid {
- char tPid;
-
- char mmtpPid;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl
deleted file mode 100644
index 4ec4d75..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDvrSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvr Settings interface.
- *
- * {@hide}
- */
-parcelable TunerDvrSettings {
- int statusMask;
-
- int lowThreshold;
-
- int highThreshold;
-
- int dataFormat;
-
- int packetSize;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
deleted file mode 100644
index 4c9e3af..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an ALP filter.
- *
- * {@hide}
- */
-parcelable TunerFilterAlpConfiguration {
- byte packetType;
-
- byte lengthType;
-
- TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl
deleted file mode 100644
index 6bf88f0..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAvSettings.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for a Video and Audio.
- *
- * {@hide}
- */
-parcelable TunerFilterAvSettings {
- /**
- * true if the filter output goes to decoder directly in pass through mode.
- */
- boolean isPassthrough;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
deleted file mode 100644
index 808cfd1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterAlpConfiguration;
-import android.media.tv.tuner.TunerFilterIpConfiguration;
-import android.media.tv.tuner.TunerFilterMmtpConfiguration;
-import android.media.tv.tuner.TunerFilterTlvConfiguration;
-import android.media.tv.tuner.TunerFilterTsConfiguration;
-
-/**
- * Filter configuration.
- *
- * {@hide}
- */
-union TunerFilterConfiguration {
- TunerFilterTsConfiguration ts;
-
- TunerFilterMmtpConfiguration mmtp;
-
- TunerFilterIpConfiguration ip;
-
- TunerFilterTlvConfiguration tlv;
-
- TunerFilterAlpConfiguration alp;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
deleted file mode 100644
index b971dd3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Download data.
- *
- * {@hide}
- */
-parcelable TunerFilterDownloadEvent {
- int itemId;
-
- /**
- * MPU sequence number of filtered data (only for MMTP)
- */
- int mpuSequenceNumber;
-
- int itemFragmentIndex;
-
- int lastItemFragmentIndex;
-
- /**
- * Data size in bytes of filtered data
- */
- char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
deleted file mode 100644
index 417a5fe..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for downloading.
- *
- * {@hide}
- */
-parcelable TunerFilterDownloadSettings {
- int downloadId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
deleted file mode 100644
index 1305510..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterDownloadEvent;
-import android.media.tv.tuner.TunerFilterIpPayloadEvent;
-import android.media.tv.tuner.TunerFilterMediaEvent;
-import android.media.tv.tuner.TunerFilterMmtpRecordEvent;
-import android.media.tv.tuner.TunerFilterMonitorEvent;
-import android.media.tv.tuner.TunerFilterPesEvent;
-import android.media.tv.tuner.TunerFilterSectionEvent;
-import android.media.tv.tuner.TunerFilterTemiEvent;
-import android.media.tv.tuner.TunerFilterTsRecordEvent;
-
-/**
- * Filter events.
- *
- * {@hide}
- */
-union TunerFilterEvent {
- TunerFilterMediaEvent media;
-
- TunerFilterSectionEvent section;
-
- TunerFilterPesEvent pes;
-
- TunerFilterTsRecordEvent tsRecord;
-
- TunerFilterMmtpRecordEvent mmtpRecord;
-
- TunerFilterDownloadEvent download;
-
- TunerFilterIpPayloadEvent ipPayload;
-
- TunerFilterTemiEvent temi;
-
- TunerFilterMonitorEvent monitor;
-
- int startId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
deleted file mode 100644
index 8b4d889..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerDemuxIpAddressSettings;
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for a ip filter.
- *
- * {@hide}
- */
-parcelable TunerFilterIpConfiguration {
- TunerDemuxIpAddressSettings ipAddr;
-
- TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
deleted file mode 100644
index d5bda93..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for IP payload data.
- *
- * {@hide}
- */
-parcelable TunerFilterIpPayloadEvent {
- /**
- * Data size in bytes of ip data
- */
- char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
deleted file mode 100644
index c3dbce9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.hardware.common.NativeHandle;
-import android.media.tv.tuner.TunerAudioExtraMetaData;
-
-/**
- * Filter Event for Audio or Video Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMediaEvent {
- char streamId;
-
- /**
- * true if PTS is present in PES header.
- */
- boolean isPtsPresent;
-
- /**
- * Presentation Time Stamp for audio or video frame. It based on 90KHz has
- * the same format as PTS (Presentation Time Stamp).
- */
- long pts;
-
- /**
- * Data size in bytes of audio or video frame
- */
- int dataLength;
-
- /**
- * The offset in the memory block which is shared among multiple
- * MediaEvents.
- */
- int offset;
-
- /**
- * A handle associated to the memory where audio or video data stays.
- */
- NativeHandle avMemory;
-
- /**
- * True if the avMemory is in secure area, and isn't mappable.
- */
- boolean isSecureMemory;
-
- /**
- * An Id is used by HAL to provide additional information for AV data.
- * For secure audio, it's the audio handle used by Audio Track.
- */
- long avDataId;
-
- /**
- * MPU sequence number of filtered data (only for MMTP)
- */
- int mpuSequenceNumber;
-
- boolean isPesPrivateData;
-
- /**
- * If TunerAudioExtraMetaData field is valid or not
- */
- boolean isAudioExtraMetaData;
-
- /**
- * Only valid when isAudioExtraMetaData is true
- */
- TunerAudioExtraMetaData audio;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
deleted file mode 100644
index 162ca8e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for an mmtp filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMmtpConfiguration {
- char mmtpPid;
-
- TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
deleted file mode 100644
index b8871cf..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for an MMTP Record Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterMmtpRecordEvent {
- int scHevcIndexMask;
-
- /**
- * Byte number from beginning of the filter's output
- */
- long byteNumber;
-
- /**
- * If the current event contains extended information or not
- */
- boolean isExtended;
-
- /**
- * The Presentation Time Stamp(PTS) for the audio or video frame. It is based on 90KHz
- * and has the same format as the PTS in ISO/IEC 13818-1.
- */
- long pts;
-
- /**
- * MPU sequence number of the filtered data. This is only used for MMTP.
- */
- int mpuSequenceNumber;
-
- /**
- * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
- */
- int firstMbInSlice;
-
- /**
- * TS index mask.
- */
- int tsIndexMask;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
deleted file mode 100644
index 31ab5e6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter monitor events.
- *
- * {@hide}
- */
-union TunerFilterMonitorEvent {
- /**
- * New scrambling status.
- */
- int scramblingStatus;
-
- /**
- * New cid for the IP filter.
- */
- int cid;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
deleted file mode 100644
index 312f314..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Settings for Pes Data.
- *
- * {@hide}
- */
-parcelable TunerFilterPesDataSettings {
- char streamId;
-
- boolean isRaw;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
deleted file mode 100644
index dc1ecc6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for PES Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterPesEvent {
- char streamId;
-
- /**
- * Data size in bytes of PES data
- */
- char dataLength;
-
- /**
- * MPU sequence number of filtered data
- */
- int mpuSequenceNumber;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
deleted file mode 100644
index 29be624..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterScIndexMask;
-
-/**
- * Filter Settings for recording.
- *
- * {@hide}
- */
-parcelable TunerFilterRecordSettings {
- int tsIndexMask;
-
- int scIndexType;
-
- TunerFilterScIndexMask scIndexMask;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
deleted file mode 100644
index ed37fce..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter SC Index Mask
- *
- * {@hide}
- */
-union TunerFilterScIndexMask {
- int sc;
-
- int scHevc;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
deleted file mode 100644
index dd4f842..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Bits settings of a section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionBits {
- byte[] filter;
-
- byte[] mask;
-
- byte[] mode;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
deleted file mode 100644
index 00aabe4..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSectionBits;
-import android.media.tv.tuner.TunerFilterSectionTableInfo;
-
-/**
- * Section filter condition settings.
- *
- * {@hide}
- */
-union TunerFilterSectionCondition {
- TunerFilterSectionBits sectionBits;
-
- TunerFilterSectionTableInfo tableInfo;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
deleted file mode 100644
index 5f20926..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionEvent {
- /**
- * Table ID of filtered data
- */
- char tableId;
-
- /**
- * Version number of filtered data
- */
- char version;
-
- /**
- * Section number of filtered data
- */
- char sectionNum;
-
- /**
- * Data size in bytes of filtered data
- */
- char dataLength;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
deleted file mode 100644
index 22129b6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSectionCondition;
-
-/**
- * Filter Settings for a section filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionSettings {
- TunerFilterSectionCondition condition;
-
- boolean isCheckCrc;
-
- boolean isRepeat;
-
- boolean isRaw;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
deleted file mode 100644
index cc78c9d..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Table info settings of a section Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterSectionTableInfo {
- char tableId;
-
- char version;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
deleted file mode 100644
index eb7eaa5..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterAvSettings;
-import android.media.tv.tuner.TunerFilterDownloadSettings;
-import android.media.tv.tuner.TunerFilterPesDataSettings;
-import android.media.tv.tuner.TunerFilterRecordSettings;
-import android.media.tv.tuner.TunerFilterSectionSettings;
-
-/**
- * Filter Settings.
- *
- * {@hide}
- */
-union TunerFilterSettings {
- boolean nothing;
-
- TunerFilterAvSettings av;
-
- TunerFilterSectionSettings section;
-
- TunerFilterPesDataSettings pesData;
-
- TunerFilterRecordSettings record;
-
- TunerFilterDownloadSettings download;
-
- boolean isPassthrough;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
deleted file mode 100644
index 122dfc3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.hardware.common.NativeHandle;
-
-/**
- * Filter Shared Handle Information.
- *
- * {@hide}
- */
-parcelable TunerFilterSharedHandleInfo {
- NativeHandle handle;
- long size;
-}
\ No newline at end of file
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
deleted file mode 100644
index 4c4e993..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Filter Event for Timed External Media Information (TEMI) data.
- *
- * {@hide}
- */
-parcelable TunerFilterTemiEvent {
- /**
- * Presentation Time Stamp for audio or video frame. It based on 90KHz has
- * the same format as PTS (Presentation Time Stamp) in ISO/IEC 13818-1.
- */
- long pts;
-
- /**
- * TEMI Descriptor Tag
- */
- byte descrTag;
-
- /**
- * TEMI Descriptor
- */
- byte[] descrData;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
deleted file mode 100644
index 0b237b4..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for a tlv filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTlvConfiguration {
- byte packetType;
-
- boolean isCompressedIpPacket;
-
- TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
deleted file mode 100644
index 2e386e6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterSettings;
-
-/**
- * Filter Settings for a TS filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTsConfiguration {
- char tpid;
-
- TunerFilterSettings filterSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
deleted file mode 100644
index c52a749..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFilterScIndexMask;
-
-/**
- * Filter Event for TS Record Filter.
- *
- * {@hide}
- */
-parcelable TunerFilterTsRecordEvent {
- char pid;
-
- int tsIndexMask;
-
- /**
- * Indexes of record output
- */
- TunerFilterScIndexMask scIndexMask;
-
- /**
- * Byte number from beginning of the filter's output
- */
- long byteNumber;
-
- /**
- * If the current event contains extended information or not
- */
- boolean isExtended;
-
- /**
- * The Presentation Time Stamp(PTS) for the audio or video frame. It is based on 90KHz
- * and has the same format as the PTS in ISO/IEC 13818-1.
- */
- long pts;
-
- /**
- * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
- */
- int firstMbInSlice;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl
deleted file mode 100644
index 74bf04e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogCapabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Analog Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAnalogCapabilities {
- /**
- * Signal Type capability
- */
- int typeCap;
-
- /**
- * Standard Interchange Format (SIF) capability
- */
- int sifStandardCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
deleted file mode 100644
index 40cd8c9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Analog Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAnalogSettings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- int signalType;
-
- /**
- * Standard Interchange Format (SIF) setting
- */
- int sifStandard;
-
- /**
- * Fields after isExtended are only valid when isExtended is true
- */
- boolean isExtended;
-
- int aftFlag;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl
deleted file mode 100644
index 6c9be77..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Capabilities.aidl
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ATSC3 Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3Capabilities {
- /**
- * Bandwidth capability
- */
- int bandwidthCap;
-
- /**
- * Modulation capability
- */
- int modulationCap;
-
- /**
- * TimeInterleaveMode capability
- */
- int timeInterleaveModeCap;
-
- /**
- * CodeRate capability
- */
- int codeRateCap;
-
- /**
- * FEC capability
- */
- int fecCap;
-
- /**
- * Demodulator Output Format capability
- */
- int demodOutputFormatCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl
deleted file mode 100644
index b29e1f7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3PlpSettings.aidl
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3PlpSettings {
- int plpId;
-
- int modulation;
-
- int interleaveMode;
-
- int codeRate;
-
- /**
- * Forward Error Correction Type.
- */
- int fec;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl
deleted file mode 100644
index 32fb8c7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtsc3Settings.aidl
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAtsc3PlpSettings;
-
-/**
- * Atsc3 Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtsc3Settings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- /**
- * Bandwidth of tuning band.
- */
- int bandwidth;
-
- int demodOutputFormat;
-
- TunerFrontendAtsc3PlpSettings[] plpSettings;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl
deleted file mode 100644
index 2b6c2fc..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscCapabilities.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ATSC Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtscCapabilities {
- /**
- * Modulation capability
- */
- int modulationCap;
-}
\ No newline at end of file
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl
deleted file mode 100644
index c7a8c07..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAtscSettings.aidl
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendAtscSettings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- int modulation;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl
deleted file mode 100644
index b880c60..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableCapabilities.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Cable(DVBC) Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendCableCapabilities {
- /**
- * Modulation capability
- */
- int modulationCap;
-
- /**
- * Code Rate capability
- */
- long codeRateCap; // inner FEC will converge to codeRate
-
- /**
- * Annex capability
- */
- int annexCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
deleted file mode 100644
index b9bcf29..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Cable Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendCableSettings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- int modulation;
-
- /**
- * Inner Forward Error Correction type as specified in ETSI EN 300 468 V1.15.1
- * and ETSI EN 302 307-2 V1.1.1.
- */
- long innerFec;
-
- /**
- * Symbols per second
- */
- int symbolRate;
-
- /**
- * Outer Forward Error Correction (FEC) Type.
- */
- int outerFec;
-
- int annex;
-
- /**
- * Spectral Inversion Type.
- */
- int spectralInversion;
-
- /**
- * Fields after isExtended are only valid when isExtended is true
- */
- boolean isExtended;
-
- int interleaveMode;
-
- int bandwidth;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl
deleted file mode 100644
index 19f31f1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCapabilities.aidl
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAnalogCapabilities;
-import android.media.tv.tuner.TunerFrontendAtscCapabilities;
-import android.media.tv.tuner.TunerFrontendAtsc3Capabilities;
-import android.media.tv.tuner.TunerFrontendCableCapabilities;
-import android.media.tv.tuner.TunerFrontendDvbsCapabilities;
-import android.media.tv.tuner.TunerFrontendDvbtCapabilities;
-import android.media.tv.tuner.TunerFrontendIsdbsCapabilities;
-import android.media.tv.tuner.TunerFrontendIsdbs3Capabilities;
-import android.media.tv.tuner.TunerFrontendIsdbtCapabilities;
-
-/**
- * Frontend Capabilities interface.
- *
- * Use a group of vectors as the workaround for Union structure that is not fully supported
- * in AIDL currently.
- *
- * Client may use FrontendInfo.type as the discriminar to check the corresponding vector. If
- * the vector is not null, it contains valid value.
- *
- * {@hide}
- */
-union TunerFrontendCapabilities {
- /**
- * Analog Frontend Capabilities
- */
- TunerFrontendAnalogCapabilities analogCaps;
-
- /**
- * ATSC Frontend Capabilities
- */
- TunerFrontendAtscCapabilities atscCaps;
-
- /**
- * ATSC3 Frontend Capabilities
- */
- TunerFrontendAtsc3Capabilities atsc3Caps;
-
- /**
- * Cable Frontend Capabilities
- */
- TunerFrontendCableCapabilities cableCaps;
-
- /**
- * DVBS Frontend Capabilities
- */
- TunerFrontendDvbsCapabilities dvbsCaps;
-
- /**
- * DVBT Frontend Capabilities
- */
- TunerFrontendDvbtCapabilities dvbtCaps;
-
- /**
- * ISDB-S Frontend Capabilities
- */
- TunerFrontendIsdbsCapabilities isdbsCaps;
-
- /**
- * ISDB-S3 Frontend Capabilities
- */
- TunerFrontendIsdbs3Capabilities isdbs3Caps;
-
- /**
- * ISDB-T Frontend Capabilities
- */
- TunerFrontendIsdbtCapabilities isdbtCaps;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl
deleted file mode 100644
index e8e4933..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbCapabilities.aidl
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DTMB Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDtmbCapabilities {
- int transmissionModeCap;
-
- int bandwidthCap;
-
- int modulationCap;
-
- int codeRateCap;
-
- int guardIntervalCap;
-
- int interleaveModeCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
deleted file mode 100644
index 45e7ff9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DTMB Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDtmbSettings {
- int frequency;
-
- int transmissionMode;
-
- int bandwidth;
-
- int modulation;
-
- int codeRate;
-
- int guardInterval;
-
- int interleaveMode;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl
deleted file mode 100644
index 5e4322c..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCapabilities.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DVBS Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsCapabilities {
- /**
- * Modulation capability
- */
- int modulationCap;
-
- /**
- * Code Rate capability
- */
- long codeRateCap; // inner FEC will converge to codeRate
-
- /**
- * Sub standards capability
- */
- int standard;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl
deleted file mode 100644
index 59b7de3..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsCodeRate.aidl
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvbs Frontend CodeRate interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsCodeRate {
- /**
- * Inner Forward Error Correction type as specified in ETSI EN 300 468 V1.15.1
- * and ETSI EN 302 307-2 V1.1.1.
- */
- long fec;
-
- boolean isLinear;
-
- /**
- * true if enable short frame
- */
- boolean isShortFrames;
-
- /**
- * bits number in 1000 symbol. 0 if use the default.
- */
- int bitsPer1000Symbol;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
deleted file mode 100644
index ec3e4b9..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendDvbsCodeRate;
-
-/**
- * Dvbs Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbsSettings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- int modulation;
-
- TunerFrontendDvbsCodeRate codeRate;
-
- int symbolRate;
-
- /**
- * Roll off type.
- */
- int rolloff;
-
- /**
- * Pilot mode.
- */
- int pilot;
-
- int inputStreamId;
-
- int standard;
-
- /**
- * Vcm mode.
- */
- int vcm;
-
- /**
- * Fields after isExtended are only valid when isExtended is true
- */
- boolean isExtended;
-
- int scanType;
-
- boolean isDiseqcRxMessage;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl
deleted file mode 100644
index 73f16dd..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtCapabilities.aidl
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * DVBT Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbtCapabilities {
- /**
- * Transmission Mode capability
- */
- int transmissionModeCap;
-
- /**
- * Bandwidth capability
- */
- int bandwidthCap;
-
- /**
- * Constellation capability
- */
- int constellationCap;
-
- /**
- * Code Rate capability
- */
- int codeRateCap;
-
- /**
- * Hierarchy Type capability
- */
- int hierarchyCap;
-
- /**
- * Guard Interval capability
- */
- int guardIntervalCap;
-
- /**
- * T2 Support capability
- */
- boolean isT2Supported;
-
- /**
- * Miso Support capability
- */
- boolean isMisoSupported;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
deleted file mode 100644
index 14c942a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Dvbt Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendDvbtSettings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- int transmissionMode;
-
- int bandwidth;
-
- int constellation;
-
- int hierarchy;
-
- /**
- * Code Rate for High Priority level
- */
- int hpCodeRate;
-
- /**
- * Code Rate for Low Priority level
- */
- int lpCodeRate;
-
- int guardInterval;
-
- boolean isHighPriority;
-
- int standard;
-
- boolean isMiso;
-
- /**
- * Physical Layer Pipe (PLP) mode
- */
- int plpMode;
-
- /**
- * Physical Layer Pipe (PLP) Id
- */
- int plpId;
-
- /**
- * Physical Layer Pipe (PLP) Group Id
- */
- int plpGroupId;
-
- /**
- * Fields after isExtended are only valid when isExtended is true
- */
- boolean isExtended;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl
deleted file mode 100644
index 4bccd56..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendInfo.aidl
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendCapabilities;
-
-/**
- * FrontendInfo interface that carries tuner frontend information.
- *
- * <p>This is used to update the TunerResourceManager and pass Frontend
- * information from HAL to the client side.
- *
- * {@hide}
- */
-parcelable TunerFrontendInfo {
- /**
- * Frontend Handle
- */
- int handle;
-
- /**
- * Frontend Type
- */
- int type;
-
- /**
- * Minimum Frequency in Hertz
- */
- int minFrequency;
-
- /**
- * Maximum Frequency in Hertz
- */
- int maxFrequency;
-
- /**
- * Minimum symbols per second
- */
- int minSymbolRate;
-
- /**
- * Maximum symbols per second
- */
- int maxSymbolRate;
-
- /**
- * Range in Hertz
- */
- int acquireRange;
-
- /**
- * Frontends are assigned with the same exclusiveGroupId if they can't
- * function at same time. For instance, they share same hardware module.
- */
- int exclusiveGroupId;
-
- /**
- * A list of supported status types which client can inquiry
- */
- int[] statusCaps;
-
- /**
- * Frontend Capabilities
- */
- TunerFrontendCapabilities caps;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl
deleted file mode 100644
index 84dd67a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Capabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-S3 Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbs3Capabilities {
- /**
- * Modulation capability
- */
- int modulationCap;
-
- /**
- * Code Rate capability
- */
- int codeRateCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
deleted file mode 100644
index 9a11fd5..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbs3 Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbs3Settings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- char streamId;
-
- int streamIdType;
-
- int modulation;
-
- int codeRate;
-
- /**
- * Symbols per second
- */
- int symbolRate;
-
- int rolloff;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl
deleted file mode 100644
index 15dfdf7..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsCapabilities.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-S Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbsCapabilities {
- /**
- * Modulation capability
- */
- int modulationCap;
-
- /**
- * Code Rate capability
- */
- int codeRateCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
deleted file mode 100644
index dff9f4a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbs Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbsSettings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- char streamId;
-
- int streamIdType;
-
- int modulation;
-
- int codeRate;
-
- /**
- * Symbols per second
- */
- int symbolRate;
-
- int rolloff;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl
deleted file mode 100644
index c9295d8..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtCapabilities.aidl
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * ISDB-T Frontend Capabilities interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbtCapabilities {
- /**
- * ISDB-T Mode capability
- */
- int modeCap;
-
- /**
- * Bandwidth capability
- */
- int bandwidthCap;
-
- /**
- * Modulation capability
- */
- int modulationCap;
-
- /**
- * Code Rate capability
- */
- int codeRateCap;
-
- /**
- * Guard Interval capability
- */
- int guardIntervalCap;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl
deleted file mode 100644
index 191f3a6..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbtSettings.aidl
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Isdbt Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendIsdbtSettings {
- /**
- * Signal frequency in Hertz
- */
- int frequency;
-
- int modulation;
-
- int bandwidth;
-
- int mode;
-
- int codeRate;
-
- int guardInterval;
-
- int serviceAreaId;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
deleted file mode 100644
index 1b8fcbb..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Info.
- *
- * {@hide}
- */
-parcelable TunerFrontendScanAtsc3PlpInfo {
- byte plpId;
-
- boolean llsFlag;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl
deleted file mode 100644
index 9921ca1..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanMessage.aidl
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendScanAtsc3PlpInfo;
-
-/**
- * Tuner Frontend Scan Message interface.
- *
- * {@hide}
- */
-union TunerFrontendScanMessage {
- boolean isLocked;
-
- boolean isEnd;
-
- byte progressPercent;
-
- int[] frequencies;
-
- int[] symbolRates;
-
- int hierarchy;
-
- int analogType;
-
- byte[] plpIds;
-
- byte[] groupIds;
-
- char[] inputStreamIds;
-
- int std;
-
- TunerFrontendScanAtsc3PlpInfo[] atsc3PlpInfos;
-
- int modulation;
-
- int annex;
-
- boolean isHighPriority;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
deleted file mode 100644
index 70a5f3e..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendUnionSettings;
-
-/**
- * Frontend Settings interface.
- *
- * {@hide}
- */
-parcelable TunerFrontendSettings {
- TunerFrontendUnionSettings settings;
-
- boolean isExtended;
-
- int endFrequency;
-
- int inversion;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
deleted file mode 100644
index 2b3c01b..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendStatusAtsc3PlpInfo;
-
-/**
- * Tuner Frontend Status interface.
- *
- * {@hide}
- */
-union TunerFrontendStatus {
- /**
- * Lock status for Demod in True/False.
- */
- boolean isDemodLocked;
-
- /**
- * SNR value measured by 0.001 dB.
- */
- int snr;
-
- /**
- * The number of error bits per 1 billion bits.
- */
- int ber;
-
- /**
- * The number of error packages per 1 billion packages.
- */
- int per;
-
- /**
- * The number of error bits per 1 billion bits before FEC.
- */
- int preBer;
-
- /**
- * Signal Quality in percent.
- */
- int signalQuality;
-
- /**
- * Signal Strength measured by 0.001 dBm.
- */
- int signalStrength;
-
- /**
- * Symbols per second
- */
- int symbolRate;
-
- long innerFec;
-
- /**
- * Check frontend type to decide the hidl type value
- */
- int modulation;
-
- int inversion;
-
- int lnbVoltage;
-
- byte plpId;
-
- boolean isEWBS;
-
- /**
- * AGC value is normalized from 0 to 255.
- */
- byte agc;
-
- boolean isLnaOn;
-
- boolean[] isLayerError;
-
- /**
- * MER value measured by 0.001 dB
- */
- int mer;
-
- /**
- * Frequency difference in Hertz.
- */
- int freqOffset;
-
- int hierarchy;
-
- boolean isRfLocked;
-
- /**
- * A list of PLP status for tuned PLPs for ATSC3 frontend.
- */
- TunerFrontendStatusAtsc3PlpInfo[] plpInfo;
-
- // 1.1 Extension Starting
-
- /**
- * Extended modulation status. Check frontend type to decide the hidl type value.
- */
- int[] modulations;
-
- /**
- * Extended bit error ratio status.
- */
- int[] bers;
-
- /**
- * Extended code rate status.
- */
- long[] codeRates;
-
- /**
- * Extended bandwidth status. Check frontend type to decide the hidl type value.
- */
- int bandwidth;
-
- /**
- * Extended guard interval status. Check frontend type to decide the hidl type value.
- */
- int interval;
-
- /**
- * Extended transmission mode status. Check frontend type to decide the hidl type value.
- */
- int transmissionMode;
-
- /**
- * Uncorrectable Error Counts of the frontend's Physical Layer Pipe (PLP)
- * since the last tune operation.
- */
- int uec;
-
- /**
- * The current DVB-T2 system id status.
- */
- char systemId;
-
- /**
- * Frontend Interleaving Modes. Check frontend type to decide the hidl type value.
- */
- int[] interleaving;
-
- /**
- * Segments in ISDB-T Specification of all the channels.
- */
- byte[] isdbtSegment;
-
- /**
- * Transport Stream Data Rate in BPS of the current channel.
- */
- int[] tsDataRate;
-
- /**
- * Roll Off Type status of the frontend. Check frontend type to decide the hidl type value.
- */
- int rollOff;
-
- /**
- * If the frontend currently supports MISO or not.
- */
- boolean isMiso;
-
- /**
- * If the frontend code rate is linear or not.
- */
- boolean isLinear;
-
- /**
- * If short frames are enabled or not.
- */
- boolean isShortFrames;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
deleted file mode 100644
index 4116c34..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-/**
- * Atsc3 Frontend Physical Layer Pipe Info in Frontend status.
- *
- * {@hide}
- */
-parcelable TunerFrontendStatusAtsc3PlpInfo {
- /**
- * PLP Id value.
- */
- byte plpId;
-
- /**
- * Demod Lock/Unlock status of this particular PLP.
- */
- boolean isLocked;
-
- /**
- * Uncorrectable Error Counts (UEC) of this particular PLP since last tune operation.
- */
- int uec;
-}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
deleted file mode 100644
index c362c2a..0000000
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright 2020, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.tv.tuner;
-
-import android.media.tv.tuner.TunerFrontendAnalogSettings;
-import android.media.tv.tuner.TunerFrontendAtscSettings;
-import android.media.tv.tuner.TunerFrontendAtsc3Settings;
-import android.media.tv.tuner.TunerFrontendCableSettings;
-import android.media.tv.tuner.TunerFrontendDtmbSettings;
-import android.media.tv.tuner.TunerFrontendDvbsSettings;
-import android.media.tv.tuner.TunerFrontendDvbtSettings;
-import android.media.tv.tuner.TunerFrontendIsdbsSettings;
-import android.media.tv.tuner.TunerFrontendIsdbs3Settings;
-import android.media.tv.tuner.TunerFrontendIsdbtSettings;
-
-/**
- * Frontend Settings Union interface.
- *
- * {@hide}
- */
-union TunerFrontendUnionSettings {
- TunerFrontendAnalogSettings analog;
-
- TunerFrontendAtscSettings atsc;
-
- TunerFrontendAtsc3Settings atsc3;
-
- TunerFrontendCableSettings cable;
-
- TunerFrontendDvbsSettings dvbs;
-
- TunerFrontendDvbtSettings dvbt;
-
- TunerFrontendIsdbsSettings isdbs;
-
- TunerFrontendIsdbs3Settings isdbs3;
-
- TunerFrontendIsdbtSettings isdbt;
-
- TunerFrontendDtmbSettings dtmb;
-}
diff --git a/services/tuner/hidl/TunerHidlDemux.cpp b/services/tuner/hidl/TunerHidlDemux.cpp
new file mode 100644
index 0000000..a8151d2
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDemux.cpp
@@ -0,0 +1,278 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDemux"
+
+#include "TunerHidlDemux.h"
+
+#include "TunerHidlDvr.h"
+#include "TunerHidlFilter.h"
+#include "TunerHidlTimeFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSubType;
+
+using HidlDemuxAlpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterType;
+using HidlDemuxFilterMainType = ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
+using HidlDemuxFilterType = ::android::hardware::tv::tuner::V1_0::DemuxFilterType;
+using HidlDemuxIpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxIpFilterType;
+using HidlDemuxMmtpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
+using HidlDemuxTlvFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterType;
+using HidlDemuxTsFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
+using HidlDvrType = ::android::hardware::tv::tuner::V1_0::DvrType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDemux::TunerHidlDemux(sp<IDemux> demux, int id) {
+ mDemux = demux;
+ mDemuxId = id;
+}
+
+TunerHidlDemux::~TunerHidlDemux() {
+ mDemux = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::setFrontendDataSource(
+ const shared_ptr<ITunerFrontend>& in_frontend) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ int frontendId;
+ in_frontend->getFrontendId(&frontendId);
+ HidlResult res = mDemux->setFrontendDataSource(frontendId);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::setFrontendDataSourceById(int frontendId) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ HidlResult res = mDemux->setFrontendDataSource(frontendId);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openFilter(const DemuxFilterType& in_type,
+ int32_t in_bufferSize,
+ const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ HidlDemuxFilterMainType mainType = static_cast<HidlDemuxFilterMainType>(in_type.mainType);
+ HidlDemuxFilterType filterType{
+ .mainType = mainType,
+ };
+
+ switch (mainType) {
+ case HidlDemuxFilterMainType::TS:
+ filterType.subType.tsFilterType(static_cast<HidlDemuxTsFilterType>(
+ in_type.subType.get<DemuxFilterSubType::Tag::tsFilterType>()));
+ break;
+ case HidlDemuxFilterMainType::MMTP:
+ filterType.subType.mmtpFilterType(static_cast<HidlDemuxMmtpFilterType>(
+ in_type.subType.get<DemuxFilterSubType::Tag::mmtpFilterType>()));
+ break;
+ case HidlDemuxFilterMainType::IP:
+ filterType.subType.ipFilterType(static_cast<HidlDemuxIpFilterType>(
+ in_type.subType.get<DemuxFilterSubType::Tag::ipFilterType>()));
+ break;
+ case HidlDemuxFilterMainType::TLV:
+ filterType.subType.tlvFilterType(static_cast<HidlDemuxTlvFilterType>(
+ in_type.subType.get<DemuxFilterSubType::Tag::tlvFilterType>()));
+ break;
+ case HidlDemuxFilterMainType::ALP:
+ filterType.subType.alpFilterType(static_cast<HidlDemuxAlpFilterType>(
+ in_type.subType.get<DemuxFilterSubType::Tag::alpFilterType>()));
+ break;
+ }
+ HidlResult status;
+ sp<HidlIFilter> filterSp;
+ sp<TunerHidlFilter::FilterCallback> filterCb = new TunerHidlFilter::FilterCallback(in_cb);
+ sp<::android::hardware::tv::tuner::V1_0::IFilterCallback> cbSp = filterCb;
+ mDemux->openFilter(filterType, static_cast<uint32_t>(in_bufferSize), cbSp,
+ [&](HidlResult r, const sp<HidlIFilter>& filter) {
+ filterSp = filter;
+ status = r;
+ });
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlFilter>(filterSp, filterCb, in_type);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ sp<HidlITimeFilter> filterSp;
+ mDemux->openTimeFilter([&](HidlResult r, const sp<HidlITimeFilter>& filter) {
+ filterSp = filter;
+ status = r;
+ });
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlTimeFilter>(filterSp);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter,
+ int32_t* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ uint32_t avSyncHwId;
+ HidlResult res;
+ sp<HidlIFilter> halFilter = static_cast<TunerHidlFilter*>(tunerFilter.get())->getHalFilter();
+ mDemux->getAvSyncHwId(halFilter, [&](HidlResult r, uint32_t id) {
+ res = r;
+ avSyncHwId = id;
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ *_aidl_return = (int)avSyncHwId;
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::getAvSyncTime(int32_t avSyncHwId, int64_t* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ uint64_t time;
+ HidlResult res;
+ mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId), [&](HidlResult r, uint64_t ts) {
+ res = r;
+ time = ts;
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ *_aidl_return = (int64_t)time;
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+ const shared_ptr<ITunerDvrCallback>& in_cb,
+ shared_ptr<ITunerDvr>* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ HidlResult res;
+ sp<HidlIDvrCallback> callback = new TunerHidlDvr::DvrCallback(in_cb);
+ sp<HidlIDvr> hidlDvr;
+ mDemux->openDvr(static_cast<HidlDvrType>(in_dvbType), in_bufferSize, callback,
+ [&](HidlResult r, const sp<HidlIDvr>& dvr) {
+ hidlDvr = dvr;
+ res = r;
+ });
+ if (res != HidlResult::SUCCESS) {
+ *_aidl_return = nullptr;
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDvr>(hidlDvr, in_dvbType);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::connectCiCam(int32_t ciCamId) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ HidlResult res = mDemux->connectCiCam(static_cast<uint32_t>(ciCamId));
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::disconnectCiCam() {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ HidlResult res = mDemux->disconnectCiCam();
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDemux::close() {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+ }
+
+ HidlResult res = mDemux->close();
+ mDemux = nullptr;
+
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlDemux.h b/services/tuner/hidl/TunerHidlDemux.h
new file mode 100644
index 0000000..d535da6
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDemux.h
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDEMUX_H
+#define ANDROID_MEDIA_TUNERHIDLDEMUX_H
+
+#include <aidl/android/media/tv/tuner/BnTunerDemux.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::tv::tuner::V1_0::IDemux;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlIDemux = ::android::hardware::tv::tuner::V1_0::IDemux;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlDemux : public BnTunerDemux {
+public:
+ TunerHidlDemux(sp<HidlIDemux> demux, int demuxId);
+ virtual ~TunerHidlDemux();
+
+ ::ndk::ScopedAStatus setFrontendDataSource(
+ const shared_ptr<ITunerFrontend>& in_frontend) override;
+ ::ndk::ScopedAStatus setFrontendDataSourceById(int frontendId) override;
+ ::ndk::ScopedAStatus openFilter(const DemuxFilterType& in_type, int32_t in_bufferSize,
+ const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) override;
+ ::ndk::ScopedAStatus openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
+ ::ndk::ScopedAStatus getAvSyncHwId(const shared_ptr<ITunerFilter>& in_tunerFilter,
+ int32_t* _aidl_return) override;
+ ::ndk::ScopedAStatus getAvSyncTime(int32_t in_avSyncHwId, int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus openDvr(DvrType in_dvbType, int32_t in_bufferSize,
+ const shared_ptr<ITunerDvrCallback>& in_cb,
+ shared_ptr<ITunerDvr>* _aidl_return) override;
+ ::ndk::ScopedAStatus connectCiCam(int32_t in_ciCamId) override;
+ ::ndk::ScopedAStatus disconnectCiCam() override;
+ ::ndk::ScopedAStatus close() override;
+
+ int getId() { return mDemuxId; }
+
+private:
+ sp<HidlIDemux> mDemux;
+ int mDemuxId;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLDEMUX_H
diff --git a/services/tuner/hidl/TunerHidlDescrambler.cpp b/services/tuner/hidl/TunerHidlDescrambler.cpp
new file mode 100644
index 0000000..dd8cd9c
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDescrambler.cpp
@@ -0,0 +1,149 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDescrambler"
+
+#include "TunerHidlDescrambler.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerHidlDemux.h"
+#include "TunerHidlFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDescrambler::TunerHidlDescrambler(sp<HidlIDescrambler> descrambler) {
+ mDescrambler = descrambler;
+}
+
+TunerHidlDescrambler::~TunerHidlDescrambler() {
+ mDescrambler = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::setDemuxSource(
+ const shared_ptr<ITunerDemux>& in_tunerDemux) {
+ if (mDescrambler == nullptr) {
+ ALOGE("IDescrambler is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDescrambler->setDemuxSource(
+ static_cast<TunerHidlDemux*>(in_tunerDemux.get())->getId());
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::setKeyToken(const vector<uint8_t>& in_keyToken) {
+ if (mDescrambler == nullptr) {
+ ALOGE("IDescrambler is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDescrambler->setKeyToken(in_keyToken);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::addPid(
+ const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
+ if (mDescrambler == nullptr) {
+ ALOGE("IDescrambler is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ sp<HidlIFilter> halFilter =
+ (in_optionalSourceFilter == nullptr)
+ ? nullptr
+ : static_cast<TunerHidlFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+ HidlResult res = mDescrambler->addPid(getHidlDemuxPid(in_pid), halFilter);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::removePid(
+ const DemuxPid& in_pid, const shared_ptr<ITunerFilter>& in_optionalSourceFilter) {
+ if (mDescrambler == nullptr) {
+ ALOGE("IDescrambler is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ sp<HidlIFilter> halFilter =
+ (in_optionalSourceFilter == nullptr)
+ ? nullptr
+ : static_cast<TunerHidlFilter*>(in_optionalSourceFilter.get())->getHalFilter();
+ HidlResult res = mDescrambler->removePid(getHidlDemuxPid(in_pid), halFilter);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDescrambler::close() {
+ if (mDescrambler == nullptr) {
+ ALOGE("IDescrambler is not initialized.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDescrambler->close();
+ mDescrambler = nullptr;
+
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+HidlDemuxPid TunerHidlDescrambler::getHidlDemuxPid(const DemuxPid& pid) {
+ HidlDemuxPid hidlPid;
+ switch (pid.getTag()) {
+ case DemuxPid::tPid: {
+ hidlPid.tPid((uint16_t)pid.get<DemuxPid::Tag::tPid>());
+ break;
+ }
+ case DemuxPid::mmtpPid: {
+ hidlPid.mmtpPid((uint16_t)pid.get<DemuxPid::Tag::mmtpPid>());
+ break;
+ }
+ }
+ return hidlPid;
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlDescrambler.h b/services/tuner/hidl/TunerHidlDescrambler.h
new file mode 100644
index 0000000..9494968
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDescrambler.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
+#define ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
+
+#include <aidl/android/hardware/tv/tuner/IDescrambler.h>
+#include <aidl/android/media/tv/tuner/BnTunerDescrambler.h>
+#include <android/hardware/tv/tuner/1.0/IDescrambler.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using HidlDemuxPid = ::android::hardware::tv::tuner::V1_0::DemuxPid;
+using HidlIDescrambler = ::android::hardware::tv::tuner::V1_0::IDescrambler;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlDescrambler : public BnTunerDescrambler {
+public:
+ TunerHidlDescrambler(sp<HidlIDescrambler> descrambler);
+ virtual ~TunerHidlDescrambler();
+
+ ::ndk::ScopedAStatus setDemuxSource(const std::shared_ptr<ITunerDemux>& in_tunerDemux) override;
+ ::ndk::ScopedAStatus setKeyToken(const std::vector<uint8_t>& in_keyToken) override;
+ ::ndk::ScopedAStatus addPid(
+ const DemuxPid& in_pid,
+ const std::shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+ ::ndk::ScopedAStatus removePid(
+ const DemuxPid& in_pid,
+ const std::shared_ptr<ITunerFilter>& in_optionalSourceFilter) override;
+ ::ndk::ScopedAStatus close() override;
+
+private:
+ HidlDemuxPid getHidlDemuxPid(const DemuxPid& pid);
+
+ sp<HidlIDescrambler> mDescrambler;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLDESCRAMBLER_H
diff --git a/services/tuner/hidl/TunerHidlDvr.cpp b/services/tuner/hidl/TunerHidlDvr.cpp
new file mode 100644
index 0000000..1a619d5
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDvr.cpp
@@ -0,0 +1,257 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlDvr"
+
+#include "TunerHidlDvr.h"
+
+#include <aidl/android/hardware/tv/tuner/DataFormat.h>
+#include <aidl/android/hardware/tv/tuner/PlaybackStatus.h>
+#include <aidl/android/hardware/tv/tuner/RecordStatus.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <fmq/ConvertMQDescriptors.h>
+
+using ::aidl::android::hardware::tv::tuner::DataFormat;
+using ::aidl::android::hardware::tv::tuner::PlaybackStatus;
+using ::aidl::android::hardware::tv::tuner::RecordStatus;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::android::unsafeHidlToAidlMQDescriptor;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::MQDescriptorSync;
+
+using HidlDataFormat = ::android::hardware::tv::tuner::V1_0::DataFormat;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using MQDesc = MQDescriptorSync<uint8_t>;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlDvr::TunerHidlDvr(sp<HidlIDvr> dvr, DvrType type) {
+ mDvr = dvr;
+ mType = type;
+}
+
+TunerHidlDvr::~TunerHidlDvr() {
+ mDvr = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::getQueueDesc(AidlMQDesc* _aidl_return) {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ MQDesc dvrMQDesc;
+ HidlResult res;
+ mDvr->getQueueDesc([&](HidlResult r, const MQDesc& desc) {
+ dvrMQDesc = desc;
+ res = r;
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ AidlMQDesc aidlMQDesc;
+ unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(dvrMQDesc, &aidlMQDesc);
+ *_aidl_return = move(aidlMQDesc);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::configure(const DvrSettings& in_settings) {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDvr->configure(getHidlDvrSettings(in_settings));
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::attachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (in_filter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ sp<HidlIFilter> hidlFilter = static_cast<TunerHidlFilter*>(in_filter.get())->getHalFilter();
+ if (hidlFilter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ HidlResult res = mDvr->attachFilter(hidlFilter);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::detachFilter(const shared_ptr<ITunerFilter>& in_filter) {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (in_filter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ sp<HidlIFilter> halFilter = (static_cast<TunerHidlFilter*>(in_filter.get()))->getHalFilter();
+ if (halFilter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ HidlResult res = mDvr->detachFilter(halFilter);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::start() {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDvr->start();
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::stop() {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDvr->stop();
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::flush() {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDvr->flush();
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlDvr::close() {
+ if (mDvr == nullptr) {
+ ALOGE("IDvr is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mDvr->close();
+ mDvr = nullptr;
+
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+HidlDvrSettings TunerHidlDvr::getHidlDvrSettings(const DvrSettings& settings) {
+ HidlDvrSettings s;
+ switch (mType) {
+ case DvrType::PLAYBACK: {
+ s.playback({
+ .statusMask =
+ static_cast<uint8_t>(settings.get<DvrSettings::playback>().statusMask),
+ .lowThreshold =
+ static_cast<uint32_t>(settings.get<DvrSettings::playback>().lowThreshold),
+ .highThreshold =
+ static_cast<uint32_t>(settings.get<DvrSettings::playback>().highThreshold),
+ .dataFormat = static_cast<HidlDataFormat>(
+ settings.get<DvrSettings::playback>().dataFormat),
+ .packetSize =
+ static_cast<uint8_t>(settings.get<DvrSettings::playback>().packetSize),
+ });
+ return s;
+ }
+ case DvrType::RECORD: {
+ s.record({
+ .statusMask = static_cast<uint8_t>(settings.get<DvrSettings::record>().statusMask),
+ .lowThreshold =
+ static_cast<uint32_t>(settings.get<DvrSettings::record>().lowThreshold),
+ .highThreshold =
+ static_cast<uint32_t>(settings.get<DvrSettings::record>().highThreshold),
+ .dataFormat =
+ static_cast<HidlDataFormat>(settings.get<DvrSettings::record>().dataFormat),
+ .packetSize = static_cast<uint8_t>(settings.get<DvrSettings::record>().packetSize),
+ });
+ return s;
+ }
+ default:
+ break;
+ }
+ return s;
+}
+
+/////////////// IDvrCallback ///////////////////////
+Return<void> TunerHidlDvr::DvrCallback::onRecordStatus(const HidlRecordStatus status) {
+ if (mTunerDvrCallback != nullptr) {
+ mTunerDvrCallback->onRecordStatus(static_cast<RecordStatus>(status));
+ }
+ return Void();
+}
+
+Return<void> TunerHidlDvr::DvrCallback::onPlaybackStatus(const HidlPlaybackStatus status) {
+ if (mTunerDvrCallback != nullptr) {
+ mTunerDvrCallback->onPlaybackStatus(static_cast<PlaybackStatus>(status));
+ }
+ return Void();
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlDvr.h b/services/tuner/hidl/TunerHidlDvr.h
new file mode 100644
index 0000000..a280ff7
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlDvr.h
@@ -0,0 +1,91 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLDVR_H
+#define ANDROID_MEDIA_TUNERHIDLDVR_H
+
+#include <aidl/android/hardware/tv/tuner/DvrSettings.h>
+#include <aidl/android/hardware/tv/tuner/DvrType.h>
+#include <aidl/android/media/tv/tuner/BnTunerDvr.h>
+#include <aidl/android/media/tv/tuner/ITunerDvrCallback.h>
+#include <android/hardware/tv/tuner/1.0/IDvr.h>
+#include <android/hardware/tv/tuner/1.0/IDvrCallback.h>
+
+#include "TunerHidlFilter.h"
+
+using ::aidl::android::hardware::common::fmq::MQDescriptor;
+using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::DvrSettings;
+using ::aidl::android::hardware::tv::tuner::DvrType;
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlDvrSettings = ::android::hardware::tv::tuner::V1_0::DvrSettings;
+using HidlIDvr = ::android::hardware::tv::tuner::V1_0::IDvr;
+using HidlIDvrCallback = ::android::hardware::tv::tuner::V1_0::IDvrCallback;
+using HidlPlaybackStatus = ::android::hardware::tv::tuner::V1_0::PlaybackStatus;
+using HidlRecordStatus = ::android::hardware::tv::tuner::V1_0::RecordStatus;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
+
+class TunerHidlDvr : public BnTunerDvr {
+public:
+ TunerHidlDvr(sp<HidlIDvr> dvr, DvrType type);
+ ~TunerHidlDvr();
+
+ ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
+ ::ndk::ScopedAStatus configure(const DvrSettings& in_settings) override;
+ ::ndk::ScopedAStatus attachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+ ::ndk::ScopedAStatus detachFilter(const shared_ptr<ITunerFilter>& in_filter) override;
+ ::ndk::ScopedAStatus start() override;
+ ::ndk::ScopedAStatus stop() override;
+ ::ndk::ScopedAStatus flush() override;
+ ::ndk::ScopedAStatus close() override;
+
+ struct DvrCallback : public HidlIDvrCallback {
+ DvrCallback(const shared_ptr<ITunerDvrCallback> tunerDvrCallback)
+ : mTunerDvrCallback(tunerDvrCallback){};
+
+ virtual Return<void> onRecordStatus(const HidlRecordStatus status);
+ virtual Return<void> onPlaybackStatus(const HidlPlaybackStatus status);
+
+ private:
+ shared_ptr<ITunerDvrCallback> mTunerDvrCallback;
+ };
+
+private:
+ HidlDvrSettings getHidlDvrSettings(const DvrSettings& settings);
+
+ sp<HidlIDvr> mDvr;
+ DvrType mType;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLDVR_H
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
new file mode 100644
index 0000000..7b76093
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -0,0 +1,1265 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlFilter"
+
+#include "TunerHidlFilter.h"
+
+#include <aidl/android/hardware/tv/tuner/Constant.h>
+#include <aidl/android/hardware/tv/tuner/DemuxScIndex.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <aidlcommonsupport/NativeHandle.h>
+#include <binder/IPCThreadState.h>
+#include <fmq/ConvertMQDescriptors.h>
+
+#include "TunerHelper.h"
+#include "TunerHidlService.h"
+
+using ::aidl::android::hardware::tv::tuner::AudioExtraMetaData;
+using ::aidl::android::hardware::tv::tuner::Constant;
+using ::aidl::android::hardware::tv::tuner::DemuxAlpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxAlpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterDownloadEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterIpPayloadEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMainType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMediaEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMediaEventExtraMetaData;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMmtpRecordEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterMonitorEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterPesEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterScIndexMask;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionBits;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettingsCondition;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettingsConditionTableInfo;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSubType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterTemiEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterTsRecordEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddress;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddressIpAddress;
+using ::aidl::android::hardware::tv::tuner::DemuxIpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxIpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterType;
+using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::aidl::android::hardware::tv::tuner::DemuxScIndex;
+using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterSettingsFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxTsFilterType;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::aidl::android::hardware::tv::tuner::ScramblingStatus;
+using ::android::dupToAidl;
+using ::android::IPCThreadState;
+using ::android::makeFromAidl;
+using ::android::unsafeHidlToAidlMQDescriptor;
+using ::android::hardware::hidl_handle;
+
+using HidlDemuxAlpLengthType = ::android::hardware::tv::tuner::V1_0::DemuxAlpLengthType;
+using HidlDemuxFilterMainType = ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
+using HidlDemuxIpAddress = ::android::hardware::tv::tuner::V1_0::DemuxIpAddress;
+using HidlDemuxMmtpFilterType = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
+using HidlDemuxMmtpPid = ::android::hardware::tv::tuner::V1_0::DemuxMmtpPid;
+using HidlDemuxRecordScIndexType = ::android::hardware::tv::tuner::V1_0::DemuxRecordScIndexType;
+using HidlDemuxStreamId = ::android::hardware::tv::tuner::V1_0::DemuxStreamId;
+using HidlDemuxTsFilterType = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlAudioStreamType = ::android::hardware::tv::tuner::V1_1::AudioStreamType;
+using HidlConstant = ::android::hardware::tv::tuner::V1_1::Constant;
+using HidlVideoStreamType = ::android::hardware::tv::tuner::V1_1::VideoStreamType;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlFilter::TunerHidlFilter(sp<HidlIFilter> filter, sp<FilterCallback> cb,
+ DemuxFilterType type)
+ : mFilter(filter),
+ mType(type),
+ mStarted(false),
+ mShared(false),
+ mClientPid(-1),
+ mFilterCallback(cb) {
+ mFilter_1_1 = ::android::hardware::tv::tuner::V1_1::IFilter::castFrom(filter);
+}
+
+TunerHidlFilter::~TunerHidlFilter() {
+ Mutex::Autolock _l(mLock);
+ mFilter = nullptr;
+ mFilter_1_1 = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+ }
+
+ MQDesc filterMQDesc;
+ HidlResult res;
+ mFilter->getQueueDesc([&](HidlResult r, const MQDesc& desc) {
+ filterMQDesc = desc;
+ res = r;
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ AidlMQDesc aidlMQDesc;
+ unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(filterMQDesc, &aidlMQDesc);
+ *_aidl_return = move(aidlMQDesc);
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getId(int32_t* _aidl_return) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlResult res;
+ mFilter->getId([&](HidlResult r, uint32_t filterId) {
+ res = r;
+ mId = filterId;
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ *_aidl_return = mId;
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getId64Bit(int64_t* _aidl_return) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlResult res;
+ mFilter_1_1->getId64Bit([&](HidlResult r, uint64_t filterId) {
+ res = r;
+ mId64Bit = filterId;
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ *_aidl_return = mId64Bit;
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configure(const DemuxFilterSettings& in_settings) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlDemuxFilterSettings settings;
+ switch (in_settings.getTag()) {
+ case DemuxFilterSettings::ts: {
+ getHidlTsSettings(in_settings, settings);
+ break;
+ }
+ case DemuxFilterSettings::mmtp: {
+ getHidlMmtpSettings(in_settings, settings);
+ break;
+ }
+ case DemuxFilterSettings::ip: {
+ getHidlIpSettings(in_settings, settings);
+ break;
+ }
+ case DemuxFilterSettings::tlv: {
+ getHidlTlvSettings(in_settings, settings);
+ break;
+ }
+ case DemuxFilterSettings::alp: {
+ getHidlAlpSettings(in_settings, settings);
+ break;
+ }
+ }
+
+ HidlResult res = mFilter->configure(settings);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureMonitorEvent(int32_t monitorEventType) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlResult res = mFilter_1_1->configureMonitorEvent(monitorEventType);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureIpFilterContextId(int32_t cid) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlResult res = mFilter_1_1->configureIpCid(cid);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::configureAvStreamType(const AvStreamType& in_avStreamType) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlAvStreamType type;
+ if (!getHidlAvStreamType(in_avStreamType, type)) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlResult res = mFilter_1_1->configureAvStreamType(type);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (filter == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ sp<HidlIFilter> hidlFilter = static_cast<TunerHidlFilter*>(filter.get())->getHalFilter();
+ HidlResult res = mFilter->setDataSource(hidlFilter);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::getAvSharedHandle(NativeHandle* out_avMemory,
+ int64_t* _aidl_return) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlResult res;
+ mFilter_1_1->getAvSharedHandle([&](HidlResult r, hidl_handle avMemory, uint64_t avMemSize) {
+ res = r;
+ if (res == HidlResult::SUCCESS) {
+ *out_avMemory = dupToAidl(avMemory);
+ *_aidl_return = static_cast<int64_t>(avMemSize);
+ }
+ });
+
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::releaseAvHandle(const NativeHandle& in_handle,
+ int64_t in_avDataId) {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ ALOGD("%s is called on a shared filter", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ HidlResult res = mFilter->releaseAvHandle(hidl_handle(makeFromAidl(in_handle)), in_avDataId);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ // Call to HAL to make sure the transport FD was able to be closed by binder.
+ // This is a tricky workaround for a problem in Binder.
+ // TODO:[b/192048842] When that problem is fixed we may be able to remove or change this code.
+ mFilter->getId([&](HidlResult /* r */, uint32_t /* filterId*/){});
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::start() {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+ }
+
+ HidlResult res = mFilter->start();
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ mStarted = true;
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::stop() {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+ }
+
+ HidlResult res = mFilter->stop();
+ mStarted = false;
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFilter::flush() {
+ Mutex::Autolock _l(mLock);
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (mShared) {
+ IPCThreadState* ipc = IPCThreadState::self();
+ int32_t callingPid = ipc->getCallingPid();
+ if (callingPid == mClientPid) {
+ ALOGD("%s is called in wrong process", __FUNCTION__);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+ }
+
+ HidlResult res = mFilter->flush();
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+// Closes the filter. Behavior depends on who calls while the filter is shared:
+//  - the sharing owner (mClientPid): notify the borrower that the filter is
+//    now inaccessible, detach the shared callback, unregister from the Tuner
+//    service, then really close the HAL filter below;
+//  - any other (borrowing) process: only detach its callback and clear
+//    mStarted — the HAL filter stays open for the owner.
+::ndk::ScopedAStatus TunerHidlFilter::close() {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared) {
+        IPCThreadState* ipc = IPCThreadState::self();
+        int32_t callingPid = ipc->getCallingPid();
+        if (callingPid == mClientPid) {
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            TunerHidlService::getTunerService()->removeSharedFilter(this->ref<TunerHidlFilter>());
+        } else {
+            // Calling from shared process, do not really close this filter.
+            if (mFilterCallback != nullptr) {
+                mFilterCallback->detachSharedFilterCallback();
+            }
+            mStarted = false;
+            return ::ndk::ScopedAStatus::ok();
+        }
+    }
+
+    // Really close the HAL filter and reset all local state, even when the
+    // HAL reports an error (the error is still surfaced to the caller).
+    HidlResult res = mFilter->close();
+    mFilter = nullptr;
+    mFilter_1_1 = nullptr;
+    mStarted = false;
+    mShared = false;
+    mClientPid = -1;
+
+    if (res != HidlResult::SUCCESS) {
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+    }
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+// Makes this filter shareable: records the caller as the owner (mClientPid),
+// registers the filter with the Tuner service, and returns the share token
+// through _aidl_return. Rejected while the filter is already shared or has
+// been started.
+::ndk::ScopedAStatus TunerHidlFilter::acquireSharedFilterToken(string* _aidl_return) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (mShared || mStarted) {
+        ALOGD("create SharedFilter in wrong state");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::INVALID_STATE));
+    }
+
+    // Remember the owner's pid; other entry points use it to tell the owner
+    // apart from the borrowing process.
+    IPCThreadState* ipc = IPCThreadState::self();
+    mClientPid = ipc->getCallingPid();
+    string token =
+            TunerHidlService::getTunerService()->addFilterToShared(this->ref<TunerHidlFilter>());
+    _aidl_return->assign(token);
+    mShared = true;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+// Revokes sharing: tells the borrower the filter is inaccessible, restores the
+// original callback, and unregisters from the Tuner service. The token
+// parameter is intentionally ignored — this filter has at most one token.
+// A no-op (OK) when the filter is not currently shared.
+::ndk::ScopedAStatus TunerHidlFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    if (!mShared) {
+        // The filter is not shared or the shared filter has been closed.
+        return ::ndk::ScopedAStatus::ok();
+    }
+
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->sendSharedFilterStatus(STATUS_INACCESSIBLE);
+        mFilterCallback->detachSharedFilterCallback();
+    }
+
+    TunerHidlService::getTunerService()->removeSharedFilter(this->ref<TunerHidlFilter>());
+    mShared = false;
+
+    return ::ndk::ScopedAStatus::ok();
+}
+
+// Returns the demux filter type this filter was created with.
+// NOTE(review): reads mType without taking mLock — presumably mType is
+// immutable after construction; confirm before relying on it.
+::ndk::ScopedAStatus TunerHidlFilter::getFilterType(DemuxFilterType* _aidl_return) {
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    *_aidl_return = mType;
+    return ::ndk::ScopedAStatus::ok();
+}
+
+// Delay hints only exist in the AIDL Tuner HAL; always UNAVAILABLE here.
+::ndk::ScopedAStatus TunerHidlFilter::setDelayHint(const FilterDelayHint&) {
+    // setDelayHint is not supported in HIDL HAL
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+            static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
+// A shared filter may be used by any process except the owner that shared it.
+bool TunerHidlFilter::isSharedFilterAllowed(int callingPid) {
+    return mShared && mClientPid != callingPid;
+}
+
+// Routes the borrower's callback into our FilterCallback wrapper.
+void TunerHidlFilter::attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb) {
+    if (mFilterCallback != nullptr) {
+        mFilterCallback->attachSharedFilterCallback(in_cb);
+    }
+}
+
+// Exposes the raw HIDL filter handle (non-owning use by the service layer).
+sp<HidlIFilter> TunerHidlFilter::getHalFilter() {
+    return mFilter;
+}
+
+// Converts the AIDL AvStreamType union into the HIDL safe_union, choosing the
+// audio or video arm based on this filter's own type. Returns false when the
+// filter is neither an audio nor a video filter (type left untouched).
+bool TunerHidlFilter::getHidlAvStreamType(const AvStreamType avStreamType, HidlAvStreamType& type) {
+    if (isAudioFilter()) {
+        type.audio(static_cast<HidlAudioStreamType>(avStreamType.get<AvStreamType::audio>()));
+        return true;
+    }
+
+    if (isVideoFilter()) {
+        type.video(static_cast<HidlVideoStreamType>(avStreamType.get<AvStreamType::video>()));
+        return true;
+    }
+
+    return false;
+}
+
+// True for TS/AUDIO and MMTP/AUDIO filters (the only audio-carrying types).
+bool TunerHidlFilter::isAudioFilter() {
+    return (mType.mainType == DemuxFilterMainType::TS &&
+            mType.subType.get<DemuxFilterSubType::tsFilterType>() == DemuxTsFilterType::AUDIO) ||
+           (mType.mainType == DemuxFilterMainType::MMTP &&
+            mType.subType.get<DemuxFilterSubType::mmtpFilterType>() == DemuxMmtpFilterType::AUDIO);
+}
+
+// True for TS/VIDEO and MMTP/VIDEO filters (the only video-carrying types).
+bool TunerHidlFilter::isVideoFilter() {
+    return (mType.mainType == DemuxFilterMainType::TS &&
+            mType.subType.get<DemuxFilterSubType::tsFilterType>() == DemuxTsFilterType::VIDEO) ||
+           (mType.mainType == DemuxFilterMainType::MMTP &&
+            mType.subType.get<DemuxFilterSubType::mmtpFilterType>() == DemuxMmtpFilterType::VIDEO);
+}
+
+// AIDL -> HIDL conversion for TS filter settings: copies the PID and maps the
+// per-sub-filter settings union arm; any unmapped arm becomes noinit.
+void TunerHidlFilter::getHidlTsSettings(const DemuxFilterSettings& settings,
+                                        HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxTsFilterSettings& tsConf = settings.get<DemuxFilterSettings::ts>();
+    HidlDemuxTsFilterSettings ts{
+            .tpid = static_cast<uint16_t>(tsConf.tpid),
+    };
+
+    switch (tsConf.filterSettings.getTag()) {
+    case DemuxTsFilterSettingsFilterSettings::av: {
+        ts.filterSettings.av(getHidlAvSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::av>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::section: {
+        ts.filterSettings.section(getHidlSectionSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::pesData: {
+        ts.filterSettings.pesData(getHidlPesDataSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::pesData>()));
+        break;
+    }
+    case DemuxTsFilterSettingsFilterSettings::record: {
+        ts.filterSettings.record(getHidlRecordSettings(
+                tsConf.filterSettings.get<DemuxTsFilterSettingsFilterSettings::record>()));
+        break;
+    }
+    default: {
+        // No sub-settings supplied; leave the HIDL safe_union uninitialized.
+        ts.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.ts(ts);
+}
+
+// AIDL -> HIDL conversion for MMTP filter settings; same shape as the TS
+// conversion but with the additional `download` arm. Unmapped arms -> noinit.
+void TunerHidlFilter::getHidlMmtpSettings(const DemuxFilterSettings& settings,
+                                          HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxMmtpFilterSettings& mmtpConf = settings.get<DemuxFilterSettings::mmtp>();
+    HidlDemuxMmtpFilterSettings mmtp{
+            .mmtpPid = static_cast<HidlDemuxMmtpPid>(mmtpConf.mmtpPid),
+    };
+
+    switch (mmtpConf.filterSettings.getTag()) {
+    case DemuxMmtpFilterSettingsFilterSettings::av: {
+        mmtp.filterSettings.av(getHidlAvSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::av>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::section: {
+        mmtp.filterSettings.section(getHidlSectionSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::pesData: {
+        mmtp.filterSettings.pesData(getHidlPesDataSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::pesData>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::record: {
+        mmtp.filterSettings.record(getHidlRecordSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::record>()));
+        break;
+    }
+    case DemuxMmtpFilterSettingsFilterSettings::download: {
+        mmtp.filterSettings.download(getHidlDownloadSettings(
+                mmtpConf.filterSettings.get<DemuxMmtpFilterSettingsFilterSettings::download>()));
+        break;
+    }
+    default: {
+        mmtp.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.mmtp(mmtp);
+}
+
+// AIDL -> HIDL conversion for IP filter settings: ports, v4/v6 src and dst
+// addresses, then the section/passthrough sub-settings union.
+void TunerHidlFilter::getHidlIpSettings(const DemuxFilterSettings& settings,
+                                        HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxIpFilterSettings& ipConf = settings.get<DemuxFilterSettings::ip>();
+    HidlDemuxIpAddress ipAddr{
+            .srcPort = static_cast<uint16_t>(ipConf.ipAddr.srcPort),
+            .dstPort = static_cast<uint16_t>(ipConf.ipAddr.dstPort),
+    };
+
+    // Ternaries are used for their side effect only: they select which
+    // safe_union arm (v4 or v6) to populate based on the AIDL union tag.
+    ipConf.ipAddr.srcIpAddress.getTag() == DemuxIpAddressIpAddress::v6
+            ? ipAddr.srcIpAddress.v6(getIpV6Address(ipConf.ipAddr.srcIpAddress))
+            : ipAddr.srcIpAddress.v4(getIpV4Address(ipConf.ipAddr.srcIpAddress));
+    ipConf.ipAddr.dstIpAddress.getTag() == DemuxIpAddressIpAddress::v6
+            ? ipAddr.dstIpAddress.v6(getIpV6Address(ipConf.ipAddr.dstIpAddress))
+            : ipAddr.dstIpAddress.v4(getIpV4Address(ipConf.ipAddr.dstIpAddress));
+
+    HidlDemuxIpFilterSettings ip;
+    ip.ipAddr = ipAddr;
+
+    switch (ipConf.filterSettings.getTag()) {
+    case DemuxIpFilterSettingsFilterSettings::section: {
+        ip.filterSettings.section(getHidlSectionSettings(
+                ipConf.filterSettings.get<DemuxIpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxIpFilterSettingsFilterSettings::bPassthrough: {
+        ip.filterSettings.bPassthrough(
+                ipConf.filterSettings.get<DemuxIpFilterSettingsFilterSettings::bPassthrough>());
+        break;
+    }
+    default: {
+        ip.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.ip(ip);
+}
+
+// Copies the AIDL v6 address bytes into a fixed 16-byte HIDL array.
+// NOTE(review): a malformed (wrong-length) address is silently replaced by a
+// default-initialized array rather than reported — confirm this is intended.
+hidl_array<uint8_t, IP_V6_LENGTH> TunerHidlFilter::getIpV6Address(
+        const DemuxIpAddressIpAddress& addr) {
+    hidl_array<uint8_t, IP_V6_LENGTH> ip;
+    if (addr.get<DemuxIpAddressIpAddress::v6>().size() != IP_V6_LENGTH) {
+        return ip;
+    }
+    copy(addr.get<DemuxIpAddressIpAddress::v6>().begin(),
+         addr.get<DemuxIpAddressIpAddress::v6>().end(), ip.data());
+    return ip;
+}
+
+// Same as getIpV6Address but for the 4-byte v4 arm, with the same silent
+// fallback on a length mismatch.
+hidl_array<uint8_t, IP_V4_LENGTH> TunerHidlFilter::getIpV4Address(
+        const DemuxIpAddressIpAddress& addr) {
+    hidl_array<uint8_t, IP_V4_LENGTH> ip;
+    if (addr.get<DemuxIpAddressIpAddress::v4>().size() != IP_V4_LENGTH) {
+        return ip;
+    }
+    copy(addr.get<DemuxIpAddressIpAddress::v4>().begin(),
+         addr.get<DemuxIpAddressIpAddress::v4>().end(), ip.data());
+    return ip;
+}
+
+// AIDL -> HIDL conversion for TLV filter settings (packet type, IP-compression
+// flag, then section/passthrough sub-settings; unmapped arms -> noinit).
+void TunerHidlFilter::getHidlTlvSettings(const DemuxFilterSettings& settings,
+                                         HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxTlvFilterSettings& tlvConf = settings.get<DemuxFilterSettings::tlv>();
+    HidlDemuxTlvFilterSettings tlv{
+            .packetType = static_cast<uint8_t>(tlvConf.packetType),
+            .isCompressedIpPacket = tlvConf.isCompressedIpPacket,
+    };
+
+    switch (tlvConf.filterSettings.getTag()) {
+    case DemuxTlvFilterSettingsFilterSettings::section: {
+        tlv.filterSettings.section(getHidlSectionSettings(
+                tlvConf.filterSettings.get<DemuxTlvFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    case DemuxTlvFilterSettingsFilterSettings::bPassthrough: {
+        tlv.filterSettings.bPassthrough(
+                tlvConf.filterSettings.get<DemuxTlvFilterSettingsFilterSettings::bPassthrough>());
+        break;
+    }
+    default: {
+        tlv.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.tlv(tlv);
+}
+
+// AIDL -> HIDL conversion for ALP filter settings; only the section arm of
+// the sub-settings union is representable in HIDL, everything else -> noinit.
+void TunerHidlFilter::getHidlAlpSettings(const DemuxFilterSettings& settings,
+                                         HidlDemuxFilterSettings& hidlSettings) {
+    const DemuxAlpFilterSettings& alpConf = settings.get<DemuxFilterSettings::alp>();
+    HidlDemuxAlpFilterSettings alp{
+            .packetType = static_cast<uint8_t>(alpConf.packetType),
+            .lengthType = static_cast<HidlDemuxAlpLengthType>(alpConf.lengthType),
+    };
+
+    switch (alpConf.filterSettings.getTag()) {
+    case DemuxAlpFilterSettingsFilterSettings::section: {
+        alp.filterSettings.section(getHidlSectionSettings(
+                alpConf.filterSettings.get<DemuxAlpFilterSettingsFilterSettings::section>()));
+        break;
+    }
+    default: {
+        alp.filterSettings.noinit();
+        break;
+    }
+    }
+    hidlSettings.alp(alp);
+}
+
+// AIDL -> HIDL conversion of AV settings; only the passthrough flag exists on
+// the HIDL side.
+HidlDemuxFilterAvSettings TunerHidlFilter::getHidlAvSettings(
+        const DemuxFilterAvSettings& settings) {
+    HidlDemuxFilterAvSettings av{
+            .isPassthrough = settings.isPassthrough,
+    };
+    return av;
+}
+
+// AIDL -> HIDL conversion of section-filter settings, including the match
+// condition union (raw bit masks vs. tableId/version). An unset condition
+// leaves the HIDL condition union untouched.
+HidlDemuxFilterSectionSettings TunerHidlFilter::getHidlSectionSettings(
+        const DemuxFilterSectionSettings& settings) {
+    HidlDemuxFilterSectionSettings section{
+            .isCheckCrc = settings.isCheckCrc,
+            .isRepeat = settings.isRepeat,
+            .isRaw = settings.isRaw,
+    };
+
+    switch (settings.condition.getTag()) {
+    case DemuxFilterSectionSettingsCondition::sectionBits: {
+        const DemuxFilterSectionBits& sectionBits =
+                settings.condition.get<DemuxFilterSectionSettingsCondition::sectionBits>();
+        // The AIDL byte arrays are re-materialized as vectors for the HIDL vec fields.
+        vector<uint8_t> filter(sectionBits.filter.begin(), sectionBits.filter.end());
+        vector<uint8_t> mask(sectionBits.mask.begin(), sectionBits.mask.end());
+        vector<uint8_t> mode(sectionBits.mode.begin(), sectionBits.mode.end());
+        section.condition.sectionBits({
+                .filter = filter,
+                .mask = mask,
+                .mode = mode,
+        });
+        break;
+    }
+    case DemuxFilterSectionSettingsCondition::tableInfo: {
+        const DemuxFilterSectionSettingsConditionTableInfo& tableInfo =
+                settings.condition.get<DemuxFilterSectionSettingsCondition::tableInfo>();
+        section.condition.tableInfo({
+                .tableId = static_cast<uint16_t>(tableInfo.tableId),
+                .version = static_cast<uint16_t>(tableInfo.version),
+        });
+        break;
+    }
+    default: {
+        break;
+    }
+    }
+    return section;
+}
+
+// AIDL -> HIDL conversion of PES-data settings (stream id + raw flag).
+HidlDemuxFilterPesDataSettings TunerHidlFilter::getHidlPesDataSettings(
+        const DemuxFilterPesDataSettings& settings) {
+    HidlDemuxFilterPesDataSettings pes{
+            .streamId = static_cast<HidlDemuxStreamId>(settings.streamId),
+            .isRaw = settings.isRaw,
+    };
+    return pes;
+}
+
+// AIDL -> HIDL conversion of record settings. The scIndexMask union decides
+// both the HIDL scIndexType and which mask arm is filled; AVC masks are
+// re-based because the two HALs number the bits differently.
+HidlDemuxFilterRecordSettings TunerHidlFilter::getHidlRecordSettings(
+        const DemuxFilterRecordSettings& settings) {
+    HidlDemuxFilterRecordSettings record{
+            .tsIndexMask = static_cast<uint32_t>(settings.tsIndexMask),
+    };
+
+    switch (settings.scIndexMask.getTag()) {
+    case DemuxFilterScIndexMask::scIndex: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
+        record.scIndexMask.sc(
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scIndex>()));
+        break;
+    }
+    case DemuxFilterScIndexMask::scAvc: {
+        record.scIndexType = HidlDemuxRecordScIndexType::SC;
+        uint32_t index =
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scAvc>());
+        // HIDL HAL starting from 1 << 4; AIDL starting from 1 << 0.
+        index = index << 4;
+        record.scIndexMask.sc(index);
+        break;
+    }
+    case DemuxFilterScIndexMask::scHevc: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
+        record.scIndexMask.scHevc(
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scHevc>()));
+        break;
+    }
+    }
+    return record;
+}
+
+// AIDL -> HIDL conversion of download settings (download id only).
+HidlDemuxFilterDownloadSettings TunerHidlFilter::getHidlDownloadSettings(
+        const DemuxFilterDownloadSettings& settings) {
+    HidlDemuxFilterDownloadSettings download{
+            .downloadId = static_cast<uint32_t>(settings.downloadId),
+    };
+    return download;
+}
+
+/////////////// FilterCallback ///////////////////////
+// HIDL 1.0 status callback: forwarded to the AIDL client as-is (enum values
+// are cast 1:1).
+Return<void> TunerHidlFilter::FilterCallback::onFilterStatus(HidlDemuxFilterStatus status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
+    }
+    return Void();
+}
+
+// HIDL 1.0 event callback: funneled through the 1.1 path with an empty
+// extension-event list so there is a single conversion code path.
+Return<void> TunerHidlFilter::FilterCallback::onFilterEvent(
+        const HidlDemuxFilterEvent& filterEvent) {
+    vector<HidlDemuxFilterEventExt::Event> emptyEventsExt;
+    HidlDemuxFilterEventExt emptyFilterEventExt{
+            .events = emptyEventsExt,
+    };
+    onFilterEvent_1_1(filterEvent, emptyFilterEventExt);
+    return Void();
+}
+
+// HIDL 1.1 event callback: converts base + extension events into AIDL
+// DemuxFilterEvents and delivers them to the attached client callback.
+Return<void> TunerHidlFilter::FilterCallback::onFilterEvent_1_1(
+        const HidlDemuxFilterEvent& filterEvent, const HidlDemuxFilterEventExt& filterEventExt) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr) {
+        vector<HidlDemuxFilterEvent::Event> events = filterEvent.events;
+        vector<HidlDemuxFilterEventExt::Event> eventsExt = filterEventExt.events;
+        vector<DemuxFilterEvent> tunerEvents;
+
+        getAidlFilterEvent(events, eventsExt, tunerEvents);
+        mTunerFilterCallback->onFilterEvent(tunerEvents);
+    }
+    return Void();
+}
+
+// Pushes a shared-filter status (e.g. STATUS_INACCESSIBLE) to the borrowing
+// client. Only meaningful while a shared callback is attached, i.e. while
+// mOriginalCallback holds the owner's callback.
+void TunerHidlFilter::FilterCallback::sendSharedFilterStatus(int32_t status) {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback->onFilterStatus(static_cast<DemuxFilterStatus>(status));
+    }
+}
+
+// Swaps in the borrower's callback, parking the owner's callback so it can be
+// restored later by detachSharedFilterCallback().
+void TunerHidlFilter::FilterCallback::attachSharedFilterCallback(
+        const shared_ptr<ITunerFilterCallback>& in_cb) {
+    Mutex::Autolock _l(mCallbackLock);
+    mOriginalCallback = mTunerFilterCallback;
+    mTunerFilterCallback = in_cb;
+}
+
+// Restores the owner's callback; a no-op when no shared callback is attached.
+void TunerHidlFilter::FilterCallback::detachSharedFilterCallback() {
+    Mutex::Autolock _l(mCallbackLock);
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback = mOriginalCallback;
+        mOriginalCallback = nullptr;
+    }
+}
+
+/////////////// FilterCallback Helper Methods ///////////////////////
+void TunerHidlFilter::FilterCallback::getAidlFilterEvent(
+ const vector<HidlDemuxFilterEvent::Event>& events,
+ const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
+ vector<DemuxFilterEvent>& aidlEvents) {
+ if (events.empty() && !eventsExt.empty()) {
+ switch (eventsExt[0].getDiscriminator()) {
+ case HidlDemuxFilterEventExt::Event::hidl_discriminator::monitorEvent: {
+ getMonitorEvent(eventsExt, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEventExt::Event::hidl_discriminator::startId: {
+ getRestartEvent(eventsExt, aidlEvents);
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+
+ if (!events.empty()) {
+ switch (events[0].getDiscriminator()) {
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::media: {
+ getMediaEvent(events, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::section: {
+ getSectionEvent(events, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::pes: {
+ getPesEvent(events, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::tsRecord: {
+ getTsRecordEvent(events, eventsExt, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::mmtpRecord: {
+ getMmtpRecordEvent(events, eventsExt, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::download: {
+ getDownloadEvent(events, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::ipPayload: {
+ getIpPayloadEvent(events, aidlEvents);
+ break;
+ }
+ case HidlDemuxFilterEvent::Event::hidl_discriminator::temi: {
+ getTemiEvent(events, aidlEvents);
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+}
+
+// HIDL -> AIDL conversion of media events: scalar fields are widened/cast,
+// audio extra metadata is copied when present (otherwise the union is set to
+// noinit), and the A/V buffer handle is duplicated into an AIDL NativeHandle.
+void TunerHidlFilter::FilterCallback::getMediaEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterMediaEvent& mediaEvent = events[i].media();
+        DemuxFilterMediaEvent media;
+
+        media.streamId = static_cast<int32_t>(mediaEvent.streamId);
+        media.isPtsPresent = mediaEvent.isPtsPresent;
+        media.pts = static_cast<int64_t>(mediaEvent.pts);
+        media.dataLength = static_cast<int64_t>(mediaEvent.dataLength);
+        media.offset = static_cast<int64_t>(mediaEvent.offset);
+        media.isSecureMemory = mediaEvent.isSecureMemory;
+        media.avDataId = static_cast<int64_t>(mediaEvent.avDataId);
+        media.mpuSequenceNumber = static_cast<int32_t>(mediaEvent.mpuSequenceNumber);
+        media.isPesPrivateData = mediaEvent.isPesPrivateData;
+
+        if (mediaEvent.extraMetaData.getDiscriminator() ==
+            HidlDemuxFilterMediaEvent::ExtraMetaData::hidl_discriminator::audio) {
+            AudioExtraMetaData audio;
+            audio.adFade = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adFade);
+            audio.adPan = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adPan);
+            audio.versionTextTag =
+                    static_cast<int16_t>(mediaEvent.extraMetaData.audio().versionTextTag);
+            audio.adGainCenter = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainCenter);
+            audio.adGainFront = static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainFront);
+            audio.adGainSurround =
+                    static_cast<int8_t>(mediaEvent.extraMetaData.audio().adGainSurround);
+            media.extraMetaData.set<DemuxFilterMediaEventExtraMetaData::audio>(audio);
+        } else {
+            media.extraMetaData.set<DemuxFilterMediaEventExtraMetaData::noinit>(true);
+        }
+
+        // Duplicate the handle: the AIDL event must own its copy of the fd(s).
+        if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
+            media.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::media>(move(media));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of section events (tableId/version/sectionNum/len).
+void TunerHidlFilter::FilterCallback::getSectionEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterSectionEvent& sectionEvent = events[i].section();
+        DemuxFilterSectionEvent section;
+
+        section.tableId = static_cast<int32_t>(sectionEvent.tableId);
+        section.version = static_cast<int32_t>(sectionEvent.version);
+        section.sectionNum = static_cast<int32_t>(sectionEvent.sectionNum);
+        section.dataLength = static_cast<int32_t>(sectionEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::section>(move(section));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of PES events (streamId/dataLength/mpuSequenceNumber).
+void TunerHidlFilter::FilterCallback::getPesEvent(const vector<HidlDemuxFilterEvent::Event>& events,
+                                                  vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterPesEvent& pesEvent = events[i].pes();
+        DemuxFilterPesEvent pes;
+
+        pes.streamId = static_cast<int32_t>(pesEvent.streamId);
+        pes.dataLength = static_cast<int32_t>(pesEvent.dataLength);
+        pes.mpuSequenceNumber = static_cast<int32_t>(pesEvent.mpuSequenceNumber);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::pes>(move(pes));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of TS record events. The SC index mask is re-based
+// for AVC (reverse of getHidlRecordSettings), a missing PID becomes
+// INVALID_TS_PID, and pts/firstMbInSlice are merged in from the 1.1 extension
+// event at the same index when one exists.
+void TunerHidlFilter::FilterCallback::getTsRecordEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        DemuxFilterTsRecordEvent tsRecord;
+        const HidlDemuxFilterTsRecordEvent& tsRecordEvent = events[i].tsRecord();
+
+        DemuxFilterScIndexMask scIndexMask;
+        if (tsRecordEvent.scIndexMask.getDiscriminator() ==
+            HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
+            int32_t hidlScIndex = static_cast<int32_t>(tsRecordEvent.scIndexMask.sc());
+            // Values up to SEQUENCE are plain SC indexes; larger ones are AVC
+            // bits that the HIDL HAL numbers starting at 1 << 4.
+            if (hidlScIndex <= static_cast<int32_t>(DemuxScIndex::SEQUENCE)) {
+                scIndexMask.set<DemuxFilterScIndexMask::scIndex>(hidlScIndex);
+            } else {
+                // HIDL HAL starting from 1 << 4; AIDL starting from 1 << 0.
+                scIndexMask.set<DemuxFilterScIndexMask::scAvc>(hidlScIndex >> 4);
+            }
+        } else if (tsRecordEvent.scIndexMask.getDiscriminator() ==
+                   HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
+            scIndexMask.set<DemuxFilterScIndexMask::scHevc>(
+                    static_cast<int32_t>(tsRecordEvent.scIndexMask.scHevc()));
+        }
+
+        if (tsRecordEvent.pid.getDiscriminator() == HidlDemuxPid::hidl_discriminator::tPid) {
+            DemuxPid pid;
+            pid.set<DemuxPid::tPid>(static_cast<int32_t>(tsRecordEvent.pid.tPid()));
+            tsRecord.pid = pid;
+        } else {
+            // No TS PID reported — use the sentinel so callers can detect it.
+            DemuxPid pid;
+            pid.set<DemuxPid::tPid>(static_cast<int32_t>(Constant::INVALID_TS_PID));
+            tsRecord.pid = pid;
+        }
+
+        tsRecord.scIndexMask = scIndexMask;
+        tsRecord.tsIndexMask = static_cast<int32_t>(tsRecordEvent.tsIndexMask);
+        tsRecord.byteNumber = static_cast<int64_t>(tsRecordEvent.byteNumber);
+
+        // 1.1 extension fields are paired with base events by index.
+        if (eventsExt.size() > i &&
+            eventsExt[i].getDiscriminator() ==
+                    HidlDemuxFilterEventExt::Event::hidl_discriminator::tsRecord) {
+            tsRecord.pts = static_cast<int64_t>(eventsExt[i].tsRecord().pts);
+            tsRecord.firstMbInSlice = static_cast<int32_t>(eventsExt[i].tsRecord().firstMbInSlice);
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::tsRecord>(move(tsRecord));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of MMTP record events; pts, mpuSequenceNumber,
+// firstMbInSlice and tsIndexMask come from the 1.1 extension event at the
+// same index when one exists.
+void TunerHidlFilter::FilterCallback::getMmtpRecordEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events,
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        DemuxFilterMmtpRecordEvent mmtpRecord;
+        const HidlDemuxFilterMmtpRecordEvent& mmtpRecordEvent = events[i].mmtpRecord();
+
+        mmtpRecord.scHevcIndexMask = static_cast<int32_t>(mmtpRecordEvent.scHevcIndexMask);
+        mmtpRecord.byteNumber = static_cast<int64_t>(mmtpRecordEvent.byteNumber);
+
+        if (eventsExt.size() > i &&
+            eventsExt[i].getDiscriminator() ==
+                    HidlDemuxFilterEventExt::Event::hidl_discriminator::mmtpRecord) {
+            mmtpRecord.pts = static_cast<int64_t>(eventsExt[i].mmtpRecord().pts);
+            mmtpRecord.mpuSequenceNumber =
+                    static_cast<int32_t>(eventsExt[i].mmtpRecord().mpuSequenceNumber);
+            mmtpRecord.firstMbInSlice =
+                    static_cast<int32_t>(eventsExt[i].mmtpRecord().firstMbInSlice);
+            mmtpRecord.tsIndexMask = static_cast<int32_t>(eventsExt[i].mmtpRecord().tsIndexMask);
+        }
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::mmtpRecord>(move(mmtpRecord));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of download events (item/fragment bookkeeping).
+void TunerHidlFilter::FilterCallback::getDownloadEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterDownloadEvent& downloadEvent = events[i].download();
+        DemuxFilterDownloadEvent download;
+
+        download.itemId = static_cast<int32_t>(downloadEvent.itemId);
+        download.itemFragmentIndex = static_cast<int32_t>(downloadEvent.itemFragmentIndex);
+        download.mpuSequenceNumber = static_cast<int32_t>(downloadEvent.mpuSequenceNumber);
+        download.lastItemFragmentIndex = static_cast<int32_t>(downloadEvent.lastItemFragmentIndex);
+        download.dataLength = static_cast<int32_t>(downloadEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::download>(move(download));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of IP payload events (data length only).
+void TunerHidlFilter::FilterCallback::getIpPayloadEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterIpPayloadEvent& ipPayloadEvent = events[i].ipPayload();
+        DemuxFilterIpPayloadEvent ipPayload;
+
+        ipPayload.dataLength = static_cast<int32_t>(ipPayloadEvent.dataLength);
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::ipPayload>(move(ipPayload));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of TEMI (timed external media information) events;
+// the descriptor payload bytes are copied into the AIDL vector.
+void TunerHidlFilter::FilterCallback::getTemiEvent(
+        const vector<HidlDemuxFilterEvent::Event>& events, vector<DemuxFilterEvent>& res) {
+    for (int i = 0; i < events.size(); i++) {
+        const HidlDemuxFilterTemiEvent& temiEvent = events[i].temi();
+        DemuxFilterTemiEvent temi;
+
+        temi.pts = static_cast<int64_t>(temiEvent.pts);
+        temi.descrTag = static_cast<int8_t>(temiEvent.descrTag);
+        vector<uint8_t> descrData = temiEvent.descrData;
+        temi.descrData.resize(descrData.size());
+        copy(descrData.begin(), descrData.end(), temi.descrData.begin());
+
+        DemuxFilterEvent filterEvent;
+        filterEvent.set<DemuxFilterEvent::temi>(move(temi));
+        res.push_back(move(filterEvent));
+    }
+}
+
+// HIDL -> AIDL conversion of a monitor event (scrambling status or CID).
+// Only eventsExt[0] is consumed; the caller (getAidlFilterEvent) guarantees
+// eventsExt is non-empty before dispatching here.
+void TunerHidlFilter::FilterCallback::getMonitorEvent(
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    HidlDemuxFilterMonitorEvent monitorEvent = eventsExt[0].monitorEvent();
+    DemuxFilterMonitorEvent monitor;
+
+    switch (monitorEvent.getDiscriminator()) {
+    case HidlDemuxFilterMonitorEvent::hidl_discriminator::scramblingStatus: {
+        monitor.set<DemuxFilterMonitorEvent::scramblingStatus>(
+                static_cast<ScramblingStatus>(monitorEvent.scramblingStatus()));
+        break;
+    }
+    case HidlDemuxFilterMonitorEvent::hidl_discriminator::cid: {
+        monitor.set<DemuxFilterMonitorEvent::cid>(static_cast<int32_t>(monitorEvent.cid()));
+        break;
+    }
+    }
+
+    DemuxFilterEvent filterEvent;
+    filterEvent.set<DemuxFilterEvent::monitorEvent>(move(monitor));
+    res.push_back(move(filterEvent));
+}
+
+// HIDL -> AIDL conversion of a restart (startId) event; like getMonitorEvent,
+// only the first extension event is used.
+void TunerHidlFilter::FilterCallback::getRestartEvent(
+        const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
+    DemuxFilterEvent filterEvent;
+    filterEvent.set<DemuxFilterEvent::startId>(static_cast<int32_t>(eventsExt[0].startId()));
+    res.push_back(move(filterEvent));
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlFilter.h b/services/tuner/hidl/TunerHidlFilter.h
new file mode 100644
index 0000000..b8fad22
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFilter.h
@@ -0,0 +1,240 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLFILTER_H
+#define ANDROID_MEDIA_TUNERHIDLFILTER_H
+
+#include <aidl/android/hardware/tv/tuner/AvStreamType.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterAvSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterDownloadSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterPesDataSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterRecordSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSectionSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterSettings.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterType.h>
+#include <aidl/android/media/tv/tuner/BnTunerFilter.h>
+#include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <android/hardware/tv/tuner/1.1/IFilter.h>
+#include <android/hardware/tv/tuner/1.1/IFilterCallback.h>
+#include <android/hardware/tv/tuner/1.1/types.h>
+#include <fmq/MessageQueue.h>
+#include <utils/Mutex.h>
+
+#include <map>
+
+using ::aidl::android::hardware::common::NativeHandle;
+using ::aidl::android::hardware::common::fmq::MQDescriptor;
+using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+using ::aidl::android::hardware::tv::tuner::AvStreamType;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterAvSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterDownloadSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterPesDataSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterRecordSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSectionSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterSettings;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::DemuxIpAddressIpAddress;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
+using ::aidl::android::media::tv::tuner::BnTunerFilter;
+using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
+using ::android::Mutex;
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::string;
+using ::std::vector;
+
+using HidlAvStreamType = ::android::hardware::tv::tuner::V1_1::AvStreamType;
+using HidlDemuxAlpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterSettings;
+using HidlDemuxFilterAvSettings = ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
+using HidlDemuxFilterDownloadEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadEvent;
+using HidlDemuxFilterDownloadSettings =
+ ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadSettings;
+using HidlDemuxFilterIpPayloadEvent =
+ ::android::hardware::tv::tuner::V1_0::DemuxFilterIpPayloadEvent;
+using HidlDemuxFilterEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
+using HidlDemuxFilterMediaEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterMediaEvent;
+using HidlDemuxFilterMmtpRecordEvent =
+ ::android::hardware::tv::tuner::V1_0::DemuxFilterMmtpRecordEvent;
+using HidlDemuxFilterPesDataSettings =
+ ::android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
+using HidlDemuxFilterPesEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
+using HidlDemuxFilterRecordSettings =
+ ::android::hardware::tv::tuner::V1_0::DemuxFilterRecordSettings;
+using HidlDemuxFilterSectionEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
+using HidlDemuxFilterSectionSettings =
+ ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
+using HidlDemuxFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
+using HidlDemuxFilterStatus = ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
+using HidlDemuxFilterTemiEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterTemiEvent;
+using HidlDemuxFilterTsRecordEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterTsRecordEvent;
+using HidlDemuxIpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxIpFilterSettings;
+using HidlDemuxMmtpFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterSettings;
+using HidlDemuxTlvFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterSettings;
+using HidlDemuxTsFilterSettings = ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
+using HidlDemuxPid = ::android::hardware::tv::tuner::V1_0::DemuxPid;
+using HidlIFilter = ::android::hardware::tv::tuner::V1_0::IFilter;
+using HidlDvStreamType = ::android::hardware::tv::tuner::V1_1::AvStreamType;
+using HidlDemuxFilterEventExt = ::android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
+using HidlDemuxFilterMonitorEvent = ::android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
+using HidlDemuxFilterTsRecordEventExt =
+ ::android::hardware::tv::tuner::V1_1::DemuxFilterTsRecordEventExt;
+using HidlIFilterCallback = ::android::hardware::tv::tuner::V1_1::IFilterCallback;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+using MQDesc = MQDescriptorSync<uint8_t>;
+using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
+
+const static int IP_V4_LENGTH = 4;
+const static int IP_V6_LENGTH = 16;
+
/**
 * AIDL ITunerFilter implementation backed by a HIDL (tuner@1.0/1.1) IFilter.
 *
 * Bridges the AIDL tuner service surface to the underlying HIDL filter HAL,
 * translating settings and events between the two type systems, and supports
 * lending the filter to another client via shared-filter tokens.
 */
class TunerHidlFilter : public BnTunerFilter {
public:
    /**
     * HIDL filter callback that converts HAL filter events/status into AIDL
     * types and forwards them to the registered ITunerFilterCallback.
     */
    class FilterCallback : public HidlIFilterCallback {
    public:
        FilterCallback(const shared_ptr<ITunerFilterCallback> tunerFilterCallback)
              : mTunerFilterCallback(tunerFilterCallback){};

        // IFilterCallback (HIDL 1.0 and 1.1) entry points.
        virtual Return<void> onFilterEvent(const HidlDemuxFilterEvent& filterEvent);
        virtual Return<void> onFilterEvent_1_1(const HidlDemuxFilterEvent& filterEvent,
                                               const HidlDemuxFilterEventExt& filterEventExt);
        virtual Return<void> onFilterStatus(HidlDemuxFilterStatus status);

        // Shared-filter support: push a status to the current callback, or swap
        // the callback for (and back from) a borrowing client.
        void sendSharedFilterStatus(int32_t status);
        void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
        void detachSharedFilterCallback();

    private:
        // Fans a HIDL event (plus optional 1.1 ext events) out to the
        // per-event-type converters below.
        void getAidlFilterEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                                const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
                                vector<DemuxFilterEvent>& aidlEvents);

        // HIDL -> AIDL converters; each appends the converted events to `res`.
        void getMediaEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                           vector<DemuxFilterEvent>& res);
        void getSectionEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                             vector<DemuxFilterEvent>& res);
        void getPesEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                         vector<DemuxFilterEvent>& res);
        void getTsRecordEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                              const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
                              vector<DemuxFilterEvent>& res);
        void getMmtpRecordEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                                const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
                                vector<DemuxFilterEvent>& res);
        void getDownloadEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                              vector<DemuxFilterEvent>& res);
        void getIpPayloadEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                               vector<DemuxFilterEvent>& res);
        void getTemiEvent(const vector<HidlDemuxFilterEvent::Event>& events,
                          vector<DemuxFilterEvent>& res);
        void getMonitorEvent(const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
                             vector<DemuxFilterEvent>& res);
        void getRestartEvent(const vector<HidlDemuxFilterEventExt::Event>& eventsExt,
                             vector<DemuxFilterEvent>& res);

    private:
        shared_ptr<ITunerFilterCallback> mTunerFilterCallback;  // active callback target
        shared_ptr<ITunerFilterCallback> mOriginalCallback;     // owner's callback, kept while shared
        Mutex mCallbackLock;                                    // guards the two callbacks above
    };

    TunerHidlFilter(sp<HidlIFilter> filter, sp<FilterCallback> cb, DemuxFilterType type);
    virtual ~TunerHidlFilter();

    // ITunerFilter (AIDL) interface — each call is forwarded to the HIDL HAL.
    ::ndk::ScopedAStatus getId(int32_t* _aidl_return) override;
    ::ndk::ScopedAStatus getId64Bit(int64_t* _aidl_return) override;
    ::ndk::ScopedAStatus getQueueDesc(AidlMQDesc* _aidl_return) override;
    ::ndk::ScopedAStatus configure(const DemuxFilterSettings& in_settings) override;
    ::ndk::ScopedAStatus configureMonitorEvent(int32_t in_monitorEventTypes) override;
    ::ndk::ScopedAStatus configureIpFilterContextId(int32_t in_cid) override;
    ::ndk::ScopedAStatus configureAvStreamType(const AvStreamType& in_avStreamType) override;
    ::ndk::ScopedAStatus getAvSharedHandle(NativeHandle* out_avMemory,
                                           int64_t* _aidl_return) override;
    ::ndk::ScopedAStatus releaseAvHandle(const NativeHandle& in_handle,
                                         int64_t in_avDataId) override;
    ::ndk::ScopedAStatus setDataSource(const shared_ptr<ITunerFilter>& in_filter) override;
    ::ndk::ScopedAStatus start() override;
    ::ndk::ScopedAStatus stop() override;
    ::ndk::ScopedAStatus flush() override;
    ::ndk::ScopedAStatus close() override;
    ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
    ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
    ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
    ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;

    // Shared-filter helpers used by the tuner service.
    bool isSharedFilterAllowed(int32_t pid);
    void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
    sp<HidlIFilter> getHalFilter();

private:
    bool isAudioFilter();
    bool isVideoFilter();

    // AIDL -> HIDL settings conversion helpers (one per demux/filter flavor).
    HidlDemuxFilterAvSettings getHidlAvSettings(const DemuxFilterAvSettings& settings);
    HidlDemuxFilterSectionSettings getHidlSectionSettings(
            const DemuxFilterSectionSettings& settings);
    HidlDemuxFilterPesDataSettings getHidlPesDataSettings(
            const DemuxFilterPesDataSettings& settings);
    HidlDemuxFilterRecordSettings getHidlRecordSettings(const DemuxFilterRecordSettings& settings);
    HidlDemuxFilterDownloadSettings getHidlDownloadSettings(
            const DemuxFilterDownloadSettings& settings);
    bool getHidlAvStreamType(const AvStreamType avStreamType, HidlAvStreamType& type);
    void getHidlTsSettings(const DemuxFilterSettings& settings,
                           HidlDemuxFilterSettings& hidlSettings);
    void getHidlMmtpSettings(const DemuxFilterSettings& settings,
                             HidlDemuxFilterSettings& hidlSettings);
    void getHidlIpSettings(const DemuxFilterSettings& settings,
                           HidlDemuxFilterSettings& hidlSettings);
    void getHidlTlvSettings(const DemuxFilterSettings& settings,
                            HidlDemuxFilterSettings& hidlSettings);
    void getHidlAlpSettings(const DemuxFilterSettings& settings,
                            HidlDemuxFilterSettings& hidlSettings);

    hidl_array<uint8_t, IP_V4_LENGTH> getIpV4Address(const DemuxIpAddressIpAddress& addr);
    hidl_array<uint8_t, IP_V6_LENGTH> getIpV6Address(const DemuxIpAddressIpAddress& addr);

    sp<HidlIFilter> mFilter;                                   // 1.0 HAL filter (always set)
    sp<::android::hardware::tv::tuner::V1_1::IFilter> mFilter_1_1;  // 1.1 cast; null on 1.0 HALs
    int32_t mId;
    int64_t mId64Bit;
    DemuxFilterType mType;
    bool mStarted;
    bool mShared;        // true while lent out via a shared-filter token
    int32_t mClientPid;
    sp<FilterCallback> mFilterCallback;
    Mutex mLock;
};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLFILTER_H
diff --git a/services/tuner/hidl/TunerHidlFrontend.cpp b/services/tuner/hidl/TunerHidlFrontend.cpp
new file mode 100644
index 0000000..1f28406
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFrontend.cpp
@@ -0,0 +1,1208 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TunerHidlFrontend"
+
+#include "TunerHidlFrontend.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+#include "TunerHidlLnb.h"
+
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogSifStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogType;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Bandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3CodeRate;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Fec;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Modulation;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3PlpSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3TimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendCableTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcAnnex;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsRolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtConstellation;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtHierarchy;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::FrontendGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendInnerFec;
+using ::aidl::android::hardware::tv::tuner::FrontendInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Modulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Rolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsRolloff;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtBandwidth;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtCoderate;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtGuardInterval;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtMode;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendModulation;
+using ::aidl::android::hardware::tv::tuner::FrontendModulationStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendRollOff;
+using ::aidl::android::hardware::tv::tuner::FrontendScanAtsc3PlpInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageStandard;
+using ::aidl::android::hardware::tv::tuner::FrontendSpectralInversion;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusAtsc3PlpInfo;
+using ::aidl::android::hardware::tv::tuner::FrontendTransmissionMode;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlFrontendStatusAtsc3PlpInfo =
+ ::aidl::android::hardware::tv::tuner::FrontendStatusAtsc3PlpInfo;
+using HidlFrontendAnalogSifStandard =
+ ::android::hardware::tv::tuner::V1_0::FrontendAnalogSifStandard;
+using HidlFrontendAnalogType = ::android::hardware::tv::tuner::V1_0::FrontendAnalogType;
+using HidlFrontendAtscModulation = ::android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
+using HidlFrontendAtsc3Bandwidth = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Bandwidth;
+using HidlFrontendAtsc3CodeRate = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3CodeRate;
+using HidlFrontendAtsc3DemodOutputFormat =
+ ::android::hardware::tv::tuner::V1_0::FrontendAtsc3DemodOutputFormat;
+using HidlFrontendAtsc3Fec = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Fec;
+using HidlFrontendAtsc3Modulation = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3Modulation;
+using HidlFrontendAtsc3TimeInterleaveMode =
+ ::android::hardware::tv::tuner::V1_0::FrontendAtsc3TimeInterleaveMode;
+using HidlFrontendDvbcAnnex = ::android::hardware::tv::tuner::V1_0::FrontendDvbcAnnex;
+using HidlFrontendDvbcModulation = ::android::hardware::tv::tuner::V1_0::FrontendDvbcModulation;
+using HidlFrontendDvbcOuterFec = ::android::hardware::tv::tuner::V1_0::FrontendDvbcOuterFec;
+using HidlFrontendDvbcSpectralInversion =
+ ::android::hardware::tv::tuner::V1_0::FrontendDvbcSpectralInversion;
+using HidlFrontendDvbsModulation = ::android::hardware::tv::tuner::V1_0::FrontendDvbsModulation;
+using HidlFrontendDvbsPilot = ::android::hardware::tv::tuner::V1_0::FrontendDvbsPilot;
+using HidlFrontendDvbsRolloff = ::android::hardware::tv::tuner::V1_0::FrontendDvbsRolloff;
+using HidlFrontendDvbsSettings = ::android::hardware::tv::tuner::V1_0::FrontendDvbsSettings;
+using HidlFrontendDvbsStandard = ::android::hardware::tv::tuner::V1_0::FrontendDvbsStandard;
+using HidlFrontendDvbsVcmMode = ::android::hardware::tv::tuner::V1_0::FrontendDvbsVcmMode;
+using HidlFrontendDvbtBandwidth = ::android::hardware::tv::tuner::V1_0::FrontendDvbtBandwidth;
+using HidlFrontendDvbtCoderate = ::android::hardware::tv::tuner::V1_0::FrontendDvbtCoderate;
+using HidlFrontendDvbtConstellation =
+ ::android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
+using HidlFrontendDvbtGuardInterval =
+ ::android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
+using HidlFrontendDvbtHierarchy = ::android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
+using HidlFrontendDvbtPlpMode = ::android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
+using HidlFrontendDvbtSettings = ::android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
+using HidlFrontendDvbtStandard = ::android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
+using HidlFrontendDvbtTransmissionMode =
+ ::android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
+using HidlFrontendInnerFec = ::android::hardware::tv::tuner::V1_0::FrontendInnerFec;
+using HidlFrontendIsdbs3Coderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Coderate;
+using HidlFrontendIsdbs3Modulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Modulation;
+using HidlFrontendIsdbs3Rolloff = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Rolloff;
+using HidlFrontendIsdbs3Settings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbs3Settings;
+using HidlFrontendIsdbsCoderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsCoderate;
+using HidlFrontendIsdbsModulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsModulation;
+using HidlFrontendIsdbsRolloff = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsRolloff;
+using HidlFrontendIsdbsSettings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbsSettings;
+using HidlFrontendIsdbsStreamIdType =
+ ::android::hardware::tv::tuner::V1_0::FrontendIsdbsStreamIdType;
+using HidlFrontendIsdbtBandwidth = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtBandwidth;
+using HidlFrontendIsdbtCoderate = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtCoderate;
+using HidlFrontendIsdbtGuardInterval =
+ ::android::hardware::tv::tuner::V1_0::FrontendIsdbtGuardInterval;
+using HidlFrontendIsdbtMode = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtMode;
+using HidlFrontendIsdbtModulation = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtModulation;
+using HidlFrontendIsdbtSettings = ::android::hardware::tv::tuner::V1_0::FrontendIsdbtSettings;
+using HidlFrontendModulationStatus = ::android::hardware::tv::tuner::V1_0::FrontendModulationStatus;
+using HidlFrontendScanAtsc3PlpInfo = ::android::hardware::tv::tuner::V1_0::FrontendScanAtsc3PlpInfo;
+using HidlFrontendScanType = ::android::hardware::tv::tuner::V1_0::FrontendScanType;
+using HidlFrontendStatusType = ::android::hardware::tv::tuner::V1_0::FrontendStatusType;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlFrontendAnalogAftFlag = ::android::hardware::tv::tuner::V1_1::FrontendAnalogAftFlag;
+using HidlFrontendBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendBandwidth;
+using HidlFrontendCableTimeInterleaveMode =
+ ::android::hardware::tv::tuner::V1_1::FrontendCableTimeInterleaveMode;
+using HidlFrontendDvbcBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendDvbcBandwidth;
+using HidlFrontendDtmbBandwidth = ::android::hardware::tv::tuner::V1_1::FrontendDtmbBandwidth;
+using HidlFrontendDtmbCodeRate = ::android::hardware::tv::tuner::V1_1::FrontendDtmbCodeRate;
+using HidlFrontendDtmbGuardInterval =
+ ::android::hardware::tv::tuner::V1_1::FrontendDtmbGuardInterval;
+using HidlFrontendDtmbModulation = ::android::hardware::tv::tuner::V1_1::FrontendDtmbModulation;
+using HidlFrontendDtmbTimeInterleaveMode =
+ ::android::hardware::tv::tuner::V1_1::FrontendDtmbTimeInterleaveMode;
+using HidlFrontendDtmbTransmissionMode =
+ ::android::hardware::tv::tuner::V1_1::FrontendDtmbTransmissionMode;
+using HidlFrontendDvbsScanType = ::android::hardware::tv::tuner::V1_1::FrontendDvbsScanType;
+using HidlFrontendGuardInterval = ::android::hardware::tv::tuner::V1_1::FrontendGuardInterval;
+using HidlFrontendInterleaveMode = ::android::hardware::tv::tuner::V1_1::FrontendInterleaveMode;
+using HidlFrontendModulation = ::android::hardware::tv::tuner::V1_1::FrontendModulation;
+using HidlFrontendRollOff = ::android::hardware::tv::tuner::V1_1::FrontendRollOff;
+using HidlFrontendTransmissionMode = ::android::hardware::tv::tuner::V1_1::FrontendTransmissionMode;
+using HidlFrontendSpectralInversion =
+ ::android::hardware::tv::tuner::V1_1::FrontendSpectralInversion;
+using HidlFrontendStatusTypeExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendStatusTypeExt1_1;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlFrontend::TunerHidlFrontend(sp<HidlIFrontend> frontend, int id) {
+ mFrontend = frontend;
+ mFrontend_1_1 = ::android::hardware::tv::tuner::V1_1::IFrontend::castFrom(mFrontend);
+ mId = id;
+}
+
+TunerHidlFrontend::~TunerHidlFrontend() {
+ mFrontend = nullptr;
+ mFrontend_1_1 = nullptr;
+ mId = -1;
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::setCallback(
+ const shared_ptr<ITunerFrontendCallback>& tunerFrontendCallback) {
+ if (mFrontend == nullptr) {
+ ALOGE("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (tunerFrontendCallback == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ sp<HidlIFrontendCallback> frontendCallback = new FrontendCallback(tunerFrontendCallback);
+ HidlResult status = mFrontend->setCallback(frontendCallback);
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::tune(const FrontendSettings& settings) {
+ if (mFrontend == nullptr) {
+ ALOGE("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ HidlFrontendSettings frontendSettings;
+ HidlFrontendSettingsExt1_1 frontendSettingsExt;
+ getHidlFrontendSettings(settings, frontendSettings, frontendSettingsExt);
+ if (mFrontend_1_1 != nullptr) {
+ status = mFrontend_1_1->tune_1_1(frontendSettings, frontendSettingsExt);
+ } else {
+ status = mFrontend->tune(frontendSettings);
+ }
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::stopTune() {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mFrontend->stopTune();
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::scan(const FrontendSettings& settings,
+ FrontendScanType frontendScanType) {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ HidlFrontendSettings frontendSettings;
+ HidlFrontendSettingsExt1_1 frontendSettingsExt;
+ getHidlFrontendSettings(settings, frontendSettings, frontendSettingsExt);
+ if (mFrontend_1_1 != nullptr) {
+ status = mFrontend_1_1->scan_1_1(frontendSettings,
+ static_cast<HidlFrontendScanType>(frontendScanType),
+ frontendSettingsExt);
+ } else {
+ status = mFrontend->scan(frontendSettings,
+ static_cast<HidlFrontendScanType>(frontendScanType));
+ }
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::stopScan() {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mFrontend->stopScan();
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::setLnb(const shared_ptr<ITunerLnb>& lnb) {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (lnb == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ HidlResult status = mFrontend->setLnb(static_cast<TunerHidlLnb*>(lnb.get())->getId());
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::setLna(bool bEnable) {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mFrontend->setLna(bEnable);
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::linkCiCamToFrontend(int32_t ciCamId,
+ int32_t* _aidl_return) {
+ if (mFrontend_1_1 == nullptr) {
+ ALOGD("IFrontend_1_1 is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ int ltsId;
+ HidlResult status;
+ mFrontend_1_1->linkCiCam(static_cast<uint32_t>(ciCamId), [&](HidlResult r, uint32_t id) {
+ status = r;
+ ltsId = id;
+ });
+
+ if (status == HidlResult::SUCCESS) {
+ *_aidl_return = ltsId;
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::unlinkCiCamToFrontend(int32_t ciCamId) {
+ if (mFrontend_1_1 == nullptr) {
+ ALOGD("IFrontend_1_1 is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mFrontend_1_1->unlinkCiCam(ciCamId);
+ if (status == HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::close() {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mFrontend->close();
+ mFrontend = nullptr;
+ mFrontend_1_1 = nullptr;
+
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlFrontend::getStatus(const vector<FrontendStatusType>& in_statusTypes,
+ vector<FrontendStatus>* _aidl_return) {
+ if (mFrontend == nullptr) {
+ ALOGD("IFrontend is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res;
+ vector<HidlFrontendStatus> status;
+ vector<HidlFrontendStatusExt1_1> statusExt;
+ vector<HidlFrontendStatusType> types;
+ vector<HidlFrontendStatusTypeExt1_1> typesExt;
+ for (auto s : in_statusTypes) {
+ if (static_cast<int32_t>(s) <=
+ static_cast<int32_t>(HidlFrontendStatusType::ATSC3_PLP_INFO)) {
+ types.push_back(static_cast<HidlFrontendStatusType>(s));
+ } else {
+ typesExt.push_back(static_cast<HidlFrontendStatusTypeExt1_1>(s));
+ }
+ }
+
+ mFrontend->getStatus(types, [&](HidlResult r, const hidl_vec<HidlFrontendStatus>& ss) {
+ res = r;
+ for (auto s : ss) {
+ status.push_back(s);
+ }
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ if (mFrontend_1_1 != nullptr) {
+ mFrontend_1_1->getStatusExt1_1(
+ typesExt, [&](HidlResult r, const hidl_vec<HidlFrontendStatusExt1_1>& ss) {
+ res = r;
+ for (auto s : ss) {
+ statusExt.push_back(s);
+ }
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ }
+
+ getAidlFrontendStatus(status, statusExt, *_aidl_return);
+ return ::ndk::ScopedAStatus::ok();
+}
+
// Returns the frontend resource id assigned at construction (reset to -1 by
// the destructor). Does not touch the HAL, so it succeeds even after close().
::ndk::ScopedAStatus TunerHidlFrontend::getFrontendId(int32_t* _aidl_return) {
    *_aidl_return = mId;
    return ::ndk::ScopedAStatus::ok();
}
+
/////////////// FrontendCallback ///////////////////////
// Forwards a HAL frontend event to the registered AIDL callback. The HIDL
// event type is static_cast to the AIDL enum — assumes the two enums' values
// line up one-to-one (TODO confirm against the AIDL definition).
Return<void> TunerHidlFrontend::FrontendCallback::onEvent(HidlFrontendEventType frontendEventType) {
    ALOGV("FrontendCallback::onEvent, type=%d", frontendEventType);
    mTunerFrontendCallback->onEvent(static_cast<FrontendEventType>(frontendEventType));
    return Void();
}
+
+// Forwards a 1.0 HIDL scan message to the AIDL client, converting the HIDL
+// safe_union payload into the AIDL FrontendScanMessage union. Unknown types
+// fall through and deliver an unset message with the raw type value.
+Return<void> TunerHidlFrontend::FrontendCallback::onScanMessage(
+        HidlFrontendScanMessageType type, const HidlFrontendScanMessage& message) {
+    // Explicit cast: scoped enum through varargs for "%d" is not portable.
+    ALOGV("FrontendCallback::onScanMessage, type=%d", static_cast<int>(type));
+    FrontendScanMessage scanMessage;
+    switch (type) {
+    case HidlFrontendScanMessageType::LOCKED: {
+        scanMessage.set<FrontendScanMessage::isLocked>(message.isLocked());
+        break;
+    }
+    case HidlFrontendScanMessageType::END: {
+        scanMessage.set<FrontendScanMessage::isEnd>(message.isEnd());
+        break;
+    }
+    case HidlFrontendScanMessageType::PROGRESS_PERCENT: {
+        scanMessage.set<FrontendScanMessage::progressPercent>(message.progressPercent());
+        break;
+    }
+    case HidlFrontendScanMessageType::FREQUENCY: {
+        // Widen uint32_t HIDL frequencies to the int64_t AIDL representation.
+        const vector<uint32_t>& f = message.frequencies();
+        vector<int64_t> lf(begin(f), end(f));
+        scanMessage.set<FrontendScanMessage::frequencies>(lf);
+        break;
+    }
+    case HidlFrontendScanMessageType::SYMBOL_RATE: {
+        const vector<uint32_t>& s = message.symbolRates();
+        vector<int32_t> symbolRates(begin(s), end(s));
+        scanMessage.set<FrontendScanMessage::symbolRates>(symbolRates);
+        break;
+    }
+    case HidlFrontendScanMessageType::HIERARCHY: {
+        scanMessage.set<FrontendScanMessage::hierarchy>(
+                static_cast<FrontendDvbtHierarchy>(message.hierarchy()));
+        break;
+    }
+    case HidlFrontendScanMessageType::ANALOG_TYPE: {
+        scanMessage.set<FrontendScanMessage::analogType>(
+                static_cast<FrontendAnalogType>(message.analogType()));
+        break;
+    }
+    case HidlFrontendScanMessageType::PLP_IDS: {
+        // uint8_t ids widen to int32_t in AIDL.
+        const vector<uint8_t>& p = message.plpIds();
+        vector<int32_t> plpIds(begin(p), end(p));
+        scanMessage.set<FrontendScanMessage::plpIds>(plpIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::GROUP_IDS: {
+        const vector<uint8_t>& g = message.groupIds();
+        vector<int32_t> groupIds(begin(g), end(g));
+        scanMessage.set<FrontendScanMessage::groupIds>(groupIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::INPUT_STREAM_IDS: {
+        const vector<uint16_t>& i = message.inputStreamIds();
+        vector<int32_t> streamIds(begin(i), end(i));
+        scanMessage.set<FrontendScanMessage::inputStreamIds>(streamIds);
+        break;
+    }
+    case HidlFrontendScanMessageType::STANDARD: {
+        // The standard is itself a union; map the active member.
+        const HidlFrontendScanMessage::Standard& std = message.std();
+        FrontendScanMessageStandard standard;
+        if (std.getDiscriminator() == HidlFrontendScanMessage::Standard::hidl_discriminator::sStd) {
+            standard.set<FrontendScanMessageStandard::sStd>(
+                    static_cast<FrontendDvbsStandard>(std.sStd()));
+        } else if (std.getDiscriminator() ==
+                   HidlFrontendScanMessage::Standard::hidl_discriminator::tStd) {
+            standard.set<FrontendScanMessageStandard::tStd>(
+                    static_cast<FrontendDvbtStandard>(std.tStd()));
+        } else if (std.getDiscriminator() ==
+                   HidlFrontendScanMessage::Standard::hidl_discriminator::sifStd) {
+            standard.set<FrontendScanMessageStandard::sifStd>(
+                    static_cast<FrontendAnalogSifStandard>(std.sifStd()));
+        }
+        scanMessage.set<FrontendScanMessage::std>(standard);
+        break;
+    }
+    case HidlFrontendScanMessageType::ATSC3_PLP_INFO: {
+        const vector<HidlFrontendScanAtsc3PlpInfo>& plpInfos = message.atsc3PlpInfos();
+        vector<FrontendScanAtsc3PlpInfo> tunerPlpInfos;
+        for (int i = 0; i < plpInfos.size(); i++) {
+            FrontendScanAtsc3PlpInfo plpInfo{
+                    .plpId = static_cast<int32_t>(plpInfos[i].plpId),
+                    .bLlsFlag = plpInfos[i].bLlsFlag,
+            };
+            tunerPlpInfos.push_back(plpInfo);
+        }
+        scanMessage.set<FrontendScanMessage::atsc3PlpInfos>(tunerPlpInfos);
+        break;
+    }
+    default:
+        break;
+    }
+    mTunerFrontendCallback->onScanMessage(static_cast<FrontendScanMessageType>(type), scanMessage);
+    return Void();
+}
+
+// Forwards a 1.1 HIDL scan message (modulation / DVB-C annex / priority) to
+// the AIDL client, converting the nested HIDL unions to their AIDL forms.
+Return<void> TunerHidlFrontend::FrontendCallback::onScanMessageExt1_1(
+        HidlFrontendScanMessageTypeExt1_1 type, const HidlFrontendScanMessageExt1_1& message) {
+    // Log message previously misidentified the function; also cast the scoped
+    // enum explicitly so "%d" through varargs is well-defined.
+    ALOGV("FrontendCallback::onScanMessageExt1_1, type=%d", static_cast<int>(type));
+    FrontendScanMessage scanMessage;
+    switch (type) {
+    case HidlFrontendScanMessageTypeExt1_1::MODULATION: {
+        // Modulation is a per-standard union; map the active member.
+        HidlFrontendModulation m = message.modulation();
+        FrontendModulation modulation;
+        switch (m.getDiscriminator()) {
+        case HidlFrontendModulation::hidl_discriminator::dvbc: {
+            modulation.set<FrontendModulation::dvbc>(static_cast<FrontendDvbcModulation>(m.dvbc()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dvbt: {
+            modulation.set<FrontendModulation::dvbt>(
+                    static_cast<FrontendDvbtConstellation>(m.dvbt()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dvbs: {
+            modulation.set<FrontendModulation::dvbs>(static_cast<FrontendDvbsModulation>(m.dvbs()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbs: {
+            modulation.set<FrontendModulation::isdbs>(
+                    static_cast<FrontendIsdbsModulation>(m.isdbs()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbs3: {
+            modulation.set<FrontendModulation::isdbs3>(
+                    static_cast<FrontendIsdbs3Modulation>(m.isdbs3()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::isdbt: {
+            modulation.set<FrontendModulation::isdbt>(
+                    static_cast<FrontendIsdbtModulation>(m.isdbt()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::atsc: {
+            modulation.set<FrontendModulation::atsc>(static_cast<FrontendAtscModulation>(m.atsc()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::atsc3: {
+            modulation.set<FrontendModulation::atsc3>(
+                    static_cast<FrontendAtsc3Modulation>(m.atsc3()));
+            break;
+        }
+        case HidlFrontendModulation::hidl_discriminator::dtmb: {
+            modulation.set<FrontendModulation::dtmb>(static_cast<FrontendDtmbModulation>(m.dtmb()));
+            break;
+        }
+        }
+        scanMessage.set<FrontendScanMessage::modulation>(modulation);
+        break;
+    }
+    case HidlFrontendScanMessageTypeExt1_1::DVBC_ANNEX: {
+        scanMessage.set<FrontendScanMessage::annex>(
+                static_cast<FrontendDvbcAnnex>(message.annex()));
+        break;
+    }
+    case HidlFrontendScanMessageTypeExt1_1::HIGH_PRIORITY: {
+        scanMessage.set<FrontendScanMessage::isHighPriority>(message.isHighPriority());
+        break;
+    }
+    default: {
+        break;
+    }
+    }
+    mTunerFrontendCallback->onScanMessage(static_cast<FrontendScanMessageType>(type), scanMessage);
+    return Void();
+}
+
+/////////////// TunerHidlFrontend Helper Methods ///////////////////////
+// Converts HIDL (1.0 + 1.1) frontend status unions into a flat list of AIDL
+// FrontendStatus values: one output entry is appended per input entry, with
+// each HIDL safe_union discriminator mapped to the matching AIDL union tag and
+// numeric fields cast/widened as needed.
+// @param hidlStatus    statuses reported by the 1.0 IFrontend interface.
+// @param hidlStatusExt statuses reported by the 1.1 extension interface.
+// @param aidlStatus    output vector; converted entries are appended in order.
+void TunerHidlFrontend::getAidlFrontendStatus(const vector<HidlFrontendStatus>& hidlStatus,
+                                              const vector<HidlFrontendStatusExt1_1>& hidlStatusExt,
+                                              vector<FrontendStatus>& aidlStatus) {
+    // Translate every 1.0 status entry.
+    for (HidlFrontendStatus s : hidlStatus) {
+        FrontendStatus status;
+        switch (s.getDiscriminator()) {
+        case HidlFrontendStatus::hidl_discriminator::isDemodLocked: {
+            status.set<FrontendStatus::isDemodLocked>(s.isDemodLocked());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::snr: {
+            status.set<FrontendStatus::snr>((int)s.snr());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::ber: {
+            status.set<FrontendStatus::ber>((int)s.ber());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::per: {
+            status.set<FrontendStatus::per>((int)s.per());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::preBer: {
+            status.set<FrontendStatus::preBer>((int)s.preBer());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::signalQuality: {
+            status.set<FrontendStatus::signalQuality>((int)s.signalQuality());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::signalStrength: {
+            status.set<FrontendStatus::signalStrength>((int)s.signalStrength());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::symbolRate: {
+            status.set<FrontendStatus::symbolRate>((int)s.symbolRate());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::innerFec: {
+            status.set<FrontendStatus::innerFec>(static_cast<FrontendInnerFec>(s.innerFec()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::modulation: {
+            // Modulation is itself a per-standard union; map the active member.
+            FrontendModulationStatus modulationStatus;
+            switch (s.modulation().getDiscriminator()) {
+            case HidlFrontendModulationStatus::hidl_discriminator::dvbc:
+                modulationStatus.set<FrontendModulationStatus::dvbc>(
+                        static_cast<FrontendDvbcModulation>(s.modulation().dvbc()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::dvbs:
+                modulationStatus.set<FrontendModulationStatus::dvbs>(
+                        static_cast<FrontendDvbsModulation>(s.modulation().dvbs()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbs:
+                modulationStatus.set<FrontendModulationStatus::isdbs>(
+                        static_cast<FrontendIsdbsModulation>(s.modulation().isdbs()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbs3:
+                modulationStatus.set<FrontendModulationStatus::isdbs3>(
+                        static_cast<FrontendIsdbs3Modulation>(s.modulation().isdbs3()));
+                break;
+            case HidlFrontendModulationStatus::hidl_discriminator::isdbt:
+                modulationStatus.set<FrontendModulationStatus::isdbt>(
+                        static_cast<FrontendIsdbtModulation>(s.modulation().isdbt()));
+                break;
+            }
+            status.set<FrontendStatus::modulationStatus>(modulationStatus);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::inversion: {
+            status.set<FrontendStatus::inversion>(
+                    static_cast<FrontendSpectralInversion>(s.inversion()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::lnbVoltage: {
+            status.set<FrontendStatus::lnbVoltage>(static_cast<LnbVoltage>(s.lnbVoltage()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::plpId: {
+            status.set<FrontendStatus::plpId>((int32_t)s.plpId());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isEWBS: {
+            status.set<FrontendStatus::isEWBS>(s.isEWBS());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::agc: {
+            status.set<FrontendStatus::agc>((int32_t)s.agc());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isLnaOn: {
+            status.set<FrontendStatus::isLnaOn>(s.isLnaOn());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isLayerError: {
+            vector<bool> e(s.isLayerError().begin(), s.isLayerError().end());
+            status.set<FrontendStatus::isLayerError>(e);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::mer: {
+            status.set<FrontendStatus::mer>(static_cast<int32_t>(s.mer()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::freqOffset: {
+            status.set<FrontendStatus::freqOffset>(static_cast<int64_t>(s.freqOffset()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::hierarchy: {
+            status.set<FrontendStatus::hierarchy>(
+                    static_cast<FrontendDvbtHierarchy>(s.hierarchy()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::isRfLocked: {
+            status.set<FrontendStatus::isRfLocked>(s.isRfLocked());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatus::hidl_discriminator::plpInfo: {
+            vector<FrontendStatusAtsc3PlpInfo> info;
+            for (auto i : s.plpInfo()) {
+                info.push_back({
+                        .plpId = static_cast<int32_t>(i.plpId),
+                        .isLocked = i.isLocked,
+                        .uec = static_cast<int32_t>(i.uec),
+                });
+            }
+            status.set<FrontendStatus::plpInfo>(info);
+            aidlStatus.push_back(status);
+            break;
+        }
+        }
+    }
+
+    // Translate every 1.1 extension status entry.
+    for (HidlFrontendStatusExt1_1 s : hidlStatusExt) {
+        FrontendStatus status;
+        switch (s.getDiscriminator()) {
+        case HidlFrontendStatusExt1_1::hidl_discriminator::modulations: {
+            vector<FrontendModulation> aidlMod;
+            for (auto m : s.modulations()) {
+                switch (m.getDiscriminator()) {
+                case HidlFrontendModulation::hidl_discriminator::dvbc:
+                    aidlMod.push_back(static_cast<FrontendDvbcModulation>(m.dvbc()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dvbs:
+                    aidlMod.push_back(static_cast<FrontendDvbsModulation>(m.dvbs()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dvbt:
+                    aidlMod.push_back(static_cast<FrontendDvbtConstellation>(m.dvbt()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbs:
+                    aidlMod.push_back(static_cast<FrontendIsdbsModulation>(m.isdbs()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbs3:
+                    aidlMod.push_back(static_cast<FrontendIsdbs3Modulation>(m.isdbs3()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::isdbt:
+                    aidlMod.push_back(static_cast<FrontendIsdbtModulation>(m.isdbt()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::atsc:
+                    aidlMod.push_back(static_cast<FrontendAtscModulation>(m.atsc()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::atsc3:
+                    aidlMod.push_back(static_cast<FrontendAtsc3Modulation>(m.atsc3()));
+                    break;
+                case HidlFrontendModulation::hidl_discriminator::dtmb:
+                    aidlMod.push_back(static_cast<FrontendDtmbModulation>(m.dtmb()));
+                    break;
+                }
+            }
+            status.set<FrontendStatus::modulations>(aidlMod);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::bers: {
+            vector<int> b(s.bers().begin(), s.bers().end());
+            status.set<FrontendStatus::bers>(b);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::codeRates: {
+            vector<FrontendInnerFec> codeRates;
+            for (auto c : s.codeRates()) {
+                codeRates.push_back(static_cast<FrontendInnerFec>(c));
+            }
+            status.set<FrontendStatus::codeRates>(codeRates);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::bandwidth: {
+            FrontendBandwidth bandwidth;
+            switch (s.bandwidth().getDiscriminator()) {
+            case HidlFrontendBandwidth::hidl_discriminator::atsc3:
+                bandwidth.set<FrontendBandwidth::atsc3>(
+                        static_cast<FrontendAtsc3Bandwidth>(s.bandwidth().atsc3()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dvbc:
+                bandwidth.set<FrontendBandwidth::dvbc>(
+                        static_cast<FrontendDvbcBandwidth>(s.bandwidth().dvbc()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dvbt:
+                bandwidth.set<FrontendBandwidth::dvbt>(
+                        static_cast<FrontendDvbtBandwidth>(s.bandwidth().dvbt()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::isdbt:
+                bandwidth.set<FrontendBandwidth::isdbt>(
+                        static_cast<FrontendIsdbtBandwidth>(s.bandwidth().isdbt()));
+                break;
+            case HidlFrontendBandwidth::hidl_discriminator::dtmb:
+                bandwidth.set<FrontendBandwidth::dtmb>(
+                        static_cast<FrontendDtmbBandwidth>(s.bandwidth().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::bandwidth>(bandwidth);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::interval: {
+            FrontendGuardInterval interval;
+            switch (s.interval().getDiscriminator()) {
+            case HidlFrontendGuardInterval::hidl_discriminator::dvbt:
+                interval.set<FrontendGuardInterval::dvbt>(
+                        static_cast<FrontendDvbtGuardInterval>(s.interval().dvbt()));
+                break;
+            case HidlFrontendGuardInterval::hidl_discriminator::isdbt:
+                interval.set<FrontendGuardInterval::isdbt>(
+                        static_cast<FrontendIsdbtGuardInterval>(s.interval().isdbt()));
+                break;
+            case HidlFrontendGuardInterval::hidl_discriminator::dtmb:
+                interval.set<FrontendGuardInterval::dtmb>(
+                        static_cast<FrontendDtmbGuardInterval>(s.interval().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::interval>(interval);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::transmissionMode: {
+            FrontendTransmissionMode transmissionMode;
+            switch (s.transmissionMode().getDiscriminator()) {
+            case HidlFrontendTransmissionMode::hidl_discriminator::dvbt:
+                transmissionMode.set<FrontendTransmissionMode::dvbt>(
+                        static_cast<FrontendDvbtTransmissionMode>(s.transmissionMode().dvbt()));
+                break;
+            case HidlFrontendTransmissionMode::hidl_discriminator::isdbt:
+                transmissionMode.set<FrontendTransmissionMode::isdbt>(
+                        static_cast<FrontendIsdbtMode>(s.transmissionMode().isdbt()));
+                break;
+            case HidlFrontendTransmissionMode::hidl_discriminator::dtmb:
+                transmissionMode.set<FrontendTransmissionMode::dtmb>(
+                        static_cast<FrontendDtmbTransmissionMode>(s.transmissionMode().dtmb()));
+                break;
+            }
+            status.set<FrontendStatus::transmissionMode>(transmissionMode);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::uec: {
+            status.set<FrontendStatus::uec>(static_cast<int32_t>(s.uec()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::systemId: {
+            status.set<FrontendStatus::systemId>(static_cast<int32_t>(s.systemId()));
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::interleaving: {
+            vector<FrontendInterleaveMode> aidlInter;
+            for (auto i : s.interleaving()) {
+                FrontendInterleaveMode leaveMode;
+                switch (i.getDiscriminator()) {
+                case HidlFrontendInterleaveMode::hidl_discriminator::atsc3:
+                    leaveMode.set<FrontendInterleaveMode::atsc3>(
+                            static_cast<FrontendAtsc3TimeInterleaveMode>(i.atsc3()));
+                    break;
+                case HidlFrontendInterleaveMode::hidl_discriminator::dvbc:
+                    leaveMode.set<FrontendInterleaveMode::dvbc>(
+                            static_cast<FrontendCableTimeInterleaveMode>(i.dvbc()));
+                    break;
+                case HidlFrontendInterleaveMode::hidl_discriminator::dtmb:
+                    leaveMode.set<FrontendInterleaveMode::dtmb>(
+                            static_cast<FrontendDtmbTimeInterleaveMode>(i.dtmb()));
+                    break;
+                }
+                aidlInter.push_back(leaveMode);
+            }
+            status.set<FrontendStatus::interleaving>(aidlInter);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isdbtSegment: {
+            const vector<uint8_t>& seg = s.isdbtSegment();
+            vector<int32_t> i(seg.begin(), seg.end());
+            status.set<FrontendStatus::isdbtSegment>(i);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::tsDataRate: {
+            vector<int32_t> ts(s.tsDataRate().begin(), s.tsDataRate().end());
+            status.set<FrontendStatus::tsDataRate>(ts);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::rollOff: {
+            FrontendRollOff rollOff;
+            switch (s.rollOff().getDiscriminator()) {
+            case HidlFrontendRollOff::hidl_discriminator::dvbs:
+                rollOff.set<FrontendRollOff::dvbs>(
+                        static_cast<FrontendDvbsRolloff>(s.rollOff().dvbs()));
+                break;
+            case HidlFrontendRollOff::hidl_discriminator::isdbs:
+                rollOff.set<FrontendRollOff::isdbs>(
+                        static_cast<FrontendIsdbsRolloff>(s.rollOff().isdbs()));
+                break;
+            case HidlFrontendRollOff::hidl_discriminator::isdbs3:
+                rollOff.set<FrontendRollOff::isdbs3>(
+                        static_cast<FrontendIsdbs3Rolloff>(s.rollOff().isdbs3()));
+                break;
+            }
+            status.set<FrontendStatus::rollOff>(rollOff);
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isMiso: {
+            status.set<FrontendStatus::isMiso>(s.isMiso());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isLinear: {
+            status.set<FrontendStatus::isLinear>(s.isLinear());
+            aidlStatus.push_back(status);
+            break;
+        }
+        case HidlFrontendStatusExt1_1::hidl_discriminator::isShortFrames: {
+            status.set<FrontendStatus::isShortFrames>(s.isShortFrames());
+            aidlStatus.push_back(status);
+            break;
+        }
+        }
+    }
+}
+
+// Translates the AIDL ATSC3 PLP settings list into the equivalent HIDL vector.
+// @param settings AIDL ATSC3 frontend settings carrying the PLP entries.
+// @return HIDL PLP settings, one per AIDL entry, in the same order.
+hidl_vec<HidlFrontendAtsc3PlpSettings> TunerHidlFrontend::getAtsc3PlpSettings(
+        const FrontendAtsc3Settings& settings) {
+    const int count = settings.plpSettings.size();
+    hidl_vec<HidlFrontendAtsc3PlpSettings> hidlPlps(count);
+    // Convert each PLP entry field by field.
+    for (int idx = 0; idx < count; idx++) {
+        const auto& aidlPlp = settings.plpSettings[idx];
+        hidlPlps[idx] = HidlFrontendAtsc3PlpSettings{
+                .plpId = static_cast<uint8_t>(aidlPlp.plpId),
+                .modulation = static_cast<HidlFrontendAtsc3Modulation>(aidlPlp.modulation),
+                .interleaveMode =
+                        static_cast<HidlFrontendAtsc3TimeInterleaveMode>(aidlPlp.interleaveMode),
+                .codeRate = static_cast<HidlFrontendAtsc3CodeRate>(aidlPlp.codeRate),
+                .fec = static_cast<HidlFrontendAtsc3Fec>(aidlPlp.fec),
+        };
+    }
+    return hidlPlps;
+}
+
+// Converts an AIDL DVB-S code-rate description into its HIDL counterpart.
+// @param codeRate AIDL code-rate settings.
+// @return the equivalent HIDL structure, field for field.
+HidlFrontendDvbsCodeRate TunerHidlFrontend::getDvbsCodeRate(const FrontendDvbsCodeRate& codeRate) {
+    return HidlFrontendDvbsCodeRate{
+            .fec = static_cast<HidlFrontendInnerFec>(codeRate.fec),
+            .isLinear = codeRate.isLinear,
+            .isShortFrames = codeRate.isShortFrames,
+            .bitsPer1000Symbol = static_cast<uint32_t>(codeRate.bitsPer1000Symbol),
+    };
+}
+
+// Expands an AIDL FrontendSettings union into the paired HIDL 1.0 settings
+// union plus the 1.1 extension struct. Fields that only exist in 1.1 (end
+// frequency, spectral inversion, per-standard extras such as DTMB) are placed
+// in settingsExt; standards without 1.1 extras call settingExt.noinit().
+// NOTE(review): the isdbs/isdbs3 cases leave settingsExt.inversion untouched —
+// presumably those AIDL settings carry no inversion field; confirm if changed.
+// @param aidlSettings input AIDL settings union (tag selects the standard).
+// @param settings     out: HIDL 1.0 settings union.
+// @param settingsExt  out: HIDL 1.1 extension settings.
+void TunerHidlFrontend::getHidlFrontendSettings(const FrontendSettings& aidlSettings,
+                                                HidlFrontendSettings& settings,
+                                                HidlFrontendSettingsExt1_1& settingsExt) {
+    switch (aidlSettings.getTag()) {
+    case FrontendSettings::analog: {
+        const FrontendAnalogSettings& analog = aidlSettings.get<FrontendSettings::analog>();
+        settings.analog({
+                .frequency = static_cast<uint32_t>(analog.frequency),
+                .type = static_cast<HidlFrontendAnalogType>(analog.type),
+                .sifStandard = static_cast<HidlFrontendAnalogSifStandard>(analog.sifStandard),
+        });
+        settingsExt.settingExt.analog({
+                .aftFlag = static_cast<HidlFrontendAnalogAftFlag>(analog.aftFlag),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(analog.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(analog.inversion);
+        break;
+    }
+    case FrontendSettings::atsc: {
+        const FrontendAtscSettings& atsc = aidlSettings.get<FrontendSettings::atsc>();
+        settings.atsc({
+                .frequency = static_cast<uint32_t>(atsc.frequency),
+                .modulation = static_cast<HidlFrontendAtscModulation>(atsc.modulation),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(atsc.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(atsc.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::atsc3: {
+        const FrontendAtsc3Settings& atsc3 = aidlSettings.get<FrontendSettings::atsc3>();
+        settings.atsc3({
+                .frequency = static_cast<uint32_t>(atsc3.frequency),
+                .bandwidth = static_cast<HidlFrontendAtsc3Bandwidth>(atsc3.bandwidth),
+                .demodOutputFormat =
+                        static_cast<HidlFrontendAtsc3DemodOutputFormat>(atsc3.demodOutputFormat),
+                .plpSettings = getAtsc3PlpSettings(atsc3),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(atsc3.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(atsc3.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::dvbc: {
+        const FrontendDvbcSettings& dvbc = aidlSettings.get<FrontendSettings::dvbc>();
+        settings.dvbc({
+                .frequency = static_cast<uint32_t>(dvbc.frequency),
+                .modulation = static_cast<HidlFrontendDvbcModulation>(dvbc.modulation),
+                .fec = static_cast<HidlFrontendInnerFec>(dvbc.fec),
+                .symbolRate = static_cast<uint32_t>(dvbc.symbolRate),
+                .outerFec = static_cast<HidlFrontendDvbcOuterFec>(dvbc.outerFec),
+                .annex = static_cast<HidlFrontendDvbcAnnex>(dvbc.annex),
+                .spectralInversion = static_cast<HidlFrontendDvbcSpectralInversion>(dvbc.inversion),
+        });
+        settingsExt.settingExt.dvbc({
+                .interleaveMode =
+                        static_cast<HidlFrontendCableTimeInterleaveMode>(dvbc.interleaveMode),
+                .bandwidth = static_cast<HidlFrontendDvbcBandwidth>(dvbc.bandwidth),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbc.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbc.inversion);
+        break;
+    }
+    case FrontendSettings::dvbs: {
+        const FrontendDvbsSettings& dvbs = aidlSettings.get<FrontendSettings::dvbs>();
+        settings.dvbs({
+                .frequency = static_cast<uint32_t>(dvbs.frequency),
+                .modulation = static_cast<HidlFrontendDvbsModulation>(dvbs.modulation),
+                .coderate = getDvbsCodeRate(dvbs.coderate),
+                .symbolRate = static_cast<uint32_t>(dvbs.symbolRate),
+                .rolloff = static_cast<HidlFrontendDvbsRolloff>(dvbs.rolloff),
+                .pilot = static_cast<HidlFrontendDvbsPilot>(dvbs.pilot),
+                .inputStreamId = static_cast<uint32_t>(dvbs.inputStreamId),
+                .standard = static_cast<HidlFrontendDvbsStandard>(dvbs.standard),
+                .vcmMode = static_cast<HidlFrontendDvbsVcmMode>(dvbs.vcmMode),
+        });
+        settingsExt.settingExt.dvbs({
+                .scanType = static_cast<HidlFrontendDvbsScanType>(dvbs.scanType),
+                .isDiseqcRxMessage = dvbs.isDiseqcRxMessage,
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbs.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbs.inversion);
+        break;
+    }
+    case FrontendSettings::dvbt: {
+        const FrontendDvbtSettings& dvbt = aidlSettings.get<FrontendSettings::dvbt>();
+        settings.dvbt({
+                .frequency = static_cast<uint32_t>(dvbt.frequency),
+                .transmissionMode =
+                        static_cast<HidlFrontendDvbtTransmissionMode>(dvbt.transmissionMode),
+                .bandwidth = static_cast<HidlFrontendDvbtBandwidth>(dvbt.bandwidth),
+                .constellation = static_cast<HidlFrontendDvbtConstellation>(dvbt.constellation),
+                .hierarchy = static_cast<HidlFrontendDvbtHierarchy>(dvbt.hierarchy),
+                .hpCoderate = static_cast<HidlFrontendDvbtCoderate>(dvbt.hpCoderate),
+                .lpCoderate = static_cast<HidlFrontendDvbtCoderate>(dvbt.lpCoderate),
+                .guardInterval = static_cast<HidlFrontendDvbtGuardInterval>(dvbt.guardInterval),
+                .isHighPriority = dvbt.isHighPriority,
+                .standard = static_cast<HidlFrontendDvbtStandard>(dvbt.standard),
+                .isMiso = dvbt.isMiso,
+                .plpMode = static_cast<HidlFrontendDvbtPlpMode>(dvbt.plpMode),
+                .plpId = static_cast<uint8_t>(dvbt.plpId),
+                .plpGroupId = static_cast<uint8_t>(dvbt.plpGroupId),
+        });
+        // The 1.1 extension re-sends constellation/transmission mode using the
+        // wider 1.1 enums.
+        settingsExt.settingExt.dvbt({
+                .constellation = static_cast<
+                        ::android::hardware::tv::tuner::V1_1::FrontendDvbtConstellation>(
+                        dvbt.constellation),
+                .transmissionMode = static_cast<
+                        ::android::hardware::tv::tuner::V1_1::FrontendDvbtTransmissionMode>(
+                        dvbt.transmissionMode),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dvbt.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dvbt.inversion);
+        break;
+    }
+    case FrontendSettings::isdbs: {
+        const FrontendIsdbsSettings& isdbs = aidlSettings.get<FrontendSettings::isdbs>();
+        settings.isdbs({
+                .frequency = static_cast<uint32_t>(isdbs.frequency),
+                .streamId = static_cast<uint16_t>(isdbs.streamId),
+                .streamIdType = static_cast<HidlFrontendIsdbsStreamIdType>(isdbs.streamIdType),
+                .modulation = static_cast<HidlFrontendIsdbsModulation>(isdbs.modulation),
+                .coderate = static_cast<HidlFrontendIsdbsCoderate>(isdbs.coderate),
+                .symbolRate = static_cast<uint32_t>(isdbs.symbolRate),
+                .rolloff = static_cast<HidlFrontendIsdbsRolloff>(isdbs.rolloff),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbs.endFrequency);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::isdbs3: {
+        const FrontendIsdbs3Settings& isdbs3 = aidlSettings.get<FrontendSettings::isdbs3>();
+        settings.isdbs3({
+                .frequency = static_cast<uint32_t>(isdbs3.frequency),
+                .streamId = static_cast<uint16_t>(isdbs3.streamId),
+                .streamIdType = static_cast<HidlFrontendIsdbsStreamIdType>(isdbs3.streamIdType),
+                .modulation = static_cast<HidlFrontendIsdbs3Modulation>(isdbs3.modulation),
+                .coderate = static_cast<HidlFrontendIsdbs3Coderate>(isdbs3.coderate),
+                .symbolRate = static_cast<uint32_t>(isdbs3.symbolRate),
+                .rolloff = static_cast<HidlFrontendIsdbs3Rolloff>(isdbs3.rolloff),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbs3.endFrequency);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::isdbt: {
+        const FrontendIsdbtSettings& isdbt = aidlSettings.get<FrontendSettings::isdbt>();
+        // The 1.0 HAL only models a single layer; take modulation/coderate from
+        // the first layer when present, otherwise leave them UNDEFINED.
+        HidlFrontendIsdbtModulation modulation = HidlFrontendIsdbtModulation::UNDEFINED;
+        HidlFrontendIsdbtCoderate coderate = HidlFrontendIsdbtCoderate::UNDEFINED;
+        if (isdbt.layerSettings.size() > 0) {
+            modulation =
+                    static_cast<HidlFrontendIsdbtModulation>(isdbt.layerSettings[0].modulation);
+            coderate = static_cast<HidlFrontendIsdbtCoderate>(isdbt.layerSettings[0].coderate);
+        }
+        settings.isdbt({
+                .frequency = static_cast<uint32_t>(isdbt.frequency),
+                .modulation = modulation,
+                .bandwidth = static_cast<HidlFrontendIsdbtBandwidth>(isdbt.bandwidth),
+                .mode = static_cast<HidlFrontendIsdbtMode>(isdbt.mode),
+                .coderate = coderate,
+                .guardInterval = static_cast<HidlFrontendIsdbtGuardInterval>(isdbt.guardInterval),
+                .serviceAreaId = static_cast<uint32_t>(isdbt.serviceAreaId),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(isdbt.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(isdbt.inversion);
+        settingsExt.settingExt.noinit();
+        break;
+    }
+    case FrontendSettings::dtmb: {
+        // DTMB did not exist in the 1.0 HAL; everything goes into settingsExt.
+        const FrontendDtmbSettings& dtmb = aidlSettings.get<FrontendSettings::dtmb>();
+        settingsExt.settingExt.dtmb({
+                .frequency = static_cast<uint32_t>(dtmb.frequency),
+                .transmissionMode =
+                        static_cast<HidlFrontendDtmbTransmissionMode>(dtmb.transmissionMode),
+                .bandwidth = static_cast<HidlFrontendDtmbBandwidth>(dtmb.bandwidth),
+                .modulation = static_cast<HidlFrontendDtmbModulation>(dtmb.modulation),
+                .codeRate = static_cast<HidlFrontendDtmbCodeRate>(dtmb.codeRate),
+                .guardInterval = static_cast<HidlFrontendDtmbGuardInterval>(dtmb.guardInterval),
+                .interleaveMode =
+                        static_cast<HidlFrontendDtmbTimeInterleaveMode>(dtmb.interleaveMode),
+        });
+        settingsExt.endFrequency = static_cast<uint32_t>(dtmb.endFrequency);
+        settingsExt.inversion = static_cast<HidlFrontendSpectralInversion>(dtmb.inversion);
+        break;
+    }
+    default:
+        break;
+    }
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlFrontend.h b/services/tuner/hidl/TunerHidlFrontend.h
new file mode 100644
index 0000000..6a3a04a
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlFrontend.h
@@ -0,0 +1,122 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLFRONTEND_H
+#define ANDROID_MEDIA_TUNERHIDLFRONTEND_H
+
+#include <aidl/android/hardware/tv/tuner/IFrontendCallback.h>
+#include <aidl/android/media/tv/tuner/BnTunerFrontend.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <android/hardware/tv/tuner/1.1/IFrontend.h>
+#include <android/hardware/tv/tuner/1.1/IFrontendCallback.h>
+#include <utils/Log.h>
+
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Settings;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsCodeRate;
+using ::aidl::android::hardware::tv::tuner::FrontendEventType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessage;
+using ::aidl::android::hardware::tv::tuner::FrontendScanMessageType;
+using ::aidl::android::hardware::tv::tuner::FrontendScanType;
+using ::aidl::android::hardware::tv::tuner::FrontendSettings;
+using ::aidl::android::hardware::tv::tuner::FrontendStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendStatusType;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlFrontendAtsc3PlpSettings = ::android::hardware::tv::tuner::V1_0::FrontendAtsc3PlpSettings;
+using HidlFrontendDvbsCodeRate = ::android::hardware::tv::tuner::V1_0::FrontendDvbsCodeRate;
+using HidlFrontendEventType = ::android::hardware::tv::tuner::V1_0::FrontendEventType;
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+using HidlFrontendScanMessage = ::android::hardware::tv::tuner::V1_0::FrontendScanMessage;
+using HidlFrontendScanMessageType = ::android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
+using HidlFrontendSettings = ::android::hardware::tv::tuner::V1_0::FrontendSettings;
+using HidlFrontendStatus = ::android::hardware::tv::tuner::V1_0::FrontendStatus;
+using HidlIFrontend = ::android::hardware::tv::tuner::V1_0::IFrontend;
+using HidlIFrontendCallback = ::android::hardware::tv::tuner::V1_1::IFrontendCallback;
+using HidlFrontendScanMessageExt1_1 =
+ ::android::hardware::tv::tuner::V1_1::FrontendScanMessageExt1_1;
+using HidlFrontendScanMessageTypeExt1_1 =
+ ::android::hardware::tv::tuner::V1_1::FrontendScanMessageTypeExt1_1;
+using HidlFrontendSettingsExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendSettingsExt1_1;
+using HidlFrontendStatusExt1_1 = ::android::hardware::tv::tuner::V1_1::FrontendStatusExt1_1;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlFrontend : public BnTunerFrontend {
+public:
+ TunerHidlFrontend(sp<HidlIFrontend> frontend, int id);
+ virtual ~TunerHidlFrontend();
+
+ ::ndk::ScopedAStatus setCallback(
+ const shared_ptr<ITunerFrontendCallback>& in_tunerFrontendCallback) override;
+ ::ndk::ScopedAStatus tune(const FrontendSettings& in_settings) override;
+ ::ndk::ScopedAStatus stopTune() override;
+ ::ndk::ScopedAStatus scan(const FrontendSettings& in_settings,
+ FrontendScanType in_frontendScanType) override;
+ ::ndk::ScopedAStatus stopScan() override;
+ ::ndk::ScopedAStatus setLnb(const shared_ptr<ITunerLnb>& in_lnb) override;
+ ::ndk::ScopedAStatus setLna(bool in_bEnable) override;
+ ::ndk::ScopedAStatus linkCiCamToFrontend(int32_t in_ciCamId, int32_t* _aidl_return) override;
+ ::ndk::ScopedAStatus unlinkCiCamToFrontend(int32_t in_ciCamId) override;
+ ::ndk::ScopedAStatus close() override;
+ ::ndk::ScopedAStatus getStatus(const vector<FrontendStatusType>& in_statusTypes,
+ vector<FrontendStatus>* _aidl_return) override;
+ ::ndk::ScopedAStatus getFrontendId(int32_t* _aidl_return) override;
+
+ struct FrontendCallback : public HidlIFrontendCallback {
+ FrontendCallback(const shared_ptr<ITunerFrontendCallback> tunerFrontendCallback)
+ : mTunerFrontendCallback(tunerFrontendCallback){};
+
+ virtual Return<void> onEvent(HidlFrontendEventType frontendEventType);
+ virtual Return<void> onScanMessage(HidlFrontendScanMessageType type,
+ const HidlFrontendScanMessage& message);
+ virtual Return<void> onScanMessageExt1_1(HidlFrontendScanMessageTypeExt1_1 type,
+ const HidlFrontendScanMessageExt1_1& message);
+
+ shared_ptr<ITunerFrontendCallback> mTunerFrontendCallback;
+ };
+
+private:
+ hidl_vec<HidlFrontendAtsc3PlpSettings> getAtsc3PlpSettings(
+ const FrontendAtsc3Settings& settings);
+ HidlFrontendDvbsCodeRate getDvbsCodeRate(const FrontendDvbsCodeRate& codeRate);
+ void getHidlFrontendSettings(const FrontendSettings& aidlSettings,
+ HidlFrontendSettings& settings,
+ HidlFrontendSettingsExt1_1& settingsExt);
+ void getAidlFrontendStatus(const vector<HidlFrontendStatus>& hidlStatus,
+ const vector<HidlFrontendStatusExt1_1>& hidlStatusExt,
+ vector<FrontendStatus>& aidlStatus);
+
+ int mId;
+ sp<HidlIFrontend> mFrontend;
+ sp<::android::hardware::tv::tuner::V1_1::IFrontend> mFrontend_1_1;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLFRONTEND_H
diff --git a/services/tuner/hidl/TunerHidlLnb.cpp b/services/tuner/hidl/TunerHidlLnb.cpp
new file mode 100644
index 0000000..a7e20bb
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlLnb.cpp
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlLnb"
+
+#include "TunerHidlLnb.h"
+
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+using ::aidl::android::hardware::tv::tuner::Result;
+using HidlLnbPosition = ::android::hardware::tv::tuner::V1_0::LnbPosition;
+using HidlLnbTone = ::android::hardware::tv::tuner::V1_0::LnbTone;
+using HidlLnbVoltage = ::android::hardware::tv::tuner::V1_0::LnbVoltage;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlLnb::TunerHidlLnb(sp<HidlILnb> lnb, int id) {
+ mLnb = lnb;
+ mId = id;
+}
+
+TunerHidlLnb::~TunerHidlLnb() {
+ mLnb = nullptr;
+ mId = -1;
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setCallback(
+ const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) {
+ if (mLnb == nullptr) {
+ ALOGE("ILnb is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (in_tunerLnbCallback == nullptr) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_ARGUMENT));
+ }
+
+ sp<HidlILnbCallback> lnbCallback = new LnbCallback(in_tunerLnbCallback);
+ HidlResult status = mLnb->setCallback(lnbCallback);
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setVoltage(LnbVoltage in_voltage) {
+ if (mLnb == nullptr) {
+ ALOGE("ILnb is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mLnb->setVoltage(static_cast<HidlLnbVoltage>(in_voltage));
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setTone(LnbTone in_tone) {
+ if (mLnb == nullptr) {
+ ALOGE("ILnb is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mLnb->setTone(static_cast<HidlLnbTone>(in_tone));
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::setSatellitePosition(LnbPosition in_position) {
+ if (mLnb == nullptr) {
+ ALOGE("ILnb is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mLnb->setSatellitePosition(static_cast<HidlLnbPosition>(in_position));
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) {
+ if (mLnb == nullptr) {
+ ALOGE("ILnb is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mLnb->sendDiseqcMessage(in_diseqcMessage);
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlLnb::close() {
+ if (mLnb == nullptr) {
+ ALOGE("ILnb is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mLnb->close();
+ mLnb = nullptr;
+
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+/////////////// ILnbCallback ///////////////////////
+Return<void> TunerHidlLnb::LnbCallback::onEvent(const HidlLnbEventType lnbEventType) {
+ if (mTunerLnbCallback != nullptr) {
+ mTunerLnbCallback->onEvent(static_cast<LnbEventType>(lnbEventType));
+ }
+ return Void();
+}
+
+Return<void> TunerHidlLnb::LnbCallback::onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
+ if (mTunerLnbCallback != nullptr) {
+ vector<uint8_t> msg(begin(diseqcMessage), end(diseqcMessage));
+ mTunerLnbCallback->onDiseqcMessage(msg);
+ }
+ return Void();
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlLnb.h b/services/tuner/hidl/TunerHidlLnb.h
new file mode 100644
index 0000000..becf848
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlLnb.h
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLLNB_H
+#define ANDROID_MEDIA_TUNERHIDLLNB_H
+
+#include <aidl/android/hardware/tv/tuner/ILnb.h>
+#include <aidl/android/media/tv/tuner/BnTunerLnb.h>
+#include <android/hardware/tv/tuner/1.0/ILnb.h>
+#include <android/hardware/tv/tuner/1.0/ILnbCallback.h>
+#include <utils/Log.h>
+
+using ::aidl::android::hardware::tv::tuner::LnbEventType;
+using ::aidl::android::hardware::tv::tuner::LnbPosition;
+using ::aidl::android::hardware::tv::tuner::LnbTone;
+using ::aidl::android::hardware::tv::tuner::LnbVoltage;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::vector;
+
+using HidlILnb = ::android::hardware::tv::tuner::V1_0::ILnb;
+using HidlILnbCallback = ::android::hardware::tv::tuner::V1_0::ILnbCallback;
+using HidlLnbEventType = ::android::hardware::tv::tuner::V1_0::LnbEventType;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlLnb : public BnTunerLnb {
+public:
+ TunerHidlLnb(sp<HidlILnb> lnb, int id);
+ virtual ~TunerHidlLnb();
+
+ ::ndk::ScopedAStatus setCallback(
+ const shared_ptr<ITunerLnbCallback>& in_tunerLnbCallback) override;
+ ::ndk::ScopedAStatus setVoltage(LnbVoltage in_voltage) override;
+ ::ndk::ScopedAStatus setTone(LnbTone in_tone) override;
+ ::ndk::ScopedAStatus setSatellitePosition(LnbPosition in_position) override;
+ ::ndk::ScopedAStatus sendDiseqcMessage(const vector<uint8_t>& in_diseqcMessage) override;
+ ::ndk::ScopedAStatus close() override;
+
+ int getId() { return mId; }
+
+ struct LnbCallback : public HidlILnbCallback {
+ LnbCallback(const shared_ptr<ITunerLnbCallback> tunerLnbCallback)
+ : mTunerLnbCallback(tunerLnbCallback){};
+
+ virtual Return<void> onEvent(const HidlLnbEventType lnbEventType);
+ virtual Return<void> onDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage);
+
+ shared_ptr<ITunerLnbCallback> mTunerLnbCallback;
+ };
+
+private:
+ int mId;
+ sp<HidlILnb> mLnb;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLLNB_H
diff --git a/services/tuner/hidl/TunerHidlService.cpp b/services/tuner/hidl/TunerHidlService.cpp
new file mode 100644
index 0000000..f4b0cde
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlService.cpp
@@ -0,0 +1,656 @@
+/**
+ * Copyright (c) 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TunerHidlService"
+
+#include "TunerHidlService.h"
+
+#include <aidl/android/hardware/tv/tuner/FrontendIsdbtTimeInterleaveMode.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+#include <android/binder_manager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/PermissionCache.h>
+#include <utils/Log.h>
+
+#include "TunerHelper.h"
+#include "TunerHidlDemux.h"
+#include "TunerHidlDescrambler.h"
+#include "TunerHidlFrontend.h"
+#include "TunerHidlLnb.h"
+
+using ::aidl::android::hardware::tv::tuner::FrontendAnalogCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendAtsc3Capabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendAtscCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDtmbCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbcCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbsCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendDvbtCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbs3Capabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbsCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtCapabilities;
+using ::aidl::android::hardware::tv::tuner::FrontendIsdbtTimeInterleaveMode;
+using ::aidl::android::hardware::tv::tuner::FrontendType;
+using ::aidl::android::hardware::tv::tuner::Result;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::IPCThreadState;
+using ::android::PermissionCache;
+using ::android::hardware::hidl_vec;
+
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+using HidlLnbId = ::android::hardware::tv::tuner::V1_0::LnbId;
+using HidlFrontendType = ::android::hardware::tv::tuner::V1_1::FrontendType;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+shared_ptr<TunerHidlService> TunerHidlService::sTunerService = nullptr;
+
+TunerHidlService::TunerHidlService() {
+ if (!TunerHelper::checkTunerFeature()) {
+ ALOGD("Device doesn't have tuner hardware.");
+ return;
+ }
+
+ updateTunerResources();
+}
+
+TunerHidlService::~TunerHidlService() {}
+
+binder_status_t TunerHidlService::instantiate() {
+ if (HidlITuner::getService() == nullptr) {
+ ALOGD("Failed to get ITuner HIDL HAL");
+ return STATUS_NAME_NOT_FOUND;
+ }
+
+ sTunerService = ::ndk::SharedRefBase::make<TunerHidlService>();
+ return AServiceManager_addService(sTunerService->asBinder().get(), getServiceName());
+}
+
+shared_ptr<TunerHidlService> TunerHidlService::getTunerService() {
+ return sTunerService;
+}
+
+bool TunerHidlService::hasITuner() {
+ ALOGV("hasITuner");
+ if (mTuner != nullptr) {
+ return true;
+ }
+
+ mTuner = HidlITuner::getService();
+ if (mTuner == nullptr) {
+ ALOGE("Failed to get ITuner service");
+ return false;
+ }
+ mTunerVersion = TUNER_HAL_VERSION_1_0;
+
+ mTuner_1_1 = ::android::hardware::tv::tuner::V1_1::ITuner::castFrom(mTuner);
+ if (mTuner_1_1 != nullptr) {
+ mTunerVersion = TUNER_HAL_VERSION_1_1;
+ } else {
+ ALOGD("Failed to get ITuner_1_1 service");
+ }
+
+ return true;
+}
+
+bool TunerHidlService::hasITuner_1_1() {
+ ALOGV("hasITuner_1_1");
+ hasITuner();
+ return (mTunerVersion == TUNER_HAL_VERSION_1_1);
+}
+
+::ndk::ScopedAStatus TunerHidlService::openDemux(int32_t /* in_demuxHandle */,
+ shared_ptr<ITunerDemux>* _aidl_return) {
+ ALOGV("openDemux");
+ if (!hasITuner()) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res;
+ uint32_t id;
+ sp<IDemux> demuxSp = nullptr;
+ mTuner->openDemux([&](HidlResult r, uint32_t demuxId, const sp<IDemux>& demux) {
+ demuxSp = demux;
+ id = demuxId;
+ res = r;
+ ALOGD("open demux, id = %d", demuxId);
+ });
+ if (res == HidlResult::SUCCESS) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDemux>(demuxSp, id);
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ ALOGW("open demux failed, res = %d", res);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
+ ALOGV("getDemuxCaps");
+ if (!hasITuner()) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res;
+ HidlDemuxCapabilities caps;
+ mTuner->getDemuxCaps([&](HidlResult r, const HidlDemuxCapabilities& demuxCaps) {
+ caps = demuxCaps;
+ res = r;
+ });
+ if (res == HidlResult::SUCCESS) {
+ *_aidl_return = getAidlDemuxCaps(caps);
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ ALOGW("Get demux caps failed, res = %d", res);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getFrontendIds(vector<int32_t>* ids) {
+ if (!hasITuner()) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ hidl_vec<HidlFrontendId> feIds;
+ HidlResult res = getHidlFrontendIds(feIds);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ ids->resize(feIds.size());
+ copy(feIds.begin(), feIds.end(), ids->begin());
+
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::getFrontendInfo(int32_t id, FrontendInfo* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGE("ITuner service is not init.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlFrontendInfo info;
+ HidlResult res = getHidlFrontendInfo(id, info);
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ HidlFrontendDtmbCapabilities dtmbCaps;
+ if (static_cast<HidlFrontendType>(info.type) == HidlFrontendType::DTMB) {
+ if (!hasITuner_1_1()) {
+ ALOGE("ITuner_1_1 service is not init.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ mTuner_1_1->getFrontendDtmbCapabilities(
+ id, [&](HidlResult r, const HidlFrontendDtmbCapabilities& caps) {
+ dtmbCaps = caps;
+ res = r;
+ });
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ }
+
+ *_aidl_return = getAidlFrontendInfo(info, dtmbCaps);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openFrontend(int32_t frontendHandle,
+ shared_ptr<ITunerFrontend>* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGE("ITuner service is not init.");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ sp<HidlIFrontend> frontend;
+ int id = TunerHelper::getResourceIdFromHandle(frontendHandle, FRONTEND);
+ mTuner->openFrontendById(id, [&](HidlResult result, const sp<HidlIFrontend>& fe) {
+ frontend = fe;
+ status = result;
+ });
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlFrontend>(frontend, id);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGD("get ITuner failed");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ sp<HidlILnb> lnb;
+ int id = TunerHelper::getResourceIdFromHandle(lnbHandle, LNB);
+ mTuner->openLnbById(id, [&](HidlResult result, const sp<HidlILnb>& lnbSp) {
+ lnb = lnbSp;
+ status = result;
+ });
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlLnb>(lnb, id);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openLnbByName(const string& lnbName,
+ shared_ptr<ITunerLnb>* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGE("get ITuner failed");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ int lnbId;
+ HidlResult status;
+ sp<HidlILnb> lnb;
+ mTuner->openLnbByName(lnbName, [&](HidlResult r, HidlLnbId id, const sp<HidlILnb>& lnbSp) {
+ status = r;
+ lnb = lnbSp;
+ lnbId = static_cast<int32_t>(id);
+ });
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlLnb>(lnb, lnbId);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openDescrambler(
+ int32_t /*descramblerHandle*/, shared_ptr<ITunerDescrambler>* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGD("get ITuner failed");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ sp<HidlIDescrambler> descrambler;
+ //int id = TunerHelper::getResourceIdFromHandle(descramblerHandle, DESCRAMBLER);
+ mTuner->openDescrambler([&](HidlResult r, const sp<HidlIDescrambler>& descramblerSp) {
+ status = r;
+ descrambler = descramblerSp;
+ });
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerHidlDescrambler>(descrambler);
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::getTunerHalVersion(int* _aidl_return) {
+ hasITuner();
+ *_aidl_return = mTunerVersion;
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlService::openSharedFilter(
+ const string& in_filterToken, const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) {
+ if (!hasITuner()) {
+ ALOGE("get ITuner failed");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ if (!PermissionCache::checkCallingPermission(sSharedFilterPermission)) {
+ ALOGE("Request requires android.permission.ACCESS_TV_SHARED_FILTER");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Mutex::Autolock _l(mSharedFiltersLock);
+ if (mSharedFilters.find(in_filterToken) == mSharedFilters.end()) {
+ *_aidl_return = nullptr;
+ ALOGD("fail to find %s", in_filterToken.c_str());
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ shared_ptr<TunerHidlFilter> filter = mSharedFilters.at(in_filterToken);
+ IPCThreadState* ipc = IPCThreadState::self();
+ const int pid = ipc->getCallingPid();
+ if (!filter->isSharedFilterAllowed(pid)) {
+ *_aidl_return = nullptr;
+ ALOGD("shared filter %s is opened in the same process", in_filterToken.c_str());
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ filter->attachSharedFilterCallback(in_cb);
+
+ *_aidl_return = filter;
+ return ::ndk::ScopedAStatus::ok();
+}
+
+string TunerHidlService::addFilterToShared(const shared_ptr<TunerHidlFilter>& sharedFilter) {
+ Mutex::Autolock _l(mSharedFiltersLock);
+
+ // Use sharedFilter address as token.
+ string token = to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get()));
+ mSharedFilters[token] = sharedFilter;
+
+ return token;
+}
+
+void TunerHidlService::removeSharedFilter(const shared_ptr<TunerHidlFilter>& sharedFilter) {
+ Mutex::Autolock _l(mSharedFiltersLock);
+
+ // Use sharedFilter address as token.
+ mSharedFilters.erase(to_string(reinterpret_cast<std::uintptr_t>(sharedFilter.get())));
+}
+
+void TunerHidlService::updateTunerResources() {
+ if (!hasITuner()) {
+ ALOGE("Failed to updateTunerResources");
+ return;
+ }
+
+ TunerHelper::updateTunerResources(getTRMFrontendInfos(), getTRMLnbHandles());
+}
+
+vector<TunerFrontendInfo> TunerHidlService::getTRMFrontendInfos() {
+ vector<TunerFrontendInfo> infos;
+ hidl_vec<HidlFrontendId> ids;
+ HidlResult res = getHidlFrontendIds(ids);
+ if (res != HidlResult::SUCCESS) {
+ return infos;
+ }
+
+ for (int i = 0; i < ids.size(); i++) {
+ HidlFrontendInfo frontendInfo;
+ HidlResult res = getHidlFrontendInfo(static_cast<int32_t>(ids[i]), frontendInfo);
+ if (res != HidlResult::SUCCESS) {
+ continue;
+ }
+ TunerFrontendInfo tunerFrontendInfo{
+ .handle = TunerHelper::getResourceHandleFromId(static_cast<int32_t>(ids[i]),
+ FRONTEND),
+ .type = static_cast<int32_t>(frontendInfo.type),
+ .exclusiveGroupId = static_cast<int32_t>(frontendInfo.exclusiveGroupId),
+ };
+ infos.push_back(tunerFrontendInfo);
+ }
+
+ return infos;
+}
+
+vector<int32_t> TunerHidlService::getTRMLnbHandles() {
+ vector<int32_t> lnbHandles;
+ if (mTuner != nullptr) {
+ HidlResult res;
+ vector<HidlLnbId> lnbIds;
+ mTuner->getLnbIds([&](HidlResult r, const hidl_vec<HidlLnbId>& ids) {
+ lnbIds = ids;
+ res = r;
+ });
+ if (res == HidlResult::SUCCESS && lnbIds.size() > 0) {
+ for (int i = 0; i < lnbIds.size(); i++) {
+ lnbHandles.push_back(
+ TunerHelper::getResourceHandleFromId(static_cast<int32_t>(lnbIds[i]), LNB));
+ }
+ }
+ }
+
+ return lnbHandles;
+}
+
+HidlResult TunerHidlService::getHidlFrontendIds(hidl_vec<HidlFrontendId>& ids) {
+ if (mTuner == nullptr) {
+ return HidlResult::NOT_INITIALIZED;
+ }
+ HidlResult res;
+ mTuner->getFrontendIds([&](HidlResult r, const hidl_vec<HidlFrontendId>& frontendIds) {
+ ids = frontendIds;
+ res = r;
+ });
+ return res;
+}
+
+HidlResult TunerHidlService::getHidlFrontendInfo(const int id, HidlFrontendInfo& info) {
+ if (mTuner == nullptr) {
+ return HidlResult::NOT_INITIALIZED;
+ }
+ HidlResult res;
+ mTuner->getFrontendInfo(id, [&](HidlResult r, const HidlFrontendInfo& feInfo) {
+ info = feInfo;
+ res = r;
+ });
+ return res;
+}
+
+DemuxCapabilities TunerHidlService::getAidlDemuxCaps(const HidlDemuxCapabilities& caps) {
+ DemuxCapabilities aidlCaps{
+ .numDemux = static_cast<int32_t>(caps.numDemux),
+ .numRecord = static_cast<int32_t>(caps.numRecord),
+ .numPlayback = static_cast<int32_t>(caps.numPlayback),
+ .numTsFilter = static_cast<int32_t>(caps.numTsFilter),
+ .numSectionFilter = static_cast<int32_t>(caps.numSectionFilter),
+ .numAudioFilter = static_cast<int32_t>(caps.numAudioFilter),
+ .numVideoFilter = static_cast<int32_t>(caps.numVideoFilter),
+ .numPesFilter = static_cast<int32_t>(caps.numPesFilter),
+ .numPcrFilter = static_cast<int32_t>(caps.numPcrFilter),
+ .numBytesInSectionFilter = static_cast<int64_t>(caps.numBytesInSectionFilter),
+ .filterCaps = static_cast<int32_t>(caps.filterCaps),
+ .bTimeFilter = caps.bTimeFilter,
+ };
+ aidlCaps.linkCaps.resize(caps.linkCaps.size());
+ copy(caps.linkCaps.begin(), caps.linkCaps.end(), aidlCaps.linkCaps.begin());
+ return aidlCaps;
+}
+
+FrontendInfo TunerHidlService::getAidlFrontendInfo(
+ const HidlFrontendInfo& halInfo, const HidlFrontendDtmbCapabilities& halDtmbCaps) {
+ FrontendInfo info{
+ .type = static_cast<FrontendType>(halInfo.type),
+ .minFrequency = static_cast<int64_t>(halInfo.minFrequency),
+ .maxFrequency = static_cast<int64_t>(halInfo.maxFrequency),
+ .minSymbolRate = static_cast<int32_t>(halInfo.minSymbolRate),
+ .maxSymbolRate = static_cast<int32_t>(halInfo.maxSymbolRate),
+ .acquireRange = static_cast<int64_t>(halInfo.acquireRange),
+ .exclusiveGroupId = static_cast<int32_t>(halInfo.exclusiveGroupId),
+ };
+ for (int i = 0; i < halInfo.statusCaps.size(); i++) {
+ info.statusCaps.push_back(static_cast<FrontendStatusType>(halInfo.statusCaps[i]));
+ }
+
+ FrontendCapabilities caps;
+ switch (halInfo.type) {
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::ANALOG: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendAnalogCapabilities analogCaps{
+ .typeCap = static_cast<int32_t>(halInfo.frontendCaps.analogCaps().typeCap),
+ .sifStandardCap =
+ static_cast<int32_t>(halInfo.frontendCaps.analogCaps().sifStandardCap),
+ };
+ caps.set<FrontendCapabilities::analogCaps>(analogCaps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::ATSC: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendAtscCapabilities atscCaps{
+ .modulationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.atscCaps().modulationCap),
+ };
+ caps.set<FrontendCapabilities::atscCaps>(atscCaps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::ATSC3: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendAtsc3Capabilities atsc3Caps{
+ .bandwidthCap =
+ static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().bandwidthCap),
+ .modulationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().modulationCap),
+ .timeInterleaveModeCap = static_cast<int32_t>(
+ halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap),
+ .codeRateCap =
+ static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().codeRateCap),
+ .demodOutputFormatCap = static_cast<int8_t>(
+ halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap),
+ .fecCap = static_cast<int32_t>(halInfo.frontendCaps.atsc3Caps().fecCap),
+ };
+ caps.set<FrontendCapabilities::atsc3Caps>(atsc3Caps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBC: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendDvbcCapabilities dvbcCaps{
+ .modulationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.dvbcCaps().modulationCap),
+ .fecCap = static_cast<int64_t>(halInfo.frontendCaps.dvbcCaps().fecCap),
+ .annexCap = static_cast<int8_t>(halInfo.frontendCaps.dvbcCaps().annexCap),
+ };
+ caps.set<FrontendCapabilities::dvbcCaps>(dvbcCaps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBS: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendDvbsCapabilities dvbsCaps{
+ .modulationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.dvbsCaps().modulationCap),
+ .innerfecCap =
+ static_cast<int64_t>(halInfo.frontendCaps.dvbsCaps().innerfecCap),
+ .standard = static_cast<int8_t>(halInfo.frontendCaps.dvbsCaps().standard),
+ };
+ caps.set<FrontendCapabilities::dvbsCaps>(dvbsCaps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::DVBT: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendDvbtCapabilities dvbtCaps{
+ .transmissionModeCap = static_cast<int32_t>(
+ halInfo.frontendCaps.dvbtCaps().transmissionModeCap),
+ .bandwidthCap =
+ static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().bandwidthCap),
+ .constellationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().constellationCap),
+ .coderateCap =
+ static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().coderateCap),
+ .hierarchyCap =
+ static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().hierarchyCap),
+ .guardIntervalCap =
+ static_cast<int32_t>(halInfo.frontendCaps.dvbtCaps().guardIntervalCap),
+ .isT2Supported = halInfo.frontendCaps.dvbtCaps().isT2Supported,
+ .isMisoSupported = halInfo.frontendCaps.dvbtCaps().isMisoSupported,
+ };
+ caps.set<FrontendCapabilities::dvbtCaps>(dvbtCaps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBS: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendIsdbsCapabilities isdbsCaps{
+ .modulationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbsCaps().modulationCap),
+ .coderateCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbsCaps().coderateCap),
+ };
+ caps.set<FrontendCapabilities::isdbsCaps>(isdbsCaps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBS3: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendIsdbs3Capabilities isdbs3Caps{
+ .modulationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbs3Caps().modulationCap),
+ .coderateCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbs3Caps().coderateCap),
+ };
+ caps.set<FrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+ }
+ break;
+ }
+ case ::android::hardware::tv::tuner::V1_0::FrontendType::ISDBT: {
+ if (HidlFrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps ==
+ halInfo.frontendCaps.getDiscriminator()) {
+ FrontendIsdbtCapabilities isdbtCaps{
+ .modeCap = static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().modeCap),
+ .bandwidthCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().bandwidthCap),
+ .modulationCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().modulationCap),
+ .coderateCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().coderateCap),
+ .guardIntervalCap =
+ static_cast<int32_t>(halInfo.frontendCaps.isdbtCaps().guardIntervalCap),
+ .timeInterleaveCap =
+ static_cast<int32_t>(FrontendIsdbtTimeInterleaveMode::UNDEFINED),
+ .isSegmentAuto = false,
+ .isFullSegment = false,
+ };
+ caps.set<FrontendCapabilities::isdbtCaps>(isdbtCaps);
+ }
+ break;
+ }
+ default: {
+ if (static_cast<HidlFrontendType>(info.type) == HidlFrontendType::DTMB) {
+ FrontendDtmbCapabilities dtmbCaps{
+ .transmissionModeCap = static_cast<int32_t>(halDtmbCaps.transmissionModeCap),
+ .bandwidthCap = static_cast<int32_t>(halDtmbCaps.bandwidthCap),
+ .modulationCap = static_cast<int32_t>(halDtmbCaps.modulationCap),
+ .codeRateCap = static_cast<int32_t>(halDtmbCaps.codeRateCap),
+ .guardIntervalCap = static_cast<int32_t>(halDtmbCaps.guardIntervalCap),
+ .interleaveModeCap = static_cast<int32_t>(halDtmbCaps.interleaveModeCap),
+ };
+ caps.set<FrontendCapabilities::dtmbCaps>(dtmbCaps);
+ }
+ break;
+ }
+ }
+
+ info.frontendCaps = caps;
+ return info;
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlService.h b/services/tuner/hidl/TunerHidlService.h
new file mode 100644
index 0000000..2b8750e
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlService.h
@@ -0,0 +1,121 @@
+/**
+ * Copyright (c) 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLSERVICE_H
+#define ANDROID_MEDIA_TUNERHIDLSERVICE_H
+
+#include <aidl/android/hardware/tv/tuner/DemuxFilterEvent.h>
+#include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
+#include <aidl/android/media/tv/tuner/BnTunerService.h>
+#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
+#include <android/hardware/tv/tuner/1.1/ITuner.h>
+#include <utils/Mutex.h>
+
+#include "TunerHelper.h"
+#include "TunerHidlFilter.h"
+
+using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
+using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::FrontendInfo;
+using ::aidl::android::media::tv::tuner::ITunerDemux;
+using ::aidl::android::media::tv::tuner::ITunerDescrambler;
+using ::aidl::android::media::tv::tuner::ITunerFrontend;
+using ::aidl::android::media::tv::tuner::ITunerLnb;
+using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
+using ::android::Mutex;
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::std::shared_ptr;
+using ::std::string;
+using ::std::vector;
+
+using HidlFrontendDtmbCapabilities = ::android::hardware::tv::tuner::V1_1::FrontendDtmbCapabilities;
+using HidlDemuxFilterEvent = ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
+using HidlDemuxFilterStatus = ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
+using HidlDemuxCapabilities = ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;
+using HidlFrontendInfo = ::android::hardware::tv::tuner::V1_0::FrontendInfo;
+using HidlITuner = ::android::hardware::tv::tuner::V1_0::ITuner;
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+using HidlFrontendId = ::android::hardware::tv::tuner::V1_0::FrontendId;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlService : public BnTunerService {
+public:
+ static char const* getServiceName() { return "media.tuner"; }
+ static binder_status_t instantiate();
+ TunerHidlService();
+ virtual ~TunerHidlService();
+
+ ::ndk::ScopedAStatus getFrontendIds(vector<int32_t>* out_ids) override;
+ ::ndk::ScopedAStatus getFrontendInfo(int32_t in_frontendHandle,
+ FrontendInfo* _aidl_return) override;
+ ::ndk::ScopedAStatus openFrontend(int32_t in_frontendHandle,
+ shared_ptr<ITunerFrontend>* _aidl_return) override;
+ ::ndk::ScopedAStatus openLnb(int32_t in_lnbHandle,
+ shared_ptr<ITunerLnb>* _aidl_return) override;
+ ::ndk::ScopedAStatus openLnbByName(const std::string& in_lnbName,
+ shared_ptr<ITunerLnb>* _aidl_return) override;
+ ::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
+ shared_ptr<ITunerDemux>* _aidl_return) override;
+ ::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+ ::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
+ shared_ptr<ITunerDescrambler>* _aidl_return) override;
+ ::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
+ ::ndk::ScopedAStatus openSharedFilter(const string& in_filterToken,
+ const shared_ptr<ITunerFilterCallback>& in_cb,
+ shared_ptr<ITunerFilter>* _aidl_return) override;
+
+ string addFilterToShared(const shared_ptr<TunerHidlFilter>& sharedFilter);
+ void removeSharedFilter(const shared_ptr<TunerHidlFilter>& sharedFilter);
+
+ static shared_ptr<TunerHidlService> getTunerService();
+
+private:
+ bool hasITuner();
+ bool hasITuner_1_1();
+ void updateTunerResources();
+ vector<TunerFrontendInfo> getTRMFrontendInfos();
+ vector<int32_t> getTRMLnbHandles();
+ HidlResult getHidlFrontendIds(hidl_vec<HidlFrontendId>& ids);
+ HidlResult getHidlFrontendInfo(const int id, HidlFrontendInfo& info);
+ DemuxCapabilities getAidlDemuxCaps(const HidlDemuxCapabilities& caps);
+ FrontendInfo getAidlFrontendInfo(const HidlFrontendInfo& halInfo,
+ const HidlFrontendDtmbCapabilities& dtmbCaps);
+
+ sp<HidlITuner> mTuner;
+ sp<::android::hardware::tv::tuner::V1_1::ITuner> mTuner_1_1;
+ int mTunerVersion = TUNER_HAL_VERSION_UNKNOWN;
+ Mutex mSharedFiltersLock;
+ map<string, shared_ptr<TunerHidlFilter>> mSharedFilters;
+
+ static shared_ptr<TunerHidlService> sTunerService;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLSERVICE_H
diff --git a/services/tuner/hidl/TunerHidlTimeFilter.cpp b/services/tuner/hidl/TunerHidlTimeFilter.cpp
new file mode 100644
index 0000000..d0606d6
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlTimeFilter.cpp
@@ -0,0 +1,133 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TunerHidlTimeFilter"
+
+#include "TunerHidlTimeFilter.h"
+
+#include <aidl/android/hardware/tv/tuner/Constant64Bit.h>
+#include <aidl/android/hardware/tv/tuner/Result.h>
+
+using ::aidl::android::hardware::tv::tuner::Constant64Bit;
+using ::aidl::android::hardware::tv::tuner::Result;
+
+using HidlResult = ::android::hardware::tv::tuner::V1_0::Result;
+
+using namespace std;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+TunerHidlTimeFilter::TunerHidlTimeFilter(sp<HidlITimeFilter> timeFilter) {
+ mTimeFilter = timeFilter;
+}
+
+TunerHidlTimeFilter::~TunerHidlTimeFilter() {
+ mTimeFilter = nullptr;
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::setTimeStamp(int64_t timeStamp) {
+ if (mTimeFilter == nullptr) {
+ ALOGE("ITimeFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mTimeFilter->setTimeStamp(static_cast<uint64_t>(timeStamp));
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::clearTimeStamp() {
+ if (mTimeFilter == nullptr) {
+ ALOGE("ITimeFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status = mTimeFilter->clearTimeStamp();
+ if (status != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::getSourceTime(int64_t* _aidl_return) {
+ if (mTimeFilter == nullptr) {
+ *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
+ ALOGE("ITimeFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ mTimeFilter->getSourceTime([&](HidlResult r, uint64_t t) {
+ status = r;
+ *_aidl_return = static_cast<int64_t>(t);
+ });
+ if (status != HidlResult::SUCCESS) {
+ *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::getTimeStamp(int64_t* _aidl_return) {
+ if (mTimeFilter == nullptr) {
+ *_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
+ ALOGE("ITimeFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult status;
+ mTimeFilter->getTimeStamp([&](HidlResult r, uint64_t t) {
+ status = r;
+ *_aidl_return = static_cast<int64_t>(t);
+ });
+ if (status != HidlResult::SUCCESS) {
+ *_aidl_return = static_cast<int64_t>(Constant64Bit::INVALID_PRESENTATION_TIME_STAMP);
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+::ndk::ScopedAStatus TunerHidlTimeFilter::close() {
+ if (mTimeFilter == nullptr) {
+ ALOGE("ITimeFilter is not initialized");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ HidlResult res = mTimeFilter->close();
+ mTimeFilter = nullptr;
+
+ if (res != HidlResult::SUCCESS) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return ::ndk::ScopedAStatus::ok();
+}
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
diff --git a/services/tuner/hidl/TunerHidlTimeFilter.h b/services/tuner/hidl/TunerHidlTimeFilter.h
new file mode 100644
index 0000000..78f9c5e
--- /dev/null
+++ b/services/tuner/hidl/TunerHidlTimeFilter.h
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
+#define ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
+
+#include <aidl/android/media/tv/tuner/BnTunerTimeFilter.h>
+#include <android/hardware/tv/tuner/1.0/ITimeFilter.h>
+#include <utils/Log.h>
+
+using ::android::sp;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using HidlITimeFilter = ::android::hardware::tv::tuner::V1_0::ITimeFilter;
+
+namespace aidl {
+namespace android {
+namespace media {
+namespace tv {
+namespace tuner {
+
+class TunerHidlTimeFilter : public BnTunerTimeFilter {
+public:
+ TunerHidlTimeFilter(sp<HidlITimeFilter> timeFilter);
+ virtual ~TunerHidlTimeFilter();
+
+ ::ndk::ScopedAStatus setTimeStamp(int64_t in_timeStamp) override;
+ ::ndk::ScopedAStatus clearTimeStamp() override;
+ ::ndk::ScopedAStatus getSourceTime(int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus getTimeStamp(int64_t* _aidl_return) override;
+ ::ndk::ScopedAStatus close() override;
+
+private:
+ sp<HidlITimeFilter> mTimeFilter;
+};
+
+} // namespace tuner
+} // namespace tv
+} // namespace media
+} // namespace android
+} // namespace aidl
+
+#endif // ANDROID_MEDIA_TUNERHIDLTIMEFILTER_H
diff --git a/services/tuner/main_tunerservice.cpp b/services/tuner/main_tunerservice.cpp
index 586a0e2..a014dea 100644
--- a/services/tuner/main_tunerservice.cpp
+++ b/services/tuner/main_tunerservice.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,30 +14,33 @@
* limitations under the License.
*/
-#include <utils/Log.h>
+#include <android-base/logging.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <hidl/HidlTransportSupport.h>
+#include <utils/Log.h>
#include "TunerService.h"
+#include "hidl/TunerHidlService.h"
+
+using ::aidl::android::media::tv::tuner::TunerHidlService;
+using ::aidl::android::media::tv::tuner::TunerService;
using namespace android;
-int main(int argc __unused, char** argv) {
+int main() {
ALOGD("Tuner service starting");
- strcpy(argv[0], "media.tuner");
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
- ALOGD("ServiceManager: %p", sm.get());
- binder_status_t status = TunerService::instantiate();
+ // Check the legacy HIDL HAL first. If it does not exist, use the AIDL HAL.
+ binder_status_t status = TunerHidlService::instantiate();
if (status != STATUS_OK) {
- ALOGD("Failed to add tuner service as AIDL interface");
- return -1;
+ status = TunerService::instantiate();
+ CHECK(status == STATUS_OK);
}
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
+ return EXIT_FAILURE; // should not be reached
}
diff --git a/services/tuner/mediatuner.rc b/services/tuner/mediatuner.rc
index fd30618..6a3e199 100644
--- a/services/tuner/mediatuner.rc
+++ b/services/tuner/mediatuner.rc
@@ -2,4 +2,7 @@
class main
group media
ioprio rt 4
+ onrestart restart vendor.tuner-hal-1-0
+ onrestart restart vendor.tuner-hal-1-1
+ onrestart restart vendor.tuner-default
task_profiles ProcessCapacityHigh HighPerformance