Move common audio AIDL types to android.media.audio.common
The following types are moved from the 'android.media' package to 'android.media.audio.common':
- AudioChannelLayout
- AudioFormatDescription
- AudioFormatType
- PcmType
Updated usages and conversion functions.
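For downstream code, the change amounts to updating the package prefix in
includes, using-declarations, and AIDL imports; an illustrative sketch drawn
from the conversions below (not an exhaustive list of affected files):

    // C++: before
    #include <android/media/AudioFormatDescription.h>
    using android::media::AudioFormatDescription;
    // C++: after
    #include <android/media/audio/common/AudioFormatDescription.h>
    using android::media::audio::common::AudioFormatDescription;

    // AIDL: before
    import android.media.AudioFormatDescription;
    // AIDL: after
    import android.media.audio.common.AudioFormatDescription;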
Bug: 188932434
Test: atest audio_aidl_conversion_tests
Test: atest audiofoundation_parcelable_test
Change-Id: Ib65a4f428da4a7f29ec22bc2a393617c6c4389d0
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index da59da6..01aaa2a 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -168,6 +168,7 @@
"libbinder",
"framework-permission-aidl-cpp",
"aaudio-aidl-cpp",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
],
@@ -246,6 +247,7 @@
"binding/aidl/aaudio/IAAudioService.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"shared-file-region-aidl",
"framework-permission-aidl",
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index cf76225..14d1671 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -29,7 +29,7 @@
using namespace aaudio;
-using android::media::AudioFormatDescription;
+using android::media::audio::common::AudioFormatDescription;
AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
setChannelMask(parcelable.channelMask);
@@ -76,7 +76,8 @@
result.audioFormat = convAudioFormat.value();
} else {
result.audioFormat = AudioFormatDescription{};
- result.audioFormat.type = android::media::AudioFormatType::SYS_RESERVED_INVALID;
+ result.audioFormat.type =
+ android::media::audio::common::AudioFormatType::SYS_RESERVED_INVALID;
}
static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
result.direction = getDirection();
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index 3b274f2..cc4f138 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -16,7 +16,7 @@
package aaudio;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioFormatDescription;
parcelable StreamParameters {
int channelMask; // = AAUDIO_UNSPECIFIED;
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index b3f6a15..102e54a 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -34,6 +34,10 @@
namespace android {
using base::unexpected;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
namespace {
@@ -388,21 +392,21 @@
namespace {
namespace detail {
-using AudioChannelPair = std::pair<audio_channel_mask_t, media::AudioChannelLayout>;
+using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
using AudioChannelPairs = std::vector<AudioChannelPair>;
using AudioDevicePair = std::pair<audio_devices_t, media::AudioDeviceDescription>;
using AudioDevicePairs = std::vector<AudioDevicePair>;
-using AudioFormatPair = std::pair<audio_format_t, media::AudioFormatDescription>;
+using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
using AudioFormatPairs = std::vector<AudioFormatPair>;
}
const detail::AudioChannelPairs& getInAudioChannelPairs() {
static const detail::AudioChannelPairs pairs = {
-#define DEFINE_INPUT_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_IN_##n, \
- media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
- media::AudioChannelLayout::LAYOUT_##n) \
+#define DEFINE_INPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
}
DEFINE_INPUT_LAYOUT(MONO),
@@ -421,11 +425,11 @@
const detail::AudioChannelPairs& getOutAudioChannelPairs() {
static const detail::AudioChannelPairs pairs = {
-#define DEFINE_OUTPUT_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_OUT_##n, \
- media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
- media::AudioChannelLayout::LAYOUT_##n) \
+#define DEFINE_OUTPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_OUT_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
}
DEFINE_OUTPUT_LAYOUT(MONO),
@@ -464,11 +468,11 @@
const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
static const detail::AudioChannelPairs pairs = {
-#define DEFINE_VOICE_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_IN_VOICE_##n, \
- media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>( \
- media::AudioChannelLayout::VOICE_##n) \
+#define DEFINE_VOICE_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_VOICE_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>( \
+ AudioChannelLayout::VOICE_##n) \
}
DEFINE_VOICE_LAYOUT(UPLINK_MONO),
DEFINE_VOICE_LAYOUT(DNLINK_MONO),
@@ -671,25 +675,25 @@
return pairs;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+ AudioFormatDescription result;
result.type = type;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
- auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+ auto result = make_AudioFormatDescription(AudioFormatType::PCM);
result.pcm = pcm;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ AudioFormatDescription result;
result.encoding = encoding;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
const std::string& encoding) {
auto result = make_AudioFormatDescription(encoding);
result.pcm = transport;
@@ -700,28 +704,28 @@
static const detail::AudioFormatPairs pairs = {{
{
AUDIO_FORMAT_INVALID,
- make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID)
+ make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)
},
{
- AUDIO_FORMAT_DEFAULT, media::AudioFormatDescription{}
+ AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}
},
{
- AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(media::PcmType::INT_16_BIT)
+ AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)
},
{
- AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(media::PcmType::UINT_8_BIT)
+ AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)
},
{
- AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(media::PcmType::INT_32_BIT)
+ AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)
},
{
- AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(media::PcmType::FIXED_Q_8_24)
+ AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)
},
{
- AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(media::PcmType::FLOAT_32_BIT)
+ AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)
},
{
- AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(media::PcmType::INT_24_BIT)
+ AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)
},
{
AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
@@ -797,7 +801,7 @@
// specify the format of the encapsulated bitstream.
{
AUDIO_FORMAT_IEC61937,
- make_AudioFormatDescription(media::PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
+ make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
},
{
AUDIO_FORMAT_DOLBY_TRUEHD,
@@ -984,7 +988,7 @@
},
{
AUDIO_FORMAT_IEC60958,
- make_AudioFormatDescription(media::PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
+ make_AudioFormatDescription(PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
},
{
AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
@@ -1028,14 +1032,14 @@
} // namespace
ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- const media::AudioChannelLayout& aidl, bool isInput) {
- using ReverseMap = std::unordered_map<media::AudioChannelLayout, audio_channel_mask_t>;
- using Tag = media::AudioChannelLayout::Tag;
+ const AudioChannelLayout& aidl, bool isInput) {
+ using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
+ using Tag = AudioChannelLayout::Tag;
static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
- auto convert = [](const media::AudioChannelLayout& aidl, const ReverseMap& m,
+ auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
if (auto it = m.find(aidl); it != m.end()) {
return it->second;
@@ -1073,16 +1077,16 @@
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
audio_channel_mask_t legacy, bool isInput) {
- using DirectMap = std::unordered_map<audio_channel_mask_t, media::AudioChannelLayout>;
- using Tag = media::AudioChannelLayout::Tag;
+ using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
+ using Tag = AudioChannelLayout::Tag;
static const DirectMap mInAndVoice = make_DirectMap(
getInAudioChannelPairs(), getVoiceAudioChannelPairs());
static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
- const char* func, const char* type) -> ConversionResult<media::AudioChannelLayout> {
+ const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
if (auto it = m.find(legacy); it != m.end()) {
return it->second;
} else {
@@ -1093,9 +1097,9 @@
};
if (legacy == AUDIO_CHANNEL_NONE) {
- return media::AudioChannelLayout{};
+ return AudioChannelLayout{};
} else if (legacy == AUDIO_CHANNEL_INVALID) {
- return media::AudioChannelLayout::make<Tag::invalid>(0);
+ return AudioChannelLayout::make<Tag::invalid>(0);
}
const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
@@ -1103,7 +1107,7 @@
if (audio_channel_mask_is_valid(legacy)) {
const int indexMask = VALUE_OR_RETURN(
convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
- return media::AudioChannelLayout::make<Tag::indexMask>(indexMask);
+ return AudioChannelLayout::make<Tag::indexMask>(indexMask);
} else {
ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
return unexpected(BAD_VALUE);
@@ -1144,8 +1148,8 @@
}
ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
- const media::AudioFormatDescription& aidl) {
- static const std::unordered_map<media::AudioFormatDescription, audio_format_t> m =
+ const AudioFormatDescription& aidl) {
+ static const std::unordered_map<AudioFormatDescription, audio_format_t> m =
make_ReverseMap(getAudioFormatPairs());
if (auto it = m.find(aidl); it != m.end()) {
return it->second;
@@ -1155,9 +1159,9 @@
}
}
-ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
+ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
audio_format_t legacy) {
- static const std::unordered_map<audio_format_t, media::AudioFormatDescription> m =
+ static const std::unordered_map<audio_format_t, AudioFormatDescription> m =
make_DirectMap(getAudioFormatPairs());
if (auto it = m.find(legacy); it != m.end()) {
return it->second;
@@ -2684,7 +2688,7 @@
}
RETURN_IF_ERROR(
convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
- [isInput](const media::AudioChannelLayout& l) {
+ [isInput](const AudioChannelLayout& l) {
return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
}));
legacy.num_channel_masks = aidl.channelMasks.size();
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 048aca2..471b15c 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -51,6 +51,7 @@
"PolicyAidlConversion.cpp"
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -71,6 +72,7 @@
include_dirs: ["system/media/audio_utils/include"],
export_include_dirs: ["include"],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -111,6 +113,7 @@
"TrackPlayerBase.cpp",
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -228,6 +231,7 @@
"libaudioclient_aidl_conversion_util",
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libbase",
"libbinder",
@@ -239,6 +243,7 @@
"framework-permission-aidl-cpp",
],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libbase",
"shared-file-region-aidl-cpp",
@@ -307,7 +312,6 @@
local_include_dir: "aidl",
srcs: [
"aidl/android/media/AudioAttributesInternal.aidl",
- "aidl/android/media/AudioChannelLayout.aidl",
"aidl/android/media/AudioClient.aidl",
"aidl/android/media/AudioConfig.aidl",
"aidl/android/media/AudioConfigBase.aidl",
@@ -319,8 +323,6 @@
"aidl/android/media/AudioEncapsulationMode.aidl",
"aidl/android/media/AudioEncapsulationMetadataType.aidl",
"aidl/android/media/AudioEncapsulationType.aidl",
- "aidl/android/media/AudioFormatDescription.aidl",
- "aidl/android/media/AudioFormatType.aidl",
"aidl/android/media/AudioFlag.aidl",
"aidl/android/media/AudioGain.aidl",
"aidl/android/media/AudioGainConfig.aidl",
@@ -360,7 +362,6 @@
"aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
"aidl/android/media/ExtraAudioDescriptor.aidl",
- "aidl/android/media/PcmType.aidl",
"aidl/android/media/TrackSecondaryOutputInfo.aidl",
],
imports: [
@@ -407,6 +408,7 @@
"aidl/android/media/SpatializationLevel.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
],
backend: {
@@ -449,6 +451,7 @@
"aidl/android/media/IAudioTrackCallback.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"av-types-aidl",
"effect-aidl",
@@ -486,6 +489,7 @@
"aidl/android/media/IAudioPolicyServiceClient.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"audiopolicy-types-aidl",
"capture_state_listener-aidl",
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 65bf97d..057befd 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -51,8 +51,9 @@
namespace android {
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
+using content::AttributionSourceState;
using media::IAudioPolicyService;
-using android::content::AttributionSourceState;
+using media::audio::common::AudioFormatDescription;
// client singleton for AudioFlinger binder interface
Mutex AudioSystem::gLock;
@@ -1877,7 +1878,7 @@
media::Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::AudioFormatDescription> surroundFormatsAidl;
+ std::vector<AudioFormatDescription> surroundFormatsAidl;
std::vector<bool> surroundFormatsEnabledAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl,
@@ -1904,7 +1905,7 @@
media::Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::AudioFormatDescription> surroundFormatsAidl;
+ std::vector<AudioFormatDescription> surroundFormatsAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getReportedSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl)));
@@ -1920,7 +1921,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
+ AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_format_t_AudioFormatDescription(audioFormat));
return statusTFromBinderStatus(
aps->setSurroundFormatEnabled(audioFormatAidl, enabled));
@@ -1982,7 +1983,7 @@
& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- std::vector<media::AudioFormatDescription> formatsAidl;
+ std::vector<AudioFormatDescription> formatsAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getHwOffloadEncodingFormatsSupportedForA2DP(&formatsAidl)));
*formats = VALUE_OR_RETURN_STATUS(
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 6d2ec93..ee64894 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -30,6 +30,8 @@
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
#define MAX_ITEMS_PER_LIST 1024
@@ -252,7 +254,7 @@
audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
auto result = [&]() -> ConversionResult<audio_format_t> {
int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
- media::AudioFormatDescription aidlRet;
+ AudioFormatDescription aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
return aidl2legacy_AudioFormatDescription_audio_format_t(aidlRet);
}();
@@ -420,9 +422,9 @@
audio_channel_mask_t channelMask) const {
auto result = [&]() -> ConversionResult<size_t> {
int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
- media::AudioFormatDescription formatAidl = VALUE_OR_RETURN(
+ AudioFormatDescription formatAidl = VALUE_OR_RETURN(
legacy2aidl_audio_format_t_AudioFormatDescription(format));
- media::AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
+ AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
legacy2aidl_audio_channel_mask_t_AudioChannelLayout(channelMask, true /*isInput*/));
int64_t aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -820,7 +822,7 @@
}
Status AudioFlingerServerAdapter::format(int32_t output,
- media::AudioFormatDescription* _aidl_return) {
+ AudioFormatDescription* _aidl_return) {
audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(output));
*_aidl_return = VALUE_OR_RETURN_BINDER(
@@ -948,8 +950,8 @@
}
Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
- const media::AudioFormatDescription& format,
- const media::AudioChannelLayout& channelMask,
+ const AudioFormatDescription& format,
+ const AudioChannelLayout& channelMask,
int64_t* _aidl_return) {
uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
diff --git a/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl b/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
deleted file mode 100644
index 3259105..0000000
--- a/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * This structure describes a layout of a multi-channel stream.
- * There are two possible ways for representing a layout:
- *
- * - indexed mask, which tells what channels of an audio frame are used, but
- * doesn't label them in any way, thus a correspondence between channels in
- * the same position of frames originating from different streams must be
- * established externally;
- *
- * - layout mask, which gives a label to each channel, thus allowing to
- * match channels between streams of different layouts.
- *
- * Both representations are agnostic of the direction of audio transfer. Also,
- * by construction, the number of bits set to '1' in the mask indicates the
- * number of channels in the audio frame. A channel mask per se only defines the
- * presence or absence of a channel, not the order. Please see 'INTERLEAVE_*'
- * constants for the platform convention of order.
- *
- * The structure also defines a "voice mask" which is a special case of
- * layout mask, intended for processing voice audio from telecommunication
- * use cases.
- */
-union AudioChannelLayout {
- /**
- * This variant is used for representing the "null" ("none") value
- * for the channel layout. The field value must always be '0'.
- */
- int none = 0;
- /**
- * This variant is used for indicating an "invalid" layout for use by the
- * framework only. HAL implementations must not accept or emit
- * AudioChannelLayout values for this variant. The field value must always
- * be '0'.
- */
- int invalid = 0;
- /**
- * This variant is used for representing indexed masks. The mask indicates
- * what channels are used. For example, the mask that specifies to use only
- * channels 1 and 3 when interacting with a multi-channel device is defined
- * as a combination of the 1st and the 3rd bits and thus is equal to 5. See
- * also the 'INDEX_MASK_*' constants. The 'indexMask' field must have at
- * least one bit set.
- */
- int indexMask;
- /**
- * This variant is used for representing layout masks.
- * It is recommended to use one of 'LAYOUT_*' values. The 'layoutMask' field
- * must have at least one bit set.
- */
- int layoutMask;
- /**
- * This variant is used for processing of voice audio input and output.
- * It is recommended to use one of 'VOICE_*' values. The 'voiceMask' field
- * must have at least one bit set.
- */
- int voiceMask;
-
- /**
- * 'INDEX_MASK_*' constants define how many channels are used.
- * The mask constants below are 'canonical' masks. Each 'INDEX_MASK_N'
- * constant declares that all N channels are used and arranges
- * them starting from the LSB.
- */
- const int INDEX_MASK_1 = (1 << 1) - 1;
- const int INDEX_MASK_2 = (1 << 2) - 1;
- const int INDEX_MASK_3 = (1 << 3) - 1;
- const int INDEX_MASK_4 = (1 << 4) - 1;
- const int INDEX_MASK_5 = (1 << 5) - 1;
- const int INDEX_MASK_6 = (1 << 6) - 1;
- const int INDEX_MASK_7 = (1 << 7) - 1;
- const int INDEX_MASK_8 = (1 << 8) - 1;
- const int INDEX_MASK_9 = (1 << 9) - 1;
- const int INDEX_MASK_10 = (1 << 10) - 1;
- const int INDEX_MASK_11 = (1 << 11) - 1;
- const int INDEX_MASK_12 = (1 << 12) - 1;
- const int INDEX_MASK_13 = (1 << 13) - 1;
- const int INDEX_MASK_14 = (1 << 14) - 1;
- const int INDEX_MASK_15 = (1 << 15) - 1;
- const int INDEX_MASK_16 = (1 << 16) - 1;
- const int INDEX_MASK_17 = (1 << 17) - 1;
- const int INDEX_MASK_18 = (1 << 18) - 1;
- const int INDEX_MASK_19 = (1 << 19) - 1;
- const int INDEX_MASK_20 = (1 << 20) - 1;
- const int INDEX_MASK_21 = (1 << 21) - 1;
- const int INDEX_MASK_22 = (1 << 22) - 1;
- const int INDEX_MASK_23 = (1 << 23) - 1;
- const int INDEX_MASK_24 = (1 << 24) - 1;
-
- /**
- * 'LAYOUT_*' constants define channel layouts recognized by
- * the audio system. The order of the channels in the frame is assumed
- * to be from the LSB to MSB for all the bits set to '1'.
- */
- const int LAYOUT_MONO = CHANNEL_FRONT_LEFT;
- const int LAYOUT_STEREO =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT;
- const int LAYOUT_2POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_TRI =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER;
- const int LAYOUT_TRI_BACK =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_BACK_CENTER;
- const int LAYOUT_3POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER |
- CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_2POINT0POINT2 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_2POINT1POINT2 =
- LAYOUT_2POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_3POINT0POINT2 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_3POINT1POINT2 =
- LAYOUT_3POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_QUAD =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
- const int LAYOUT_QUAD_SIDE =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
- const int LAYOUT_SURROUND =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
- const int LAYOUT_PENTA = LAYOUT_QUAD | CHANNEL_FRONT_CENTER;
- const int LAYOUT_5POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
- CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
- const int LAYOUT_5POINT1_SIDE =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
- const int LAYOUT_5POINT1POINT2 = LAYOUT_5POINT1 |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_5POINT1POINT4 = LAYOUT_5POINT1 |
- CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
- CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
- const int LAYOUT_6POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
- CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT | CHANNEL_BACK_CENTER;
- const int LAYOUT_7POINT1 = LAYOUT_5POINT1 |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
- const int LAYOUT_7POINT1POINT2 = LAYOUT_7POINT1 |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_7POINT1POINT4 = LAYOUT_7POINT1 |
- CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
- CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
- const int LAYOUT_9POINT1POINT4 = LAYOUT_7POINT1POINT4 |
- CHANNEL_FRONT_WIDE_LEFT | CHANNEL_FRONT_WIDE_RIGHT;
- const int LAYOUT_9POINT1POINT6 = LAYOUT_9POINT1POINT4 |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_13POINT_360RA =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT |
- CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
- CHANNEL_TOP_FRONT_CENTER |
- CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT |
- CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
- CHANNEL_BOTTOM_FRONT_CENTER;
- const int LAYOUT_22POINT2 = LAYOUT_7POINT1POINT4 |
- CHANNEL_FRONT_LEFT_OF_CENTER | CHANNEL_FRONT_RIGHT_OF_CENTER |
- CHANNEL_BACK_CENTER | CHANNEL_TOP_CENTER |
- CHANNEL_TOP_FRONT_CENTER | CHANNEL_TOP_BACK_CENTER |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT |
- CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
- CHANNEL_BOTTOM_FRONT_CENTER |
- CHANNEL_LOW_FREQUENCY_2;
- const int LAYOUT_MONO_HAPTIC_A =
- LAYOUT_MONO | CHANNEL_HAPTIC_A;
- const int LAYOUT_STEREO_HAPTIC_A =
- LAYOUT_STEREO | CHANNEL_HAPTIC_A;
- const int LAYOUT_HAPTIC_AB =
- CHANNEL_HAPTIC_A | CHANNEL_HAPTIC_B;
- const int LAYOUT_MONO_HAPTIC_AB =
- LAYOUT_MONO | LAYOUT_HAPTIC_AB;
- const int LAYOUT_STEREO_HAPTIC_AB =
- LAYOUT_STEREO | LAYOUT_HAPTIC_AB;
- const int LAYOUT_FRONT_BACK =
- CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
-
- /**
- * Expresses the convention when stereo audio samples are stored interleaved
- * in an array. This should improve readability by allowing code to use
- * symbolic indices instead of hard-coded [0] and [1].
- *
- * For multi-channel beyond stereo, the platform convention is that channels
- * are interleaved in order from least significant channel mask bit to most
- * significant channel mask bit, with unused bits skipped. Any exceptions
- * to this convention will be noted at the appropriate API.
- */
- const int INTERLEAVE_LEFT = 0;
- const int INTERLEAVE_RIGHT = 1;
-
- /**
- * 'CHANNEL_*' constants are used to build 'LAYOUT_*' masks. Each constant
- * must have exactly one bit set. The values do not match
- * 'android.media.AudioFormat.CHANNEL_OUT_*' constants from the SDK
- * for better efficiency in masks processing.
- */
- const int CHANNEL_FRONT_LEFT = 1 << 0;
- const int CHANNEL_FRONT_RIGHT = 1 << 1;
- const int CHANNEL_FRONT_CENTER = 1 << 2;
- const int CHANNEL_LOW_FREQUENCY = 1 << 3;
- const int CHANNEL_BACK_LEFT = 1 << 4;
- const int CHANNEL_BACK_RIGHT = 1 << 5;
- const int CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6;
- const int CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7;
- const int CHANNEL_BACK_CENTER = 1 << 8;
- const int CHANNEL_SIDE_LEFT = 1 << 9;
- const int CHANNEL_SIDE_RIGHT = 1 << 10;
- const int CHANNEL_TOP_CENTER = 1 << 11;
- const int CHANNEL_TOP_FRONT_LEFT = 1 << 12;
- const int CHANNEL_TOP_FRONT_CENTER = 1 << 13;
- const int CHANNEL_TOP_FRONT_RIGHT = 1 << 14;
- const int CHANNEL_TOP_BACK_LEFT = 1 << 15;
- const int CHANNEL_TOP_BACK_CENTER = 1 << 16;
- const int CHANNEL_TOP_BACK_RIGHT = 1 << 17;
- const int CHANNEL_TOP_SIDE_LEFT = 1 << 18;
- const int CHANNEL_TOP_SIDE_RIGHT = 1 << 19;
- const int CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20;
- const int CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21;
- const int CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22;
- const int CHANNEL_LOW_FREQUENCY_2 = 1 << 23;
- const int CHANNEL_FRONT_WIDE_LEFT = 1 << 24;
- const int CHANNEL_FRONT_WIDE_RIGHT = 1 << 25;
- /**
- * Haptic channels are not part of multichannel standards, however they
- * enhance user experience when playing so they are packed together with the
- * channels of the program. To avoid collision with positional channels the
- * values for haptic channels start at the MSB of an integer (after the sign
- * bit) and move down to LSB.
- */
- const int CHANNEL_HAPTIC_B = 1 << 29;
- const int CHANNEL_HAPTIC_A = 1 << 30;
-
- /**
- * 'VOICE_*' constants define layouts for voice audio. The order of the
- * channels in the frame is assumed to be from the LSB to MSB for all the
- * bits set to '1'.
- */
- const int VOICE_UPLINK_MONO = CHANNEL_VOICE_UPLINK;
- const int VOICE_DNLINK_MONO = CHANNEL_VOICE_DNLINK;
- const int VOICE_CALL_MONO = CHANNEL_VOICE_UPLINK | CHANNEL_VOICE_DNLINK;
-
- /**
- * 'CHANNEL_VOICE_*' constants are used to build 'VOICE_*' masks. Each
- * constant must have exactly one bit set. Use the same values as
- * 'android.media.AudioFormat.CHANNEL_IN_VOICE_*' constants from the SDK.
- */
- const int CHANNEL_VOICE_UPLINK = 0x4000;
- const int CHANNEL_VOICE_DNLINK = 0x8000;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfig.aidl b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
index 6996d42..aea7a34 100644
--- a/media/libaudioclient/aidl/android/media/AudioConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
import android.media.AudioOffloadInfo;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
index e84161b..54b1780 100644
--- a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
@@ -16,8 +16,8 @@
package android.media;
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl b/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
deleted file mode 100644
index a656348..0000000
--- a/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioFormatType;
-import android.media.PcmType;
-
-/**
- * An extensible type for specifying audio formats. All formats are largely
- * divided into two classes: PCM and non-PCM (bitstreams). Bitstreams can
- * be encapsulated into PCM streams.
- *
- * The type defined in a way to make each format uniquely identifiable, so
- * that if the framework and the HAL construct a value for the same type
- * (e.g. PCM 16 bit), they will produce identical parcelables which will have
- * identical hashes. This makes possible deduplicating type descriptions
- * by the framework when they are received from different HAL modules without
- * relying on having some centralized registry of enumeration values.
- *
- * {@hide}
- */
-parcelable AudioFormatDescription {
- /**
- * The type of the audio format. See the 'AudioFormatType' for the
- * list of supported values.
- */
- AudioFormatType type = AudioFormatType.DEFAULT;
- /**
- * The type of the PCM stream or the transport stream for PCM
- * encapsulations. See 'PcmType' for the list of supported values.
- */
- PcmType pcm = PcmType.DEFAULT;
- /**
- * Optional encoding specification. Must be left empty when:
- *
- * - 'type == DEFAULT && pcm == DEFAULT' -- that means "default" type;
- * - 'type == PCM' -- that means a regular PCM stream (not an encapsulation
- * of an encoded bitstream).
- *
- * For PCM encapsulations of encoded bitstreams (e.g. an encapsulation
- * according to IEC-61937 standard), the value of the 'pcm' field must
- * be set accordingly, as an example, PCM_INT_16_BIT must be used for
- * IEC-61937. Note that 'type == NON_PCM' in this case.
- *
- * Encoding names mostly follow IANA standards for media types (MIME), and
- * frameworks/av/media/libstagefright/foundation/MediaDefs.cpp with the
- * latter having priority. Since there are still many audio types not found
- * in any of these lists, the following rules are applied:
- *
- * - If there is a direct MIME type for the encoding, the MIME type name
- * is used as is, e.g. "audio/eac3" for the EAC-3 format.
- * - If the encoding is a "subformat" of a MIME-registered format,
- * the latter is augmented with a suffix, e.g. "audio/eac3-joc" for the
- * JOC extension of EAC-3.
- * - If it's a proprietary format, a "vnd." prefix is added, similar to
- * IANA rules, e.g. "audio/vnd.dolby.truehd".
- * - Otherwise, "x-" prefix is added, e.g. "audio/x-iec61937".
- * - All MIME types not found in the IANA formats list have an associated
- * comment.
- *
- * For PCM encapsulations with a known bitstream format, the latter
- * is added to the encapsulation encoding as a suffix, after a "+" char.
- * For example, an IEC61937 encapsulation of AC3 has the following
- * representation:
- * type = NON_PCM,
- * pcm = PcmType.INT_16_BIT,
- * encoding = "audio/x-iec61937+audio/ac3"
- */
- @utf8InCpp String encoding;
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatType.aidl b/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
deleted file mode 100644
index 31ed2be..0000000
--- a/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The type of the audio format. Only used as part of 'AudioFormatDescription'
- * structure.
- */
-@Backing(type="byte")
-enum AudioFormatType {
- /**
- * "Default" type is used when the client does not care about the actual
- * format. All fields of 'AudioFormatDescription' must have default / empty
- * / null values.
- */
- DEFAULT = 0,
- /**
- * When the 'encoding' field of 'AudioFormatDescription' is not empty, it
- * specifies the codec used for bitstream (non-PCM) data. It is also used
- * in the case when the bitstream data is encapsulated into a PCM stream,
- * see the documentation for 'AudioFormatDescription'.
- */
- NON_PCM = DEFAULT,
- /**
- * PCM type. The 'pcm' field of 'AudioFormatDescription' is used to specify
- * the actual sample size and representation.
- */
- PCM = 1,
- /**
- * Value reserved for system use only. HALs must never return this value to
- * the system or accept it from the system.
- */
- SYS_RESERVED_INVALID = -1,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGain.aidl b/media/libaudioclient/aidl/android/media/AudioGain.aidl
index 4cfa96e..ff85b50 100644
--- a/media/libaudioclient/aidl/android/media/AudioGain.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGain.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioChannelLayout;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
index afa3aca..f60c461 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioChannelLayout;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index efdf99b..b01f902 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
import android.media.AudioPatch;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
index be32a69..3908cb1 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
@@ -16,14 +16,14 @@
package android.media;
-import android.media.AudioChannelLayout;
import android.media.AudioGainConfig;
import android.media.AudioIoFlags;
import android.media.AudioPortConfigExt;
import android.media.AudioPortConfigType;
import android.media.AudioPortRole;
import android.media.AudioPortType;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioProfile.aidl b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
index 9fb8d49..3e234de 100644
--- a/media/libaudioclient/aidl/android/media/AudioProfile.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioChannelLayout;
import android.media.AudioEncapsulationType;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index 16f70c1..7b02a9d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,7 +16,6 @@
package android.media;
-import android.media.AudioChannelLayout;
import android.media.AudioMode;
import android.media.AudioPatch;
import android.media.AudioPort;
@@ -42,7 +41,8 @@
import android.media.MicrophoneInfoData;
import android.media.RenderPosition;
import android.media.TrackSecondaryOutputInfo;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 7022b9d..74bfa05 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -16,7 +16,6 @@
package android.media;
-import android.media.AudioFormatDescription;
import android.content.AttributionSourceState;
import android.media.AudioAttributesEx;
@@ -53,6 +52,7 @@
import android.media.INativeSpatializerCallback;
import android.media.Int;
import android.media.SoundTriggerSession;
+import android.media.audio.common.AudioFormatDescription;
/**
* IAudioPolicyService interface (see AudioPolicyInterface for method descriptions).
diff --git a/media/libaudioclient/aidl/android/media/PcmType.aidl b/media/libaudioclient/aidl/android/media/PcmType.aidl
deleted file mode 100644
index c9e327c..0000000
--- a/media/libaudioclient/aidl/android/media/PcmType.aidl
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The type of the encoding used for representing PCM samples. Only used as
- * part of 'AudioFormatDescription' structure.
- */
-@Backing(type="byte")
-enum PcmType {
- /**
- * "Default" value used when the type 'AudioFormatDescription' is "default".
- */
- DEFAULT = 0,
- /**
- * Unsigned 8-bit integer.
- */
- UINT_8_BIT = DEFAULT,
- /**
- * Signed 16-bit integer.
- */
- INT_16_BIT = 1,
- /**
- * Signed 32-bit integer.
- */
- INT_32_BIT = 2,
- /**
- * Q8.24 fixed point format.
- */
- FIXED_Q_8_24 = 3,
- /**
- * IEEE 754 32-bit floating point format.
- */
- FLOAT_32_BIT = 4,
- /**
- * Signed 24-bit integer.
- */
- INT_24_BIT = 5,
-}
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index b290aa8..969e3e6 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -46,6 +46,7 @@
],
shared_libs: [
"android.hardware.audio.common-util",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 2cf127c..7e87a4e 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -22,7 +22,6 @@
#include <system/audio.h>
#include <android/media/AudioAttributesInternal.h>
-#include <android/media/AudioChannelLayout.h>
#include <android/media/AudioClient.h>
#include <android/media/AudioConfig.h>
#include <android/media/AudioConfigBase.h>
@@ -32,7 +31,6 @@
#include <android/media/AudioEncapsulationMetadataType.h>
#include <android/media/AudioEncapsulationType.h>
#include <android/media/AudioFlag.h>
-#include <android/media/AudioFormatDescription.h>
#include <android/media/AudioGain.h>
#include <android/media/AudioGainMode.h>
#include <android/media/AudioInputFlags.h>
@@ -54,6 +52,8 @@
#include <android/media/EffectDescriptor.h>
#include <android/media/ExtraAudioDescriptor.h>
#include <android/media/TrackSecondaryOutputInfo.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
#include <android/media/SharedFileRegion.h>
#include <binder/IMemory.h>
@@ -132,9 +132,9 @@
audio_port_type_t legacy);
ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- const media::AudioChannelLayout& aidl, bool isInput);
-ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
- audio_channel_mask_t legacy, bool isInput);
+ const android::media::audio::common::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<android::media::audio::common::AudioChannelLayout>
+legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
const media::AudioDeviceDescription& aidl);
@@ -142,9 +142,9 @@
audio_devices_t legacy);
ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
- const media::AudioFormatDescription& aidl);
-ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
- audio_format_t legacy);
+ const media::audio::common::AudioFormatDescription& aidl);
+ConversionResult<media::audio::common::AudioFormatDescription>
+legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
ConversionResult<audio_gain_mode_t>
aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl);
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index bd341e3..b63f389 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -19,9 +19,9 @@
#include <functional>
-#include <android/media/AudioChannelLayout.h>
#include <android/media/AudioDeviceDescription.h>
-#include <android/media/AudioFormatDescription.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
#include <binder/Parcelable.h>
#include <system/audio.h>
#include <system/audio_policy.h>
@@ -42,10 +42,11 @@
// possibility of processing types belonging to different versions of the type,
// e.g. a HAL may be using a previous version of the AIDL interface.
-template<> struct hash<android::media::AudioChannelLayout>
+template<> struct hash<android::media::audio::common::AudioChannelLayout>
{
- std::size_t operator()(const android::media::AudioChannelLayout& acl) const noexcept {
- using Tag = android::media::AudioChannelLayout::Tag;
+ std::size_t operator()(
+ const android::media::audio::common::AudioChannelLayout& acl) const noexcept {
+ using Tag = android::media::audio::common::AudioChannelLayout::Tag;
const size_t seed = std::hash<Tag>{}(acl.getTag());
switch (acl.getTag()) {
case Tag::none:
@@ -72,13 +73,14 @@
}
};
-template<> struct hash<android::media::AudioFormatDescription>
+template<> struct hash<android::media::audio::common::AudioFormatDescription>
{
- std::size_t operator()(const android::media::AudioFormatDescription& afd) const noexcept {
+ std::size_t operator()(
+ const android::media::audio::common::AudioFormatDescription& afd) const noexcept {
return hash_combine(
- std::hash<android::media::AudioFormatType>{}(afd.type),
+ std::hash<android::media::audio::common::AudioFormatType>{}(afd.type),
hash_combine(
- std::hash<android::media::PcmType>{}(afd.pcm),
+ std::hash<android::media::audio::common::PcmType>{}(afd.pcm),
std::hash<std::string>{}(afd.encoding)));
}
};
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index a2ce145..a74661a 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -570,7 +570,8 @@
Status createRecord(const media::CreateRecordRequest& request,
media::CreateRecordResponse* _aidl_return) override;
Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
- Status format(int32_t output, media::AudioFormatDescription* _aidl_return) override;
+ Status format(int32_t output,
+ media::audio::common::AudioFormatDescription* _aidl_return) override;
Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
Status latency(int32_t output, int32_t* _aidl_return) override;
Status setMasterVolume(float value) override;
@@ -592,8 +593,9 @@
Status
getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
- Status getInputBufferSize(int32_t sampleRate, const media::AudioFormatDescription& format,
- const media::AudioChannelLayout& channelMask,
+ Status getInputBufferSize(int32_t sampleRate,
+ const media::audio::common::AudioFormatDescription& format,
+ const media::audio::common::AudioChannelLayout& channelMask,
int64_t* _aidl_return) override;
Status openOutput(const media::OpenOutputRequest& request,
media::OpenOutputResponse* _aidl_return) override;
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 2279244..891293e 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -33,6 +33,7 @@
"libutils",
],
static_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
"libstagefright_foundation",
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
index 424b387..253ccac 100644
--- a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -22,38 +22,43 @@
using namespace android;
using namespace android::aidl_utils;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
+
namespace {
template<typename T> size_t hash(const T& t) {
return std::hash<T>{}(t);
}
-media::AudioChannelLayout make_ACL_None() {
- return media::AudioChannelLayout{};
+AudioChannelLayout make_ACL_None() {
+ return AudioChannelLayout{};
}
-media::AudioChannelLayout make_ACL_Invalid() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::invalid>(0);
+AudioChannelLayout make_ACL_Invalid() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::invalid>(0);
}
-media::AudioChannelLayout make_ACL_Stereo() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>(
- media::AudioChannelLayout::LAYOUT_STEREO);
+AudioChannelLayout make_ACL_Stereo() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+ AudioChannelLayout::LAYOUT_STEREO);
}
-media::AudioChannelLayout make_ACL_ChannelIndex2() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>(
- media::AudioChannelLayout::INDEX_MASK_2);
+AudioChannelLayout make_ACL_ChannelIndex2() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
+ AudioChannelLayout::INDEX_MASK_2);
}
-media::AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
+AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
// Use channels 1 and 3.
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>(5);
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(5);
}
-media::AudioChannelLayout make_ACL_VoiceCall() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>(
- media::AudioChannelLayout::VOICE_CALL_MONO);
+AudioChannelLayout make_ACL_VoiceCall() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(
+ AudioChannelLayout::VOICE_CALL_MONO);
}
media::AudioDeviceDescription make_AudioDeviceDescription(media::AudioDeviceType type,
@@ -86,52 +91,52 @@
media::AudioDeviceDescription::CONNECTION_BT_SCO());
}
-media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+ AudioFormatDescription result;
result.type = type;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
- auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+ auto result = make_AudioFormatDescription(AudioFormatType::PCM);
result.pcm = pcm;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ AudioFormatDescription result;
result.encoding = encoding;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
const std::string& encoding) {
auto result = make_AudioFormatDescription(encoding);
result.pcm = transport;
return result;
}
-media::AudioFormatDescription make_AFD_Default() {
- return media::AudioFormatDescription{};
+AudioFormatDescription make_AFD_Default() {
+ return AudioFormatDescription{};
}
-media::AudioFormatDescription make_AFD_Invalid() {
- return make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID);
+AudioFormatDescription make_AFD_Invalid() {
+ return make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID);
}
-media::AudioFormatDescription make_AFD_Pcm16Bit() {
- return make_AudioFormatDescription(media::PcmType::INT_16_BIT);
+AudioFormatDescription make_AFD_Pcm16Bit() {
+ return make_AudioFormatDescription(PcmType::INT_16_BIT);
}
-media::AudioFormatDescription make_AFD_Bitstream() {
+AudioFormatDescription make_AFD_Bitstream() {
return make_AudioFormatDescription("example");
}
-media::AudioFormatDescription make_AFD_Encap() {
- return make_AudioFormatDescription(media::PcmType::INT_16_BIT, "example.encap");
+AudioFormatDescription make_AFD_Encap() {
+ return make_AudioFormatDescription(PcmType::INT_16_BIT, "example.encap");
}
-media::AudioFormatDescription make_AFD_Encap_with_Enc() {
+AudioFormatDescription make_AFD_Encap_with_Enc() {
auto afd = make_AFD_Encap();
afd.encoding += "+example";
return afd;
@@ -160,7 +165,7 @@
};
TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
- verifyHashIdentity<media::AudioChannelLayout>({
+ verifyHashIdentity<AudioChannelLayout>({
make_ACL_None, make_ACL_Invalid, make_ACL_Stereo, make_ACL_ChannelIndex2,
make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
}
@@ -172,12 +177,12 @@
}
TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
- verifyHashIdentity<media::AudioFormatDescription>({
+ verifyHashIdentity<AudioFormatDescription>({
make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
make_AFD_Encap, make_AFD_Encap_with_Enc});
}
-using ChannelLayoutParam = std::tuple<media::AudioChannelLayout, bool /*isInput*/>;
+using ChannelLayoutParam = std::tuple<AudioChannelLayout, bool /*isInput*/>;
class AudioChannelLayoutRoundTripTest :
public testing::TestWithParam<ChannelLayoutParam> {};
TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
@@ -192,7 +197,7 @@
INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
AudioChannelLayoutRoundTripTest,
testing::Combine(
- testing::Values(media::AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
+ testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
make_ACL_ChannelIndex2(), make_ACL_ChannelIndexArbitrary()),
testing::Values(false, true)));
INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
@@ -216,7 +221,7 @@
make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
class AudioFormatDescriptionRoundTripTest :
- public testing::TestWithParam<media::AudioFormatDescription> {};
+ public testing::TestWithParam<AudioFormatDescription> {};
TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
const auto initial = GetParam();
auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
@@ -227,4 +232,4 @@
}
INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
AudioFormatDescriptionRoundTripTest,
- testing::Values(make_AFD_Invalid(), media::AudioFormatDescription{}, make_AFD_Pcm16Bit()));
+ testing::Values(make_AFD_Invalid(), AudioFormatDescription{}, make_AFD_Pcm16Bit()));
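Note (illustrative, not part of the patch): after the move, client code resolves these parcelables from the android.media.audio.common package instead of android.media. A minimal consumer sketch, assuming the usual AIDL C++ backend header layout generated for android.media.audio.common.types-V1-cpp; makeInvalidFormat is a hypothetical helper mirroring the test fixtures above:

    #include <android/media/audio/common/AudioFormatDescription.h>
    #include <android/media/audio/common/AudioFormatType.h>

    // Alias once so call sites stay as terse as before the namespace move.
    using android::media::audio::common::AudioFormatDescription;
    using android::media::audio::common::AudioFormatType;

    // Hypothetical helper: builds the "invalid" marker value used in the tests above.
    AudioFormatDescription makeInvalidFormat() {
        AudioFormatDescription desc;
        desc.type = AudioFormatType::SYS_RESERVED_INVALID;
        return desc;
    }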
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 3bef55b..727b86f 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -24,9 +24,11 @@
"libmedia_helper_headers",
],
static_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
],
export_static_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
],
host_supported: true,
@@ -52,6 +54,7 @@
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
"libaudioutils",
@@ -63,6 +66,7 @@
],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
],
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 47b2d54..2d44f4a 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -27,6 +27,8 @@
namespace android {
+using media::audio::common::AudioChannelLayout;
+
bool operator == (const AudioProfile &left, const AudioProfile &right)
{
return (left.getFormat() == right.getFormat()) &&
@@ -160,7 +162,7 @@
parcelable.name = mName;
parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
parcelable.channelMasks = VALUE_OR_RETURN(
- convertContainer<std::vector<media::AudioChannelLayout>>(
+ convertContainer<std::vector<AudioChannelLayout>>(
mChannelMasks,
[isInput](audio_channel_mask_t m) {
return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
@@ -184,7 +186,7 @@
aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
legacy->mChannelMasks = VALUE_OR_RETURN(
convertContainer<ChannelMaskSet>(parcelable.channelMasks,
- [isInput](const media::AudioChannelLayout& l) {
+ [isInput](const AudioChannelLayout& l) {
return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
}));
legacy->mSamplingRates = VALUE_OR_RETURN(
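Note (illustrative, not part of the patch): the AudioProfile change above keeps the legacy/AIDL boundary inside the existing conversion helpers. A sketch of the same pattern in isolation, assuming the ConversionResult/convertContainer utilities from libaudioclient_aidl_conversion and the header locations noted below; toAidlLayouts is a hypothetical wrapper:

    // Assumed headers: media/AidlConversion.h (ConversionResult, convertContainer,
    // legacy2aidl_* helpers) and media/AudioContainers.h (ChannelMaskSet).
    #include <media/AidlConversion.h>
    #include <media/AudioContainers.h>

    using android::media::audio::common::AudioChannelLayout;

    // Converts a set of legacy channel masks to the relocated AIDL type.
    // isInput matters because input and output masks map to different layouts.
    ConversionResult<std::vector<AudioChannelLayout>> toAidlLayouts(
            const ChannelMaskSet& masks, bool isInput) {
        return convertContainer<std::vector<AudioChannelLayout>>(
                masks,
                [isInput](audio_channel_mask_t m) {
                    return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
                });
    }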
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index f3cd446..3f1fbea 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -18,6 +18,7 @@
],
static_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 9c1b563..4a2523f 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -347,6 +347,7 @@
shared_libs: [
"android.hidl.token@1.0-utils",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"av-types-aidl-cpp",
"liblog",
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index b91f302..e4f1b47 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -54,6 +54,7 @@
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioflinger-aidl-cpp",
"audioclient-types-aidl-cpp",
"av-types-aidl-cpp",
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index 8ae0901..de1bc37 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -44,6 +44,7 @@
"libsensorprivacy",
"libshmemcompat",
"libutils",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 95c1bab..3ff927d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -44,6 +44,7 @@
using binder::Status;
using aidl_utils::binderStatusFromStatusT;
using content::AttributionSourceState;
+using media::audio::common::AudioFormatDescription;
const std::vector<audio_usage_t>& SYSTEM_USAGES = {
AUDIO_USAGE_CALL_ASSISTANT,
@@ -99,7 +100,7 @@
const media::AudioDevice& deviceAidl,
media::AudioPolicyDeviceState stateAidl,
const std::string& deviceNameAidl,
- const media::AudioFormatDescription& encodedFormatAidl) {
+ const AudioFormatDescription& encodedFormatAidl) {
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
audio_policy_dev_state_t state = VALUE_OR_RETURN_BINDER_STATUS(
@@ -149,7 +150,7 @@
Status AudioPolicyService::handleDeviceConfigChange(
const media::AudioDevice& deviceAidl,
const std::string& deviceNameAidl,
- const media::AudioFormatDescription& encodedFormatAidl) {
+ const AudioFormatDescription& encodedFormatAidl) {
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1827,7 +1828,7 @@
}
Status AudioPolicyService::getSurroundFormats(media::Int* count,
- std::vector<media::AudioFormatDescription>* formats,
+ std::vector<AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
@@ -1860,7 +1861,7 @@
}
Status AudioPolicyService::getReportedSurroundFormats(
- media::Int* count, std::vector<media::AudioFormatDescription>* formats) {
+ media::Int* count, std::vector<AudioFormatDescription>* formats) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
if (numSurroundFormats > MAX_ITEMS_PER_LIST) {
@@ -1887,7 +1888,7 @@
}
Status AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::AudioFormatDescription>* _aidl_return) {
+ std::vector<AudioFormatDescription>* _aidl_return) {
std::vector<audio_format_t> formats;
if (mAudioPolicyManager == NULL) {
@@ -1898,14 +1899,14 @@
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(&formats)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<media::AudioFormatDescription>>(
+ convertContainer<std::vector<AudioFormatDescription>>(
formats,
legacy2aidl_audio_format_t_AudioFormatDescription));
return Status::ok();
}
Status AudioPolicyService::setSurroundFormatEnabled(
- const media::AudioFormatDescription& audioFormatAidl, bool enabled) {
+ const AudioFormatDescription& audioFormatAidl, bool enabled) {
audio_format_t audioFormat = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioFormatDescription_audio_format_t(audioFormatAidl));
if (mAudioPolicyManager == NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 97d4c00..129c757 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -49,6 +49,7 @@
namespace android {
using content::AttributionSourceState;
+using media::audio::common::AudioFormatDescription;
// ----------------------------------------------------------------------------
@@ -74,13 +75,13 @@
const media::AudioDevice& device,
media::AudioPolicyDeviceState state,
const std::string& deviceName,
- const media::AudioFormatDescription& encodedFormat) override;
+ const AudioFormatDescription& encodedFormat) override;
binder::Status getDeviceConnectionState(const media::AudioDevice& device,
media::AudioPolicyDeviceState* _aidl_return) override;
binder::Status handleDeviceConfigChange(
const media::AudioDevice& device,
const std::string& deviceName,
- const media::AudioFormatDescription& encodedFormat) override;
+ const AudioFormatDescription& encodedFormat) override;
binder::Status setPhoneState(media::AudioMode state, int32_t uid) override;
binder::Status setForceUse(media::AudioPolicyForceUse usage,
media::AudioPolicyForcedConfig config) override;
@@ -199,13 +200,13 @@
const media::AudioDeviceDescription& device,
float* _aidl_return) override;
binder::Status getSurroundFormats(media::Int* count,
- std::vector<media::AudioFormatDescription>* formats,
+ std::vector<AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) override;
binder::Status getReportedSurroundFormats(
- media::Int* count, std::vector<media::AudioFormatDescription>* formats) override;
+ media::Int* count, std::vector<AudioFormatDescription>* formats) override;
binder::Status getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::AudioFormatDescription>* _aidl_return) override;
- binder::Status setSurroundFormatEnabled(const media::AudioFormatDescription& audioFormat,
+ std::vector<AudioFormatDescription>* _aidl_return) override;
+ binder::Status setSurroundFormatEnabled(const AudioFormatDescription& audioFormat,
bool enabled) override;
binder::Status setAssistantUid(int32_t uid) override;
binder::Status setHotwordDetectionServiceUid(int32_t uid) override;