/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <system/audio.h>

#include <algorithm>  // std::find()
#include <vector>

#include <log/log.h>  // ALOGW()
#include <media/AudioContainers.h>

namespace android {

using StreamTypeVector = std::vector<audio_stream_type_t>;

/**
 * Legacy audio policy product strategy IDs. These strategies are supported by the default
 * policy engine.
 */
enum legacy_strategy {
    STRATEGY_NONE = -1,
    STRATEGY_MEDIA,
    STRATEGY_PHONE,
    STRATEGY_SONIFICATION,
    STRATEGY_SONIFICATION_RESPECTFUL,
    STRATEGY_DTMF,
    STRATEGY_ENFORCED_AUDIBLE,
    STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
    STRATEGY_ACCESSIBILITY,
    STRATEGY_REROUTING,
    STRATEGY_CALL_ASSISTANT,
    STRATEGY_PATCH,
};

static const audio_attributes_t defaultAttr = AUDIO_ATTRIBUTES_INITIALIZER;

} // namespace android

// Placeholder format used to mark a dynamic format in audio profiles (the actual format is
// resolved at run time).
static const audio_format_t gDynamicFormat = AUDIO_FORMAT_DEFAULT;

// Look-back window (in milliseconds) during which music is still considered active after it
// stops, used by STRATEGY_SONIFICATION_RESPECTFUL.
static const uint32_t SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY = 5000;

// Used when a client opens a capture stream without specifying a desired sample rate.
#define SAMPLE_RATE_HZ_DEFAULT 48000

// For mixed outputs and inputs, the policy uses the maximum mixer channel count.
// Do not limit the channel count otherwise.
#define MAX_MIXER_CHANNEL_COUNT FCC_LIMIT

/**
 * Alias of AUDIO_DEVICE_OUT_DEFAULT, defined for clarity when this value is used by volume
 * control APIs (e.g. setStreamVolumeIndex()).
 */
#define AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME AUDIO_DEVICE_OUT_DEFAULT


/**
 * Check whether the given state corresponds to an in-call state.
 * @TODO find a better name covering both telephony and VoIP call states
 *
 * @param[in] state to consider
 *
 * @return true if the given state represents a device in a telephony or VoIP call
 */
static inline bool is_state_in_call(int state)
{
    return (state == AUDIO_MODE_IN_CALL) || (state == AUDIO_MODE_IN_COMMUNICATION);
}

/**
 * Check whether the output device type is one where addresses are used to distinguish
 * between one connected device and another.
 *
 * @param[in] device to consider
 *
 * @return true if the device must be distinguished by its address, false otherwise.
 */
static inline bool apm_audio_out_device_distinguishes_on_address(audio_devices_t device)
{
    return device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ||
           device == AUDIO_DEVICE_OUT_BUS;
}

/**
 * Check whether the input device type is one where addresses are used to distinguish
 * between one connected device and another.
 *
 * @param[in] device to consider
 *
 * @return true if the device must be distinguished by its address, false otherwise.
 */
static inline bool apm_audio_in_device_distinguishes_on_address(audio_devices_t device)
{
    return device == AUDIO_DEVICE_IN_REMOTE_SUBMIX ||
           device == AUDIO_DEVICE_IN_BUS;
}

/**
 * Check whether the device type (input or output) is one where addresses are used to
 * distinguish between one connected device and another.
 *
 * @param[in] device to consider
 *
 * @return true if the device must be distinguished by its address, false otherwise.
 */
static inline bool device_distinguishes_on_address(audio_devices_t device)
{
    return apm_audio_in_device_distinguishes_on_address(device) ||
           apm_audio_out_device_distinguishes_on_address(device);
}
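
// Illustrative usage (not part of the original header): remote submix and bus devices carry a
// meaningful address, so two connected devices of the same type are told apart by address.
//
//   device_distinguishes_on_address(AUDIO_DEVICE_OUT_BUS);      // true
//   device_distinguishes_on_address(AUDIO_DEVICE_OUT_SPEAKER);  // false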

/**
 * Check whether an audio device has encoding capability.
 *
 * @param[in] device to consider
 *
 * @return true if the device has encoding capability, false otherwise.
 */
static inline bool device_has_encoding_capability(audio_devices_t device)
{
    return audio_is_a2dp_out_device(device) || audio_is_ble_out_device(device);
}

/**
 * Returns the priority of a given audio source for capture. The priority is used when more than
 * one capture session is active on a given input stream to determine which session drives routing
 * and effect configuration.
 *
 * @param[in] inputSource to consider. Valid sources are:
 * - AUDIO_SOURCE_VOICE_COMMUNICATION
 * - AUDIO_SOURCE_CAMCORDER
 * - AUDIO_SOURCE_VOICE_PERFORMANCE
 * - AUDIO_SOURCE_UNPROCESSED
 * - AUDIO_SOURCE_MIC
 * - AUDIO_SOURCE_ECHO_REFERENCE
 * - AUDIO_SOURCE_FM_TUNER
 * - AUDIO_SOURCE_VOICE_RECOGNITION
 * - AUDIO_SOURCE_HOTWORD
 * - AUDIO_SOURCE_ULTRASOUND
 *
 * @return the corresponding input source priority, or 0 if priority is irrelevant for this
 * source. This happens when the specified source cannot share a given input stream
 * (e.g. remote submix). The higher the value, the higher the priority.
 */
static inline int32_t source_priority(audio_source_t inputSource)
{
    switch (inputSource) {
    case AUDIO_SOURCE_VOICE_COMMUNICATION:
        return 10;
    case AUDIO_SOURCE_CAMCORDER:
        return 9;
    case AUDIO_SOURCE_VOICE_PERFORMANCE:
        return 8;
    case AUDIO_SOURCE_UNPROCESSED:
        return 7;
    case AUDIO_SOURCE_MIC:
        return 6;
    case AUDIO_SOURCE_ECHO_REFERENCE:
        return 5;
    case AUDIO_SOURCE_FM_TUNER:
        return 4;
    case AUDIO_SOURCE_VOICE_RECOGNITION:
        return 3;
    case AUDIO_SOURCE_HOTWORD:
        return 2;
    case AUDIO_SOURCE_ULTRASOUND:
        return 1;
    default:
        break;
    }
    return 0;
}
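
// Illustrative usage (not part of the original header): when two capture sessions share the
// same input stream, the session whose source has the higher priority drives routing and
// effect configuration.
//
//   if (source_priority(AUDIO_SOURCE_VOICE_COMMUNICATION) > source_priority(AUDIO_SOURCE_MIC)) {
//       // The VoIP capture session takes precedence over the plain microphone session.
//   }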

/* Indicates if audio formats are equivalent when considering a match between
 * audio HAL supported formats and client requested formats.
 */
static inline bool audio_formats_match(audio_format_t format1,
                                       audio_format_t format2)
{
    if (audio_is_linear_pcm(format1) &&
            (audio_bytes_per_sample(format1) > 2) &&
            audio_is_linear_pcm(format2) &&
            (audio_bytes_per_sample(format2) > 2)) {
        return true;
    }
    return format1 == format2;
}
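
// Illustrative usage (not part of the original header): any two linear PCM formats wider than
// 16 bits are treated as equivalent, while other formats must match exactly.
//
//   audio_formats_match(AUDIO_FORMAT_PCM_FLOAT, AUDIO_FORMAT_PCM_32_BIT);   // true
//   audio_formats_match(AUDIO_FORMAT_PCM_16_BIT, AUDIO_FORMAT_PCM_32_BIT);  // false
//   audio_formats_match(AUDIO_FORMAT_PCM_16_BIT, AUDIO_FORMAT_PCM_16_BIT);  // true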

/**
 * @brief hasStream checks if a given stream type is found in the list of streams
 * @param streams collection of stream types to consider.
 * @param streamType to consider
 * @return true if the given stream type is found in the given streams, false otherwise
 */
static inline bool hasStream(const android::StreamTypeVector &streams,
                             audio_stream_type_t streamType)
{
    return std::find(begin(streams), end(streams), streamType) != end(streams);
}

/**
 * @brief hasVoiceStream checks if a voice stream is found in the list of streams
 * @param streams collection to consider.
 * @return true if a voice stream is found in the given streams, false otherwise
 */
static inline bool hasVoiceStream(const android::StreamTypeVector &streams)
{
    return hasStream(streams, AUDIO_STREAM_VOICE_CALL);
}
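
// Illustrative usage (not part of the original header), e.g. with the stream types attached to
// a product strategy:
//
//   android::StreamTypeVector streams = { AUDIO_STREAM_MUSIC, AUDIO_STREAM_VOICE_CALL };
//   hasStream(streams, AUDIO_STREAM_MUSIC);  // true
//   hasVoiceStream(streams);                 // true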

/**
 * @brief extract the most relevant device from a multiple-device selection
 * @param deviceTypes collection of audio device types
 * @return the device type that is selected
 */
static inline audio_devices_t apm_extract_one_audio_device(
        const android::DeviceTypeSet& deviceTypes) {
    if (deviceTypes.empty()) {
        return AUDIO_DEVICE_NONE;
    } else if (deviceTypes.size() == 1) {
        return *(deviceTypes.begin());
    } else {
        // Multiple device selection is either:
        //  - dock + one other device: give priority to the dock in this case.
        //  - speaker + one other device: give priority to the speaker in this case.
        //  - one A2DP device + another device: happens with duplicated output. In this case,
        //    keep the device attached to the A2DP output, as the other device cannot be an
        //    active selection unless it is the speaker (already handled above).
        //  - HDMI-CEC system audio mode only output: give priority to the available item in
        //    the order below.
        if (deviceTypes.count(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET) != 0) {
            return AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) != 0) {
            return AUDIO_DEVICE_OUT_SPEAKER;
        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER_SAFE) != 0) {
            return AUDIO_DEVICE_OUT_SPEAKER_SAFE;
        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_ARC) != 0) {
            return AUDIO_DEVICE_OUT_HDMI_ARC;
        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_EARC) != 0) {
            return AUDIO_DEVICE_OUT_HDMI_EARC;
        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_AUX_LINE) != 0) {
            return AUDIO_DEVICE_OUT_AUX_LINE;
        } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPDIF) != 0) {
            return AUDIO_DEVICE_OUT_SPDIF;
        } else {
            std::vector<audio_devices_t> a2dpDevices = android::Intersection(
                    deviceTypes, android::getAudioDeviceOutAllA2dpSet());
            if (a2dpDevices.empty() || a2dpDevices.size() > 1) {
                ALOGW("%s invalid device combination: %s",
                      __func__, android::dumpDeviceTypes(deviceTypes).c_str());
            }
            return a2dpDevices.empty() ? AUDIO_DEVICE_NONE : a2dpDevices[0];
        }
    }
}
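
// Illustrative usage (not part of the original header): when both the speaker and an A2DP
// headset are selected (duplicated output), the speaker is reported as the representative
// device.
//
//   android::DeviceTypeSet devices = { AUDIO_DEVICE_OUT_SPEAKER,
//                                      AUDIO_DEVICE_OUT_BLUETOOTH_A2DP };
//   apm_extract_one_audio_device(devices);  // AUDIO_DEVICE_OUT_SPEAKER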

/**
 * Indicates whether two sets of audio output flags are considered matched, which means that
 * 1) `supersetFlags` and `subsetFlags` agree on every flag in `mustMatchFlags` (each such flag
 *    is either present in both or absent from both), and
 * 2) `supersetFlags` contains all flags from `subsetFlags`.
 */
static inline bool audio_output_flags_is_subset(audio_output_flags_t supersetFlags,
                                                audio_output_flags_t subsetFlags,
                                                uint32_t mustMatchFlags)
{
    return ((supersetFlags ^ subsetFlags) & mustMatchFlags) == AUDIO_OUTPUT_FLAG_NONE
            && (supersetFlags & subsetFlags) == subsetFlags;
}
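
// Illustrative usage (not part of the original header): an output opened with
// (FAST | PRIMARY) flags is a superset of a request for FAST alone, unless PRIMARY is a
// must-match flag, in which case both sides have to agree on it.
//
//   audio_output_flags_t superset =
//           (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_PRIMARY);
//   audio_output_flags_is_subset(superset, AUDIO_OUTPUT_FLAG_FAST,
//                                AUDIO_OUTPUT_FLAG_NONE);     // true
//   audio_output_flags_is_subset(superset, AUDIO_OUTPUT_FLAG_FAST,
//                                AUDIO_OUTPUT_FLAG_PRIMARY);  // false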