François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2015 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #pragma once |
| 18 | |
#include <string.h>

#include <algorithm>
#include <set>
#include <vector>

#include <system/audio.h>

#include <media/AudioContainers.h>
| 26 | |
François Gaffie | dc7553f | 2018-11-02 10:39:57 +0100 | [diff] [blame] | 27 | namespace android { |
| 28 | |
// Collection of audio stream types, used throughout the policy engine APIs.
using StreamTypeVector = std::vector<audio_stream_type_t>;
| 30 | |
Mikhail Naganov | bada1f5 | 2024-12-03 16:20:10 -0800 | [diff] [blame^] | 31 | #define AUDIO_ENUM_QUOTE(x) #x |
| 32 | #define AUDIO_ENUM_STRINGIFY(x) AUDIO_ENUM_QUOTE(x) |
| 33 | #define AUDIO_DEFINE_ENUM_SYMBOL_V(symbol, value) symbol = value, |
| 34 | #define AUDIO_DEFINE_STRINGIFY_CASE_V(symbol, _) case symbol: return AUDIO_ENUM_STRINGIFY(symbol); |
| 35 | #define AUDIO_DEFINE_PARSE_CASE_V(symbol, _) \ |
| 36 | if (strcmp(s, AUDIO_ENUM_STRINGIFY(symbol)) == 0) { *t = symbol; return true; } else |
| 37 | #define AUDIO_DEFINE_MAP_ENTRY_V(symbol, _) { AUDIO_ENUM_STRINGIFY(symbol), symbol }, |
| 38 | |
François Gaffie | ad3dce9 | 2024-03-26 17:20:04 +0100 | [diff] [blame] | 39 | /** |
| 40 | * Legacy audio policy product strategies IDs. These strategies are supported by the default |
| 41 | * policy engine. |
Eric Laurent | 3047783 | 2024-10-09 17:31:03 +0000 | [diff] [blame] | 42 | * IMPORTANT NOTE: the order of this enum is important as it determines the priority |
Mikhail Naganov | bada1f5 | 2024-12-03 16:20:10 -0800 | [diff] [blame^] | 43 | * between active strategies for routing decisions: lower enum value => higher priority |
François Gaffie | ad3dce9 | 2024-03-26 17:20:04 +0100 | [diff] [blame] | 44 | */ |
Mikhail Naganov | bada1f5 | 2024-12-03 16:20:10 -0800 | [diff] [blame^] | 45 | #define AUDIO_LEGACY_STRATEGY_LIST_DEF(V) \ |
| 46 | V(STRATEGY_NONE, -1) \ |
| 47 | V(STRATEGY_PHONE, 0) \ |
| 48 | V(STRATEGY_SONIFICATION, 1) \ |
| 49 | V(STRATEGY_ENFORCED_AUDIBLE, 2) \ |
| 50 | V(STRATEGY_ACCESSIBILITY, 3) \ |
| 51 | V(STRATEGY_SONIFICATION_RESPECTFUL, 4) \ |
| 52 | V(STRATEGY_MEDIA, 5) \ |
| 53 | V(STRATEGY_DTMF, 6) \ |
| 54 | V(STRATEGY_CALL_ASSISTANT, 7) \ |
| 55 | V(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, 8) \ |
| 56 | V(STRATEGY_REROUTING, 9) \ |
| 57 | V(STRATEGY_PATCH, 10) |
| 58 | |
François Gaffie | ad3dce9 | 2024-03-26 17:20:04 +0100 | [diff] [blame] | 59 | enum legacy_strategy { |
Mikhail Naganov | bada1f5 | 2024-12-03 16:20:10 -0800 | [diff] [blame^] | 60 | AUDIO_LEGACY_STRATEGY_LIST_DEF(AUDIO_DEFINE_ENUM_SYMBOL_V) |
François Gaffie | ad3dce9 | 2024-03-26 17:20:04 +0100 | [diff] [blame] | 61 | }; |
| 62 | |
Mikhail Naganov | bada1f5 | 2024-12-03 16:20:10 -0800 | [diff] [blame^] | 63 | inline const char* legacy_strategy_to_string(legacy_strategy t) { |
| 64 | switch (t) { |
| 65 | AUDIO_LEGACY_STRATEGY_LIST_DEF(AUDIO_DEFINE_STRINGIFY_CASE_V) |
| 66 | } |
| 67 | return ""; |
| 68 | } |
| 69 | |
// Parses a symbolic strategy name (as produced by legacy_strategy_to_string)
// back into its enum value. On success stores the value into *t and returns
// true; for an unrecognized name returns false and leaves *t untouched.
// Each AUDIO_DEFINE_PARSE_CASE_V expansion ends in a dangling `else`, so the
// `return false` below is the final else branch of the generated chain.
inline bool legacy_strategy_from_string(const char* s, legacy_strategy* t) {
    AUDIO_LEGACY_STRATEGY_LIST_DEF(AUDIO_DEFINE_PARSE_CASE_V)
    return false;
}
| 74 | |
namespace audio_policy {

// Pairs a legacy strategy's string name with its enum id.
struct legacy_strategy_map { const char *name; legacy_strategy id; };

// Returns the full {name, id} table for every legacy strategy, generated from
// AUDIO_LEGACY_STRATEGY_LIST_DEF (STRATEGY_NONE included).
inline std::vector<legacy_strategy_map> getLegacyStrategyMap() {
    return std::vector<legacy_strategy_map> {
    AUDIO_LEGACY_STRATEGY_LIST_DEF(AUDIO_DEFINE_MAP_ENTRY_V)
    };
}

} // namespace audio_policy
| 86 | |
| 87 | #undef AUDIO_LEGACY_STRATEGY_LIST_DEF |
| 88 | |
| 89 | #undef AUDIO_DEFINE_MAP_ENTRY_V |
| 90 | #undef AUDIO_DEFINE_PARSE_CASE_V |
| 91 | #undef AUDIO_DEFINE_STRINGIFY_CASE_V |
| 92 | #undef AUDIO_DEFINE_ENUM_SYMBOL_V |
| 93 | #undef AUDIO_ENUM_STRINGIFY |
| 94 | #undef AUDIO_ENUM_QUOTE |
| 95 | |
François Gaffie | dc7553f | 2018-11-02 10:39:57 +0100 | [diff] [blame] | 96 | static const audio_attributes_t defaultAttr = AUDIO_ATTRIBUTES_INITIALIZER; |
| 97 | |
jiabin | 220eea1 | 2024-05-17 17:55:20 +0000 | [diff] [blame] | 98 | static const std::set<audio_usage_t > gHighPriorityUseCases = { |
| 99 | AUDIO_USAGE_ALARM, AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE |
| 100 | }; |
| 101 | |
François Gaffie | dc7553f | 2018-11-02 10:39:57 +0100 | [diff] [blame] | 102 | } // namespace android |
François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 103 | |
François Gaffie | 5fcd6f9 | 2015-11-27 13:46:12 +0100 | [diff] [blame] | 104 | static const audio_format_t gDynamicFormat = AUDIO_FORMAT_DEFAULT; |
François Gaffie | 5fcd6f9 | 2015-11-27 13:46:12 +0100 | [diff] [blame] | 105 | |
François Gaffie | dc7553f | 2018-11-02 10:39:57 +0100 | [diff] [blame] | 106 | static const uint32_t SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY = 5000; |
| 107 | |
Glenn Kasten | 05ddca5 | 2016-02-11 08:17:12 -0800 | [diff] [blame] | 108 | // Used when a client opens a capture stream, without specifying a desired sample rate. |
| 109 | #define SAMPLE_RATE_HZ_DEFAULT 48000 |
François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 110 | |
| 111 | // For mixed output and inputs, the policy will use max mixer channel count. |
| 112 | // Do not limit channel count otherwise |
Andy Hung | 936845a | 2021-06-08 00:09:06 -0700 | [diff] [blame] | 113 | #define MAX_MIXER_CHANNEL_COUNT FCC_LIMIT |
François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 114 | |
| 115 | /** |
Eric Laurent | 5a2b629 | 2016-04-14 18:05:57 -0700 | [diff] [blame] | 116 | * Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume |
| 117 | * control APIs (e.g setStreamVolumeIndex(). |
| 118 | */ |
| 119 | #define AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME AUDIO_DEVICE_OUT_DEFAULT |
| 120 | |
| 121 | |
| 122 | /** |
François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 123 | * Check if the state given correspond to an in call state. |
| 124 | * @TODO find a better name for widely call state |
| 125 | * |
| 126 | * @param[in] state to consider |
| 127 | * |
| 128 | * @return true if given state represents a device in a telephony or VoIP call |
| 129 | */ |
| 130 | static inline bool is_state_in_call(int state) |
| 131 | { |
| 132 | return (state == AUDIO_MODE_IN_CALL) || (state == AUDIO_MODE_IN_COMMUNICATION); |
| 133 | } |
| 134 | |
| 135 | /** |
jiabin | b124ec5 | 2019-09-18 15:13:13 -0700 | [diff] [blame] | 136 | * Check whether the output device type is one |
| 137 | * where addresses are used to distinguish between one connected device and another |
| 138 | * |
| 139 | * @param[in] device to consider |
| 140 | * |
| 141 | * @return true if the device needs distinguish on address, false otherwise.. |
| 142 | */ |
| 143 | static inline bool apm_audio_out_device_distinguishes_on_address(audio_devices_t device) |
| 144 | { |
| 145 | return device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX || |
| 146 | device == AUDIO_DEVICE_OUT_BUS; |
| 147 | } |
| 148 | |
| 149 | /** |
| 150 | * Check whether the input device type is one |
| 151 | * where addresses are used to distinguish between one connected device and another |
| 152 | * |
| 153 | * @param[in] device to consider |
| 154 | * |
| 155 | * @return true if the device needs distinguish on address, false otherwise.. |
| 156 | */ |
| 157 | static inline bool apm_audio_in_device_distinguishes_on_address(audio_devices_t device) |
| 158 | { |
| 159 | return device == AUDIO_DEVICE_IN_REMOTE_SUBMIX || |
| 160 | device == AUDIO_DEVICE_IN_BUS; |
| 161 | } |
| 162 | |
| 163 | /** |
François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 164 | * Check whether the device type is one |
| 165 | * where addresses are used to distinguish between one connected device and another |
| 166 | * |
| 167 | * @param[in] device to consider |
| 168 | * |
| 169 | * @return true if the device needs distinguish on address, false otherwise.. |
| 170 | */ |
Chih-Hung Hsieh | 5603d28 | 2015-05-04 17:14:15 -0700 | [diff] [blame] | 171 | static inline bool device_distinguishes_on_address(audio_devices_t device) |
François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 172 | { |
jiabin | b124ec5 | 2019-09-18 15:13:13 -0700 | [diff] [blame] | 173 | return apm_audio_in_device_distinguishes_on_address(device) || |
| 174 | apm_audio_out_device_distinguishes_on_address(device); |
François Gaffie | 53615e2 | 2015-03-19 09:24:12 +0100 | [diff] [blame] | 175 | } |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 176 | |
| 177 | /** |
Aniket Kumar Lata | 4e46470 | 2019-01-10 23:38:46 -0800 | [diff] [blame] | 178 | * Check whether audio device has encoding capability. |
| 179 | * |
| 180 | * @param[in] device to consider |
| 181 | * |
| 182 | * @return true if device has encoding capability, false otherwise.. |
| 183 | */ |
| 184 | static inline bool device_has_encoding_capability(audio_devices_t device) |
| 185 | { |
Eric Laurent | 7e3c083 | 2023-11-30 15:04:50 +0100 | [diff] [blame] | 186 | return audio_is_a2dp_out_device(device) || audio_is_ble_out_device(device); |
Aniket Kumar Lata | 4e46470 | 2019-01-10 23:38:46 -0800 | [diff] [blame] | 187 | } |
| 188 | |
| 189 | /** |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 190 | * Returns the priority of a given audio source for capture. The priority is used when more than one |
| 191 | * capture session is active on a given input stream to determine which session drives routing and |
| 192 | * effect configuration. |
| 193 | * |
| 194 | * @param[in] inputSource to consider. Valid sources are: |
| 195 | * - AUDIO_SOURCE_VOICE_COMMUNICATION |
| 196 | * - AUDIO_SOURCE_CAMCORDER |
Eric Laurent | ae4b6ec | 2019-01-15 18:34:38 -0800 | [diff] [blame] | 197 | * - AUDIO_SOURCE_VOICE_PERFORMANCE |
| 198 | * - AUDIO_SOURCE_UNPROCESSED |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 199 | * - AUDIO_SOURCE_MIC |
Eric Laurent | ae4b6ec | 2019-01-15 18:34:38 -0800 | [diff] [blame] | 200 | * - AUDIO_SOURCE_ECHO_REFERENCE |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 201 | * - AUDIO_SOURCE_FM_TUNER |
| 202 | * - AUDIO_SOURCE_VOICE_RECOGNITION |
| 203 | * - AUDIO_SOURCE_HOTWORD |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 204 | * - AUDIO_SOURCE_ULTRASOUND |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 205 | * |
| 206 | * @return the corresponding input source priority or 0 if priority is irrelevant for this source. |
| 207 | * This happens when the specified source cannot share a given input stream (e.g remote submix) |
| 208 | * The higher the value, the higher the priority. |
| 209 | */ |
| 210 | static inline int32_t source_priority(audio_source_t inputSource) |
| 211 | { |
| 212 | switch (inputSource) { |
| 213 | case AUDIO_SOURCE_VOICE_COMMUNICATION: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 214 | return 10; |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 215 | case AUDIO_SOURCE_CAMCORDER: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 216 | return 9; |
Eric Laurent | ae4b6ec | 2019-01-15 18:34:38 -0800 | [diff] [blame] | 217 | case AUDIO_SOURCE_VOICE_PERFORMANCE: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 218 | return 8; |
Eric Laurent | ae4b6ec | 2019-01-15 18:34:38 -0800 | [diff] [blame] | 219 | case AUDIO_SOURCE_UNPROCESSED: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 220 | return 7; |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 221 | case AUDIO_SOURCE_MIC: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 222 | return 6; |
Eric Laurent | ae4b6ec | 2019-01-15 18:34:38 -0800 | [diff] [blame] | 223 | case AUDIO_SOURCE_ECHO_REFERENCE: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 224 | return 5; |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 225 | case AUDIO_SOURCE_FM_TUNER: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 226 | return 4; |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 227 | case AUDIO_SOURCE_VOICE_RECOGNITION: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 228 | return 3; |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 229 | case AUDIO_SOURCE_HOTWORD: |
Carter Hsu | a3abb40 | 2021-10-26 11:11:20 +0800 | [diff] [blame] | 230 | return 2; |
| 231 | case AUDIO_SOURCE_ULTRASOUND: |
Eric Laurent | fb66dd9 | 2016-01-28 18:32:03 -0800 | [diff] [blame] | 232 | return 1; |
| 233 | default: |
| 234 | break; |
| 235 | } |
| 236 | return 0; |
| 237 | } |
Eric Laurent | e693002 | 2016-02-11 10:20:40 -0800 | [diff] [blame] | 238 | |
| 239 | /* Indicates if audio formats are equivalent when considering a match between |
| 240 | * audio HAL supported formats and client requested formats |
| 241 | */ |
| 242 | static inline bool audio_formats_match(audio_format_t format1, |
| 243 | audio_format_t format2) |
| 244 | { |
| 245 | if (audio_is_linear_pcm(format1) && |
| 246 | (audio_bytes_per_sample(format1) > 2) && |
| 247 | audio_is_linear_pcm(format2) && |
| 248 | (audio_bytes_per_sample(format2) > 2)) { |
| 249 | return true; |
| 250 | } |
| 251 | return format1 == format2; |
| 252 | } |
François Gaffie | c005e56 | 2018-11-06 15:04:49 +0100 | [diff] [blame] | 253 | |
| 254 | /** |
| 255 | * @brief hasStream checks if a given stream type is found in the list of streams |
| 256 | * @param streams collection of stream types to consider. |
| 257 | * @param streamType to consider |
| 258 | * @return true if voice stream is found in the given streams, false otherwise |
| 259 | */ |
| 260 | static inline bool hasStream(const android::StreamTypeVector &streams, |
| 261 | audio_stream_type_t streamType) |
| 262 | { |
| 263 | return std::find(begin(streams), end(streams), streamType) != end(streams); |
| 264 | } |
| 265 | |
| 266 | /** |
| 267 | * @brief hasVoiceStream checks if a voice stream is found in the list of streams |
| 268 | * @param streams collection to consider. |
| 269 | * @return true if voice stream is found in the given streams, false otherwise |
| 270 | */ |
| 271 | static inline bool hasVoiceStream(const android::StreamTypeVector &streams) |
| 272 | { |
| 273 | return hasStream(streams, AUDIO_STREAM_VOICE_CALL); |
| 274 | } |
jiabin | 4381040 | 2019-10-24 14:58:31 -0700 | [diff] [blame] | 275 | |
| 276 | /** |
| 277 | * @brief extract one device relevant from multiple device selection |
| 278 | * @param deviceTypes collection of audio device type |
| 279 | * @return the device type that is selected |
| 280 | */ |
| 281 | static inline audio_devices_t apm_extract_one_audio_device( |
| 282 | const android::DeviceTypeSet& deviceTypes) { |
| 283 | if (deviceTypes.empty()) { |
| 284 | return AUDIO_DEVICE_NONE; |
| 285 | } else if (deviceTypes.size() == 1) { |
| 286 | return *(deviceTypes.begin()); |
| 287 | } else { |
| 288 | // Multiple device selection is either: |
jiabin | a35a040 | 2023-04-12 16:35:18 +0000 | [diff] [blame] | 289 | // - dock + one other device: give priority to dock in this case. |
jiabin | 4381040 | 2019-10-24 14:58:31 -0700 | [diff] [blame] | 290 | // - speaker + one other device: give priority to speaker in this case. |
| 291 | // - one A2DP device + another device: happens with duplicated output. In this case |
| 292 | // retain the device on the A2DP output as the other must not correspond to an active |
| 293 | // selection if not the speaker. |
| 294 | // - HDMI-CEC system audio mode only output: give priority to available item in order. |
jiabin | a35a040 | 2023-04-12 16:35:18 +0000 | [diff] [blame] | 295 | if (deviceTypes.count(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET) != 0) { |
| 296 | return AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET; |
| 297 | } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) != 0) { |
jiabin | 4381040 | 2019-10-24 14:58:31 -0700 | [diff] [blame] | 298 | return AUDIO_DEVICE_OUT_SPEAKER; |
| 299 | } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER_SAFE) != 0) { |
| 300 | return AUDIO_DEVICE_OUT_SPEAKER_SAFE; |
| 301 | } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_ARC) != 0) { |
| 302 | return AUDIO_DEVICE_OUT_HDMI_ARC; |
Kuowei Li | 01a686b | 2020-10-27 16:54:39 +0800 | [diff] [blame] | 303 | } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_EARC) != 0) { |
| 304 | return AUDIO_DEVICE_OUT_HDMI_EARC; |
jiabin | 4381040 | 2019-10-24 14:58:31 -0700 | [diff] [blame] | 305 | } else if (deviceTypes.count(AUDIO_DEVICE_OUT_AUX_LINE) != 0) { |
| 306 | return AUDIO_DEVICE_OUT_AUX_LINE; |
| 307 | } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPDIF) != 0) { |
| 308 | return AUDIO_DEVICE_OUT_SPDIF; |
| 309 | } else { |
| 310 | std::vector<audio_devices_t> a2dpDevices = android::Intersection( |
| 311 | deviceTypes, android::getAudioDeviceOutAllA2dpSet()); |
| 312 | if (a2dpDevices.empty() || a2dpDevices.size() > 1) { |
| 313 | ALOGW("%s invalid device combination: %s", |
| 314 | __func__, android::dumpDeviceTypes(deviceTypes).c_str()); |
| 315 | } |
| 316 | return a2dpDevices.empty() ? AUDIO_DEVICE_NONE : a2dpDevices[0]; |
| 317 | } |
| 318 | } |
Kuowei Li | 01a686b | 2020-10-27 16:54:39 +0800 | [diff] [blame] | 319 | } |
jiabin | a84c3d3 | 2022-12-02 18:59:55 +0000 | [diff] [blame] | 320 | |
| 321 | /** |
| 322 | * Indicates if two given audio output flags are considered as matched, which means that |
| 323 | * 1) the `supersetFlags` and `subsetFlags` both contain or both don't contain must match flags and |
| 324 | * 2) `supersetFlags` contains all flags from `subsetFlags`. |
| 325 | */ |
| 326 | static inline bool audio_output_flags_is_subset(audio_output_flags_t supersetFlags, |
| 327 | audio_output_flags_t subsetFlags, |
| 328 | uint32_t mustMatchFlags) |
| 329 | { |
| 330 | return ((supersetFlags ^ subsetFlags) & mustMatchFlags) == AUDIO_OUTPUT_FLAG_NONE |
| 331 | && (supersetFlags & subsetFlags) == subsetFlags; |
| 332 | } |