Merge changes from topic "upstream-hal-v6"
* changes:
Allow device to use Audio HAL V6
Introduce Audio V6
Change some formatting for better script parsing
Script to generate new audio HAL version
Convert audio HAL service mk to bp and rename the service
Audio service: Refactor register interface
Version dependent targets should include version in name
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 690d92f..76594fb 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -80,6 +80,7 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib64/android.hardware.configstore@1.2.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/vndk-Q/android.hardware.configstore@1.2.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/android.hardware.configstore@1.2.so)
+$(call add-clean-step, rm -f $(PRODUCT_OUT)/etc/init/android.hardware.audio@2.0-service.rc $(PRODUCT_OUT)/vendor/bin/hw/android.hardware.audio@2.0-service)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/bin/hw/android.hardware.cas@1.1*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/etc/init/android.hardware.cas@1.1*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/etc/vintf/manifest/android.hardware.cas@1.1*)
diff --git a/audio/6.0/Android.bp b/audio/6.0/Android.bp
new file mode 100644
index 0000000..dc6bb98
--- /dev/null
+++ b/audio/6.0/Android.bp
@@ -0,0 +1,27 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.audio@6.0",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IDevice.hal",
+ "IDevicesFactory.hal",
+ "IPrimaryDevice.hal",
+ "IStream.hal",
+ "IStreamIn.hal",
+ "IStreamOut.hal",
+ "IStreamOutCallback.hal",
+ ],
+ interfaces: [
+ "android.hardware.audio.common@6.0",
+ "android.hardware.audio.effect@6.0",
+ "android.hidl.base@1.0",
+ "android.hidl.safe_union@1.0",
+ ],
+ gen_java: false,
+ gen_java_constants: true,
+}
diff --git a/audio/6.0/IDevice.hal b/audio/6.0/IDevice.hal
new file mode 100644
index 0000000..42a545b
--- /dev/null
+++ b/audio/6.0/IDevice.hal
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+import android.hardware.audio.common@6.0;
+import IStreamIn;
+import IStreamOut;
+
+interface IDevice {
+ /**
+ * Returns whether the audio hardware interface has been initialized.
+ *
+ * @return retval OK on success, NOT_INITIALIZED on failure.
+ */
+ initCheck() generates (Result retval);
+
+ /**
+ * Sets the audio volume for all audio activities other than voice call. If
+ * NOT_SUPPORTED is returned, the software mixer will emulate this
+ * capability.
+ *
+ * @param volume 1.0f means unity, 0.0f is zero.
+ * @return retval operation completion status.
+ */
+ setMasterVolume(float volume) generates (Result retval);
+
+ /**
+ * Get the current master volume value for the HAL, if the HAL supports
+ * master volume control. For example, AudioFlinger will query this value
+ * from the primary audio HAL when the service starts and use the value for
+ * setting the initial master volume across all HALs. HALs which do not
+ * support this method must return NOT_SUPPORTED in 'retval'.
+ *
+ * @return retval operation completion status.
+ * @return volume 1.0f means unity, 0.0f is zero.
+ */
+ getMasterVolume() generates (Result retval, float volume);
+
+ /**
+ * Sets microphone muting state.
+ *
+ * @param mute whether microphone is muted.
+ * @return retval operation completion status.
+ */
+ setMicMute(bool mute) generates (Result retval);
+
+ /**
+ * Gets whether microphone is muted.
+ *
+ * @return retval operation completion status.
+ * @return mute whether microphone is muted.
+ */
+ getMicMute() generates (Result retval, bool mute);
+
+ /**
+ * Set the audio mute status for all audio activities. If the return value
+ * is NOT_SUPPORTED, the software mixer will emulate this capability.
+ *
+ * @param mute whether audio is muted.
+ * @return retval operation completion status.
+ */
+ setMasterMute(bool mute) generates (Result retval);
+
+ /**
+ * Get the current master mute status for the HAL, if the HAL supports
+ * master mute control. AudioFlinger will query this value from the primary
+ * audio HAL when the service starts and use the value for setting the
+ * initial master mute across all HALs. HAL must indicate that the feature
+ * is not supported by returning NOT_SUPPORTED status.
+ *
+ * @return retval operation completion status.
+ * @return mute whether audio is muted.
+ */
+ getMasterMute() generates (Result retval, bool mute);
+
+ /**
+ * Returns audio input buffer size according to parameters passed or
+ * INVALID_ARGUMENTS if one of the parameters is not supported.
+ *
+ * @param config audio configuration.
+ * @return retval operation completion status.
+ * @return bufferSize input buffer size in bytes.
+ */
+ getInputBufferSize(AudioConfig config)
+ generates (Result retval, uint64_t bufferSize);
+
+ /**
+ * This method creates and opens the audio hardware output stream.
+ * If the stream cannot be opened with the proposed audio config,
+ * the HAL must provide suggested values for the audio config.
+ *
+ * @param ioHandle handle assigned by AudioFlinger.
+ * @param device device type and (if needed) address.
+ * @param config stream configuration.
+ * @param flags additional flags.
+ * @param sourceMetadata Description of the audio that will be played.
+ *        May be used by implementations to configure hardware effects.
+ * @return retval operation completion status.
+ * @return outStream created output stream.
+ * @return suggestedConfig in case of invalid parameters, suggested config.
+ */
+ openOutputStream(
+ AudioIoHandle ioHandle,
+ DeviceAddress device,
+ AudioConfig config,
+ bitfield<AudioOutputFlag> flags,
+ SourceMetadata sourceMetadata) generates (
+ Result retval,
+ IStreamOut outStream,
+ AudioConfig suggestedConfig);
+
+ /**
+ * This method creates and opens the audio hardware input stream.
+ * If the stream cannot be opened with the proposed audio config,
+ * the HAL must provide suggested values for the audio config.
+ *
+ * @param ioHandle handle assigned by AudioFlinger.
+ * @param device device type and (if needed) address.
+ * @param config stream configuration.
+ * @param flags additional flags.
+ * @param sinkMetadata Description of the audio that is suggested by the client.
+ * May be used by implementations to configure processing effects.
+ * @return retval operation completion status.
+ * @return inStream in case of success, created input stream.
+ * @return suggestedConfig in case of invalid parameters, suggested config.
+ */
+ openInputStream(
+ AudioIoHandle ioHandle,
+ DeviceAddress device,
+ AudioConfig config,
+ bitfield<AudioInputFlag> flags,
+ SinkMetadata sinkMetadata) generates (
+ Result retval,
+ IStreamIn inStream,
+ AudioConfig suggestedConfig);
+
+ /**
+ * Returns whether HAL supports audio patches.
+ *
+ * @return supports true if audio patches are supported.
+ */
+ supportsAudioPatches() generates (bool supports);
+
+ /**
+ * Creates an audio patch between several source and sink ports. The handle
+ * is allocated by the HAL and must be unique for this audio HAL module.
+ *
+ * @param sources patch sources.
+ * @param sinks patch sinks.
+ * @return retval operation completion status.
+ * @return patch created patch handle.
+ */
+ createAudioPatch(vec<AudioPortConfig> sources, vec<AudioPortConfig> sinks)
+ generates (Result retval, AudioPatchHandle patch);
+
+ /**
+ * Release an audio patch.
+ *
+ * @param patch patch handle.
+ * @return retval operation completion status.
+ */
+ releaseAudioPatch(AudioPatchHandle patch) generates (Result retval);
+
+ /**
+ * Returns the list of supported attributes for a given audio port.
+ *
+ * As input, 'port' contains the information (type, role, address etc...)
+ * needed by the HAL to identify the port.
+ *
+ * As output, 'resultPort' contains possible attributes (sampling rates,
+ * formats, channel masks, gain controllers...) for this port.
+ *
+ * @param port port identifier.
+ * @return retval operation completion status.
+ * @return resultPort port descriptor with all parameters filled up.
+ */
+ getAudioPort(AudioPort port)
+ generates (Result retval, AudioPort resultPort);
+
+ /**
+ * Set audio port configuration.
+ *
+ * @param config audio port configuration.
+ * @return retval operation completion status.
+ */
+ setAudioPortConfig(AudioPortConfig config) generates (Result retval);
+
+ /**
+ * Gets the HW synchronization source of the device. Calling this method is
+ * equivalent to getting AUDIO_PARAMETER_HW_AV_SYNC on the legacy HAL.
+ * Optional method
+ *
+ * @return retval operation completion status: OK or NOT_SUPPORTED.
+ * @return hwAvSync HW synchronization source
+ */
+ getHwAvSync() generates (Result retval, AudioHwSync hwAvSync);
+
+ /**
+ * Sets whether the screen is on. Calling this method is equivalent to
+ * setting AUDIO_PARAMETER_KEY_SCREEN_STATE on the legacy HAL.
+ * Optional method
+ *
+ * @param turnedOn whether the screen is turned on.
+ * @return retval operation completion status.
+ */
+ setScreenState(bool turnedOn) generates (Result retval);
+
+ /**
+ * Generic method for retrieving vendor-specific parameter values.
+ * The framework does not interpret the parameters, they are passed
+ * in an opaque manner between a vendor application and HAL.
+ *
+ * Multiple parameters can be retrieved at the same time.
+ * The implementation should return as many of the requested parameters
+ * as possible, even if one or more are not supported.
+ *
+ * @param context provides more information about the request
+ * @param keys keys of the requested parameters
+ * @return retval operation completion status.
+ * OK must be returned if keys is empty.
+ * NOT_SUPPORTED must be returned if at least one key is unknown.
+ * @return parameters parameter key value pairs.
+ * Must contain the value of all requested keys if retval == OK
+ */
+ getParameters(vec<ParameterValue> context, vec<string> keys)
+ generates (Result retval, vec<ParameterValue> parameters);
+
+ /**
+ * Generic method for setting vendor-specific parameter values.
+ * The framework does not interpret the parameters, they are passed
+ * in an opaque manner between a vendor application and HAL.
+ *
+ * Multiple parameters can be set at the same time though this is
+ * discouraged as it makes failure analysis harder.
+ *
+ * If possible, a failed setParameters should not impact the platform state.
+ *
+ * @param context provides more information about the request
+ * @param parameters parameter key value pairs.
+ * @return retval operation completion status.
+ * All parameters must be successfully set for OK to be returned
+ */
+ setParameters(vec<ParameterValue> context, vec<ParameterValue> parameters)
+ generates (Result retval);
+
+ /**
+ * Returns an array with available microphones in device.
+ *
+ * @return retval INVALID_STATE if the call is not successful,
+ * OK otherwise.
+ *
+ * @return microphones array with microphones info
+ */
+ getMicrophones()
+ generates(Result retval, vec<MicrophoneInfo> microphones);
+
+ /**
+ * Notifies the device module about the connection state of an input/output
+ * device attached to it. Calling this method is equivalent to setting
+ * AUDIO_PARAMETER_DEVICE_[DIS]CONNECT on the legacy HAL.
+ *
+ * @param address audio device specification.
+ * @param connected whether the device is connected.
+ * @return retval operation completion status.
+ */
+ setConnectedState(DeviceAddress address, bool connected)
+ generates (Result retval);
+};
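For illustration only (not part of this change): a minimal sketch of how a client could call IDevice::openOutputStream through the generated C++ bindings. The V6_0 namespace, header paths, and callback signature follow the usual hidl-gen conventions and are assumptions here.

    // Hypothetical client-side sketch for IDevice::openOutputStream (@6.0 C++ bindings).
    #include <android/hardware/audio/6.0/IDevice.h>
    #include <android/hardware/audio/common/6.0/types.h>

    using namespace ::android::hardware::audio::V6_0;
    using namespace ::android::hardware::audio::common::V6_0;
    using ::android::sp;

    sp<IStreamOut> openPcmOutput(const sp<IDevice>& device, AudioIoHandle handle,
                                 const DeviceAddress& address, const AudioConfig& config) {
        sp<IStreamOut> stream;
        // Methods with multiple results return them through a synchronous callback.
        device->openOutputStream(handle, address, config,
                                 {} /* flags */, {} /* sourceMetadata */,
                                 [&](Result r, const sp<IStreamOut>& s,
                                     const AudioConfig& suggested) {
                                     if (r == Result::OK) {
                                         stream = s;
                                     } else {
                                         (void)suggested;  // HAL's counter-proposal on failure
                                     }
                                 });
        return stream;
    }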
diff --git a/audio/6.0/IDevicesFactory.hal b/audio/6.0/IDevicesFactory.hal
new file mode 100644
index 0000000..0483473
--- /dev/null
+++ b/audio/6.0/IDevicesFactory.hal
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+import android.hardware.audio.common@6.0;
+import IDevice;
+import IPrimaryDevice;
+
+/** This factory allows a HAL implementation to be split into multiple independent
+ * devices (called modules in the pre-Treble API).
+ * Note that this division is arbitrary and implementations are free
+ * to only provide the Primary device.
+ * The framework will query the devices according to audio_policy_configuration.xml.
+ *
+ * Each device name is arbitrary, provided by the vendor's audio_policy_configuration.xml
+ * and only used to identify a device in this factory.
+ * The framework must not interpret the name, treating it as opaque vendor data,
+ * with the following exception:
+ * - the "r_submix" device, which must be present to support policyMixes (e.g. Android projected).
+ *   Note that this device is included by default in builds derived from AOSP.
+ *
+ * Note that on AOSP Oreo (including MR1) the "a2dp" module does not use this API
+ * but is loaded directly from the system partition using the legacy API
+ * due to limitations with the Bluetooth framework.
+ */
+interface IDevicesFactory {
+
+ /**
+ * Opens an audio device. To close the device, it is necessary to release
+ * references to the returned device object.
+ *
+ * @param device device name.
+ * @return retval operation completion status. Returns INVALID_ARGUMENTS
+ * if there is no corresponding hardware module found,
+ * NOT_INITIALIZED if an error occurred while opening the hardware
+ * module.
+ * @return result the interface for the created device.
+ */
+ openDevice(string device) generates (Result retval, IDevice result);
+
+ /**
+ * Opens the Primary audio device, which must be present.
+ * This function is not optional and must successfully return the primary device.
+ *
+ * This device must have the name "primary".
+ *
+ * The telephony stack uses this device to control the audio during a voice call.
+ *
+ * @return retval operation completion status. Must be SUCCESS.
+ * For debugging, return INVALID_ARGUMENTS if there is no corresponding
+ * hardware module found, NOT_INITIALIZED if an error occurred
+ * while opening the hardware module.
+ * @return result the interface for the created device.
+ */
+ openPrimaryDevice() generates (Result retval, IPrimaryDevice result);
+};
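For illustration only: a sketch of obtaining the factory and the mandatory primary device, assuming the generated C++ bindings and the default service instance.

    // Hypothetical sketch: look up the service and open the primary device.
    #include <android/hardware/audio/6.0/IDevicesFactory.h>

    using namespace ::android::hardware::audio::V6_0;
    using ::android::sp;

    sp<IPrimaryDevice> getPrimaryDevice() {
        sp<IDevicesFactory> factory = IDevicesFactory::getService();  // "default" instance
        if (factory == nullptr) return nullptr;
        sp<IPrimaryDevice> primary;
        factory->openPrimaryDevice([&](Result r, const sp<IPrimaryDevice>& dev) {
            if (r == Result::OK) primary = dev;
        });
        return primary;  // the held reference keeps the device open
    }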
diff --git a/audio/6.0/IPrimaryDevice.hal b/audio/6.0/IPrimaryDevice.hal
new file mode 100644
index 0000000..78cfc65
--- /dev/null
+++ b/audio/6.0/IPrimaryDevice.hal
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+import android.hardware.audio.common@6.0;
+import IDevice;
+
+interface IPrimaryDevice extends IDevice {
+ /**
+ * Sets the audio volume of a voice call.
+ *
+ * @param volume 1.0f means unity, 0.0f is zero.
+ * @return retval operation completion status.
+ */
+ setVoiceVolume(float volume) generates (Result retval);
+
+ /**
+ * This method is used to notify the HAL about audio mode changes.
+ *
+ * @param mode new mode.
+ * @return retval operation completion status.
+ */
+ setMode(AudioMode mode) generates (Result retval);
+
+ /**
+ * Sets the name of the current BT SCO headset. Calling this method
+ * is equivalent to setting legacy "bt_headset_name" parameter.
+ * The BT SCO headset name must only be used for debugging purposes.
+ * Optional method
+ *
+ * @param name the name of the current BT SCO headset (can be empty).
+ * @return retval operation completion status.
+ */
+ setBtScoHeadsetDebugName(string name) generates (Result retval);
+
+ /**
+ * Gets whether BT SCO Noise Reduction and Echo Cancellation are enabled.
+ * Calling this method is equivalent to getting AUDIO_PARAMETER_KEY_BT_NREC
+ * on the legacy HAL.
+ *
+ * @return retval operation completion status.
+ * @return enabled whether BT SCO NR + EC are enabled.
+ */
+ getBtScoNrecEnabled() generates (Result retval, bool enabled);
+
+ /**
+ * Sets whether BT SCO Noise Reduction and Echo Cancellation are enabled.
+ * Calling this method is equivalent to setting AUDIO_PARAMETER_KEY_BT_NREC
+ * on the legacy HAL.
+ * Optional method
+ *
+ * @param enabled whether BT SCO NR + EC are enabled.
+ * @return retval operation completion status.
+ */
+ setBtScoNrecEnabled(bool enabled) generates (Result retval);
+
+ /**
+ * Gets whether BT SCO Wideband mode is enabled. Calling this method is
+ * equivalent to getting AUDIO_PARAMETER_KEY_BT_SCO_WB on the legacy HAL.
+ *
+ * @return retval operation completion status.
+ * @return enabled whether BT Wideband is enabled.
+ */
+ getBtScoWidebandEnabled() generates (Result retval, bool enabled);
+
+ /**
+ * Sets whether BT SCO Wideband mode is enabled. Calling this method is
+ * equivalent to setting AUDIO_PARAMETER_KEY_BT_SCO_WB on the legacy HAL.
+ * Optional method
+ *
+ * @param enabled whether BT Wideband is enabled.
+ * @return retval operation completion status.
+ */
+ setBtScoWidebandEnabled(bool enabled) generates (Result retval);
+
+ /**
+ * Gets whether BT HFP (Hands-Free Profile) is enabled. Calling this method
+ * is equivalent to getting "hfp_enable" parameter value on the legacy HAL.
+ *
+ * @return retval operation completion status.
+ * @return enabled whether BT HFP is enabled.
+ */
+ getBtHfpEnabled() generates (Result retval, bool enabled);
+
+ /**
+ * Sets whether BT HFP (Hands-Free Profile) is enabled. Calling this method
+ * is equivalent to setting "hfp_enable" parameter on the legacy HAL.
+ * Optional method
+ *
+ * @param enabled whether BT HFP is enabled.
+ * @return retval operation completion status.
+ */
+ setBtHfpEnabled(bool enabled) generates (Result retval);
+
+ /**
+ * Sets the sampling rate of BT HFP (Hands-Free Profile). Calling this
+ * method is equivalent to setting "hfp_set_sampling_rate" parameter
+ * on the legacy HAL.
+ * Optional method
+ *
+ * @param sampleRateHz sample rate in Hz.
+ * @return retval operation completion status.
+ */
+ setBtHfpSampleRate(uint32_t sampleRateHz) generates (Result retval);
+
+ /**
+ * Sets the current output volume for BT HFP (Hands-Free Profile).
+ * Calling this method is equivalent to setting "hfp_volume" parameter value
+ * on the legacy HAL (except that legacy HAL implementations expect
+ * an integer value in the range from 0 to 15.)
+ * Optional method
+ *
+ * @param volume 1.0f means unity, 0.0f is zero.
+ * @return retval operation completion status.
+ */
+ setBtHfpVolume(float volume) generates (Result retval);
+
+ enum TtyMode : int32_t {
+ OFF,
+ VCO,
+ HCO,
+ FULL
+ };
+
+ /**
+ * Gets current TTY mode selection. Calling this method is equivalent to
+ * getting AUDIO_PARAMETER_KEY_TTY_MODE on the legacy HAL.
+ *
+ * @return retval operation completion status.
+ * @return mode TTY mode.
+ */
+ getTtyMode() generates (Result retval, TtyMode mode);
+
+ /**
+ * Sets current TTY mode. Calling this method is equivalent to setting
+ * AUDIO_PARAMETER_KEY_TTY_MODE on the legacy HAL.
+ *
+ * @param mode TTY mode.
+ * @return retval operation completion status.
+ */
+ setTtyMode(TtyMode mode) generates (Result retval);
+
+ /**
+ * Gets whether Hearing Aid Compatibility - Telecoil (HAC-T) mode is
+ * enabled. Calling this method is equivalent to getting
+ * AUDIO_PARAMETER_KEY_HAC on the legacy HAL.
+ *
+ * @return retval operation completion status.
+ * @return enabled whether HAC mode is enabled.
+ */
+ getHacEnabled() generates (Result retval, bool enabled);
+
+ /**
+ * Sets whether Hearing Aid Compatibility - Telecoil (HAC-T) mode is
+ * enabled. Calling this method is equivalent to setting
+ * AUDIO_PARAMETER_KEY_HAC on the legacy HAL.
+ * Optional method
+ *
+ * @param enabled whether HAC mode is enabled.
+ * @return retval operation completion status.
+ */
+ setHacEnabled(bool enabled) generates (Result retval);
+
+ enum Rotation : int32_t {
+ DEG_0,
+ DEG_90,
+ DEG_180,
+ DEG_270
+ };
+
+ /**
+ * Updates HAL on the current rotation of the device relative to natural
+ * orientation. Calling this method is equivalent to setting legacy
+ * parameter "rotation".
+ *
+ * @param rotation rotation in degrees relative to natural device
+ * orientation.
+ * @return retval operation completion status.
+ */
+ updateRotation(Rotation rotation) generates (Result retval);
+};
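For illustration only: a sketch of the telephony-related calls on IPrimaryDevice, assuming the generated C++ bindings; single-result methods convert their Return<Result> directly to Result.

    // Hypothetical sketch of preparing the primary device for a voice call.
    #include <android/hardware/audio/6.0/IPrimaryDevice.h>
    #include <android/hardware/audio/common/6.0/types.h>

    using namespace ::android::hardware::audio::V6_0;
    using namespace ::android::hardware::audio::common::V6_0;
    using ::android::sp;

    bool enterVoiceCall(const sp<IPrimaryDevice>& primary, bool wideband) {
        Result r = primary->setMode(AudioMode::IN_CALL);
        if (r != Result::OK) return false;
        // Optional methods may legitimately answer NOT_SUPPORTED.
        primary->setBtScoWidebandEnabled(wideband);
        Result rv = primary->setVoiceVolume(1.0f);
        return rv == Result::OK;
    }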
diff --git a/audio/6.0/IStream.hal b/audio/6.0/IStream.hal
new file mode 100644
index 0000000..f4c91f8
--- /dev/null
+++ b/audio/6.0/IStream.hal
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+import android.hardware.audio.common@6.0;
+import android.hardware.audio.effect@6.0::IEffect;
+
+interface IStream {
+ /**
+ * Return the frame size (number of bytes in one audio frame).
+ *
+ * @return frameSize frame size in bytes.
+ */
+ getFrameSize() generates (uint64_t frameSize);
+
+ /**
+ * Return the frame count of the buffer. Calling this method is equivalent
+ * to getting AUDIO_PARAMETER_STREAM_FRAME_COUNT on the legacy HAL.
+ *
+ * @return count frame count.
+ */
+ getFrameCount() generates (uint64_t count);
+
+ /**
+ * Return the size of input/output buffer in bytes for this stream.
+ * It must be a multiple of the frame size.
+ *
+ * @return bufferSize buffer size in bytes.
+ */
+ getBufferSize() generates (uint64_t bufferSize);
+
+ /**
+ * Return the sampling rate in Hz.
+ *
+ * @return sampleRateHz sample rate in Hz.
+ */
+ getSampleRate() generates (uint32_t sampleRateHz);
+
+ /**
+ * Return supported native sampling rates of the stream for a given format.
+ * A supported native sample rate is a sample rate that can be efficiently
+ * played by the hardware (typically without sample-rate conversions).
+ *
+ * This function is only called for dynamic profiles. If called for a
+ * non-dynamic profile it should return NOT_SUPPORTED or the same list
+ * as in audio_policy_configuration.xml.
+ *
+ * Calling this method is equivalent to getting
+ * AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES on the legacy HAL.
+ *
+ *
+ * @param format audio format for which the sample rates are supported.
+ * @return retval operation completion status.
+ * Must be OK if the format is supported.
+ * @return sampleRates supported sample rates.
+ */
+ getSupportedSampleRates(AudioFormat format)
+ generates (Result retval, vec<uint32_t> sampleRates);
+
+ /**
+ * Sets the sampling rate of the stream. Calling this method is equivalent
+ * to setting AUDIO_PARAMETER_STREAM_SAMPLING_RATE on the legacy HAL.
+ * Optional method. If implemented, only called on a stopped stream.
+ *
+ * @param sampleRateHz sample rate in Hz.
+ * @return retval operation completion status.
+ */
+ setSampleRate(uint32_t sampleRateHz) generates (Result retval);
+
+ /**
+ * Return the channel mask of the stream.
+ *
+ * @return mask channel mask.
+ */
+ getChannelMask() generates (bitfield<AudioChannelMask> mask);
+
+ /**
+ * Return supported channel masks of the stream. Calling this method is
+ * equivalent to getting AUDIO_PARAMETER_STREAM_SUP_CHANNELS on the legacy
+ * HAL.
+ *
+ * @param format audio format for which the channel masks are supported.
+ * @return retval operation completion status.
+ * Must be OK if the format is supported.
+ * @return masks supported channel masks.
+ */
+ getSupportedChannelMasks(AudioFormat format)
+ generates (Result retval, vec<bitfield<AudioChannelMask>> masks);
+
+ /**
+ * Sets the channel mask of the stream. Calling this method is equivalent to
+ * setting AUDIO_PARAMETER_STREAM_CHANNELS on the legacy HAL.
+ * Optional method
+ *
+ * @param mask channel mask.
+ * @return retval operation completion status.
+ */
+ setChannelMask(bitfield<AudioChannelMask> mask) generates (Result retval);
+
+ /**
+ * Return the audio format of the stream.
+ *
+ * @return format audio format.
+ */
+ getFormat() generates (AudioFormat format);
+
+ /**
+ * Return supported audio formats of the stream. Calling this method is
+ * equivalent to getting AUDIO_PARAMETER_STREAM_SUP_FORMATS on the legacy
+ * HAL.
+ *
+ * @return formats supported audio formats.
+ */
+ getSupportedFormats() generates (vec<AudioFormat> formats);
+
+ /**
+ * Sets the audio format of the stream. Calling this method is equivalent to
+ * setting AUDIO_PARAMETER_STREAM_FORMAT on the legacy HAL.
+ * Optional method
+ *
+ * @param format audio format.
+ * @return retval operation completion status.
+ */
+ setFormat(AudioFormat format) generates (Result retval);
+
+ /**
+ * Convenience method for retrieving several stream parameters in
+ * one transaction.
+ *
+ * @return sampleRateHz sample rate in Hz.
+ * @return mask channel mask.
+ * @return format audio format.
+ */
+ getAudioProperties() generates (
+ uint32_t sampleRateHz, bitfield<AudioChannelMask> mask, AudioFormat format);
+
+ /**
+ * Applies audio effect to the stream.
+ *
+ * @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
+ * the effect to apply.
+ * @return retval operation completion status.
+ */
+ addEffect(uint64_t effectId) generates (Result retval);
+
+ /**
+ * Stops application of the effect to the stream.
+ *
+ * @param effectId effect ID (obtained from IEffectsFactory.createEffect) of
+ * the effect to remove.
+ * @return retval operation completion status.
+ */
+ removeEffect(uint64_t effectId) generates (Result retval);
+
+ /**
+ * Put the audio hardware input/output into standby mode.
+ * Driver must exit from standby mode at the next I/O operation.
+ *
+ * @return retval operation completion status.
+ */
+ standby() generates (Result retval);
+
+ /**
+ * Return the set of devices which this stream is connected to.
+ * Optional method
+ *
+ * @return retval operation completion status: OK or NOT_SUPPORTED.
+ * @return devices set of devices which this stream is connected to.
+ */
+ getDevices() generates (Result retval, vec<DeviceAddress> devices);
+
+ /**
+ * Connects the stream to one or multiple devices.
+ *
+ * This method must only be used for HALs that do not support
+ * 'IDevice.createAudioPatch' method. Calling this method is
+ * equivalent to setting AUDIO_PARAMETER_STREAM_ROUTING preceded
+ * with a device address in the legacy HAL interface.
+ *
+ * @param devices devices to connect the stream to.
+ * @return retval operation completion status.
+ */
+ setDevices(vec<DeviceAddress> devices) generates (Result retval);
+
+ /**
+ * Sets the HW synchronization source. Calling this method is equivalent to
+ * setting AUDIO_PARAMETER_STREAM_HW_AV_SYNC on the legacy HAL.
+ * Optional method
+ *
+ * @param hwAvSync HW synchronization source
+ * @return retval operation completion status.
+ */
+ setHwAvSync(AudioHwSync hwAvSync) generates (Result retval);
+
+ /**
+ * Generic method for retrieving vendor-specific parameter values.
+ * The framework does not interpret the parameters, they are passed
+ * in an opaque manner between a vendor application and HAL.
+ *
+ * Multiple parameters can be retrieved at the same time.
+ * The implementation should return as many of the requested parameters
+ * as possible, even if one or more are not supported.
+ *
+ * @param context provides more information about the request
+ * @param keys keys of the requested parameters
+ * @return retval operation completion status.
+ * OK must be returned if keys is empty.
+ * NOT_SUPPORTED must be returned if at least one key is unknown.
+ * @return parameters parameter key value pairs.
+ * Must contain the value of all requested keys if retval == OK
+ */
+ getParameters(vec<ParameterValue> context, vec<string> keys)
+ generates (Result retval, vec<ParameterValue> parameters);
+
+ /**
+ * Generic method for setting vendor-specific parameter values.
+ * The framework does not interpret the parameters, they are passed
+ * in an opaque manner between a vendor application and HAL.
+ *
+ * Multiple parameters can be set at the same time though this is
+ * discouraged as it makes failure analysis harder.
+ *
+ * If possible, a failed setParameters should not impact the platform state.
+ *
+ * @param context provides more information about the request
+ * @param parameters parameter key value pairs.
+ * @return retval operation completion status.
+ * All parameters must be successfully set for OK to be returned
+ */
+ setParameters(vec<ParameterValue> context, vec<ParameterValue> parameters)
+ generates (Result retval);
+
+ /**
+ * Called by the framework to start a stream operating in mmap mode.
+ * createMmapBuffer() must be called before calling start().
+ * Function only implemented by streams operating in mmap mode.
+ *
+ * @return retval OK in case of success.
+ * NOT_SUPPORTED on non mmap mode streams
+ * INVALID_STATE if called out of sequence
+ */
+ start() generates (Result retval);
+
+ /**
+ * Called by the framework to stop a stream operating in mmap mode.
+ * Function only implemented by streams operating in mmap mode.
+ *
+ * @return retval OK in case of success.
+ * NOT_SUPPORTED on non mmap mode streams
+ * INVALID_STATE if called out of sequence
+ */
+ stop() generates (Result retval);
+
+ /**
+ * Called by the framework to retrieve information on the mmap buffer used for audio
+ * samples transfer.
+ * Function only implemented by streams operating in mmap mode.
+ *
+ * @param minSizeFrames minimum buffer size requested. The actual buffer
+ * size returned in struct MmapBufferInfo can be larger.
+ * @return retval OK in case of success.
+ * NOT_SUPPORTED on non mmap mode streams
+ * NOT_INITIALIZED in case of memory allocation error
+ * INVALID_ARGUMENTS if the requested buffer size is too large
+ * INVALID_STATE if called out of sequence
+ * @return info a MmapBufferInfo struct containing information on the MMAP buffer created.
+ */
+ createMmapBuffer(int32_t minSizeFrames)
+ generates (Result retval, MmapBufferInfo info);
+
+ /**
+ * Called by the framework to read current read/write position in the mmap buffer
+ * with associated time stamp.
+ * Function only implemented by streams operating in mmap mode.
+ *
+ * @return retval OK in case of success.
+ * NOT_SUPPORTED on non mmap mode streams
+ * INVALID_STATE if called out of sequence
+ * @return position a MmapPosition struct containing current HW read/write position in frames
+ * with associated time stamp.
+ */
+ getMmapPosition()
+ generates (Result retval, MmapPosition position);
+
+ /**
+ * Called by the framework to deinitialize the stream and free up
+ * all the currently allocated resources. It is recommended to close
+ * the stream on the client side as soon as it becomes unused.
+ *
+ * @return retval OK in case of success.
+ * NOT_SUPPORTED if called on IStream instead of input or
+ * output stream interface.
+ * INVALID_STATE if the stream was already closed.
+ */
+ close() generates (Result retval);
+};
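For illustration only: a sketch showing the synchronous-callback pattern used by multi-result methods such as getAudioProperties, assuming the generated C++ bindings.

    // Hypothetical sketch: dump the basic properties of any IStream.
    #include <cstdint>
    #include <cstdio>
    #include <android/hardware/audio/6.0/IStream.h>
    #include <android/hardware/audio/common/6.0/types.h>

    using namespace ::android::hardware::audio::V6_0;
    using namespace ::android::hardware::audio::common::V6_0;
    using ::android::sp;
    using ::android::hardware::hidl_bitfield;

    void dumpStreamProperties(const sp<IStream>& stream) {
        stream->getAudioProperties([](uint32_t sampleRateHz,
                                      hidl_bitfield<AudioChannelMask> mask,
                                      AudioFormat format) {
            std::printf("rate=%u Hz mask=0x%x format=%d\n",
                        sampleRateHz, static_cast<unsigned>(mask),
                        static_cast<int>(format));
        });
    }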
diff --git a/audio/6.0/IStreamIn.hal b/audio/6.0/IStreamIn.hal
new file mode 100644
index 0000000..aadc370
--- /dev/null
+++ b/audio/6.0/IStreamIn.hal
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+import android.hardware.audio.common@6.0;
+import IStream;
+
+interface IStreamIn extends IStream {
+ /**
+ * Returns the source descriptor of the input stream. Calling this method is
+ * equivalent to getting AUDIO_PARAMETER_STREAM_INPUT_SOURCE on the legacy
+ * HAL.
+ * Optional method
+ *
+ * @return retval operation completion status.
+ * @return source audio source.
+ */
+ getAudioSource() generates (Result retval, AudioSource source);
+
+ /**
+ * Set the input gain for the audio driver.
+ * Optional method
+ *
+ * @param gain 1.0f is unity, 0.0f is zero.
+ * @return retval operation completion status.
+ */
+ setGain(float gain) generates (Result retval);
+
+ /**
+ * Commands that can be executed on the driver reader thread.
+ */
+ enum ReadCommand : int32_t {
+ READ,
+ GET_CAPTURE_POSITION
+ };
+
+ /**
+ * Data structure passed to the driver for executing commands
+ * on the driver reader thread.
+ */
+ struct ReadParameters {
+ ReadCommand command; // discriminator
+ union Params {
+ uint64_t read; // READ command, amount of bytes to read, >= 0.
+ // No parameters for GET_CAPTURE_POSITION.
+ } params;
+ };
+
+ /**
+ * Data structure passed back to the client via status message queue
+ * of 'read' operation.
+ *
+ * Possible values of 'retval' field:
+ * - OK, read operation was successful;
+ * - INVALID_ARGUMENTS, stream was not configured properly;
+ * - INVALID_STATE, stream is in a state that doesn't allow reads.
+ */
+ struct ReadStatus {
+ Result retval;
+ ReadCommand replyTo; // discriminator
+ union Reply {
+ uint64_t read; // READ command, amount of bytes read, >= 0.
+ struct CapturePosition { // same as generated by getCapturePosition.
+ uint64_t frames;
+ uint64_t time;
+ } capturePosition;
+ } reply;
+ };
+
+ /**
+ * Called when the metadata of the stream's sink has been changed.
+ * @param sinkMetadata Description of the audio that is suggested by the clients.
+ */
+ updateSinkMetadata(SinkMetadata sinkMetadata);
+
+ /**
+ * Set up required transports for receiving audio buffers from the driver.
+ *
+ * The transport consists of three message queues:
+ * -- command queue is used to instruct the reader thread what operation
+ * to perform;
+ * -- data queue is used for passing audio data from the driver
+ * to the client;
+ * -- status queue is used for reporting operation status
+ * (e.g. amount of bytes actually read or error code).
+ *
+ * The driver operates on a dedicated thread. The client must ensure that
+ * the thread is given an appropriate priority and assigned to correct
+ * scheduler and cgroup. For this purpose, the method returns identifiers
+ * of the driver thread.
+ *
+ * @param frameSize the size of a single frame, in bytes.
+ * @param framesCount the number of frames in a buffer.
+ * @param threadPriority priority of the driver thread.
+ * @return retval OK if both message queues were created successfully.
+ * INVALID_STATE if the method was already called.
+ * INVALID_ARGUMENTS if there was a problem setting up
+ * the queues.
+ * @return commandMQ a message queue used for passing commands.
+ * @return dataMQ a message queue used for passing audio data in the format
+ * specified at the stream opening.
+ * @return statusMQ a message queue used for passing status from the driver
+ * using ReadStatus structures.
+ * @return threadInfo identifiers of the driver's dedicated thread.
+ */
+ prepareForReading(uint32_t frameSize, uint32_t framesCount)
+ generates (
+ Result retval,
+ fmq_sync<ReadParameters> commandMQ,
+ fmq_sync<uint8_t> dataMQ,
+ fmq_sync<ReadStatus> statusMQ,
+ ThreadInfo threadInfo);
+
+ /**
+ * Return the amount of input frames lost in the audio driver since the last
+ * call of this function.
+ *
+ * Audio driver is expected to reset the value to 0 and restart counting
+ * upon returning the current value by this function call. Such loss
+ * typically occurs when the user space process is blocked longer than the
+ * capacity of audio driver buffers.
+ *
+ * @return framesLost the number of input audio frames lost.
+ */
+ getInputFramesLost() generates (uint32_t framesLost);
+
+ /**
+ * Return a recent count of the number of audio frames received and the
+ * clock time associated with that frame count.
+ *
+ * @return retval INVALID_STATE if the device is not ready/available,
+ * NOT_SUPPORTED if the command is not supported,
+ * OK otherwise.
+ * @return frames the total frame count received. This must be as early in
+ * the capture pipeline as possible. In general, frames
+ * must be non-negative and must not go "backwards".
+ * @return time is the clock monotonic time when frames was measured. In
+ * general, time must be a positive quantity and must not
+ * go "backwards".
+ */
+ getCapturePosition()
+ generates (Result retval, uint64_t frames, uint64_t time);
+
+ /**
+ * Returns an array with active microphones in the stream.
+ *
+ * @return retval INVALID_STATE if the call is not successful,
+ * OK otherwise.
+ *
+ * @return microphones array with microphones info
+ */
+ getActiveMicrophones()
+ generates(Result retval, vec<MicrophoneInfo> microphones);
+
+ /**
+ * Specifies the logical microphone (for processing).
+ *
+ * If the feature is not supported, an error should be returned.
+ * If multiple microphones are present, this should be treated as a preference
+ * for their combined direction.
+ *
+ * Optional method
+ *
+ * @param direction the desired capture direction (a MicrophoneDirection constant).
+ * @return retval OK if the call is successful, an error code otherwise.
+ */
+ setMicrophoneDirection(MicrophoneDirection direction)
+ generates(Result retval);
+
+ /**
+ * Specifies the zoom factor for the selected microphone (for processing).
+ *
+ * If the feature is not supported, an error should be returned.
+ * If multiple microphones are present, this should be treated as a preference
+ * for their combined field dimension.
+ *
+ * Optional method
+ *
+ * @param zoom the desired field dimension of microphone capture. Range is from -1 (wide angle),
+ * through 0 (no zoom) to 1 (maximum zoom).
+ *
+ * @return retval OK if the call is successful, an error code otherwise.
+ */
+ setMicrophoneFieldDimension(float zoom) generates(Result retval);
+};
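For illustration only: a minimal sketch of driving the read protocol described above from the client side, assuming the generated C++ bindings and libfmq. A production client (such as AudioFlinger) additionally uses an EventFlag to wake the HAL's reader thread and to wait for status instead of polling.

    // Hypothetical sketch of prepareForReading plus one blocking-style capture.
    #include <memory>
    #include <vector>
    #include <fmq/MessageQueue.h>
    #include <android/hardware/audio/6.0/IStreamIn.h>

    using namespace ::android::hardware::audio::V6_0;
    using ::android::sp;
    using ::android::hardware::kSynchronizedReadWrite;
    using ::android::hardware::MessageQueue;

    typedef MessageQueue<IStreamIn::ReadParameters, kSynchronizedReadWrite> CommandMQ;
    typedef MessageQueue<uint8_t, kSynchronizedReadWrite> DataMQ;
    typedef MessageQueue<IStreamIn::ReadStatus, kSynchronizedReadWrite> StatusMQ;

    bool captureOnce(const sp<IStreamIn>& stream, uint32_t frameSize,
                     uint32_t framesCount, std::vector<uint8_t>* buffer) {
        std::unique_ptr<CommandMQ> commandMQ;
        std::unique_ptr<DataMQ> dataMQ;
        std::unique_ptr<StatusMQ> statusMQ;
        Result res = Result::NOT_INITIALIZED;
        stream->prepareForReading(frameSize, framesCount,
            [&](Result r, const auto& cmdDesc, const auto& dataDesc,
                const auto& statusDesc, const auto& /*threadInfo*/) {
                res = r;
                if (r == Result::OK) {
                    commandMQ.reset(new CommandMQ(cmdDesc));
                    dataMQ.reset(new DataMQ(dataDesc));
                    statusMQ.reset(new StatusMQ(statusDesc));
                }
            });
        if (res != Result::OK) return false;

        IStreamIn::ReadParameters cmd;
        cmd.command = IStreamIn::ReadCommand::READ;
        cmd.params.read = uint64_t{frameSize} * framesCount;  // requested byte count
        if (!commandMQ->write(&cmd)) return false;            // real code signals an EventFlag here

        IStreamIn::ReadStatus status;
        while (!statusMQ->read(&status)) { /* real code waits on the EventFlag */ }
        if (status.retval != Result::OK) return false;

        buffer->resize(status.reply.read);
        return dataMQ->read(buffer->data(), buffer->size());
    }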
diff --git a/audio/6.0/IStreamOut.hal b/audio/6.0/IStreamOut.hal
new file mode 100644
index 0000000..941ba61
--- /dev/null
+++ b/audio/6.0/IStreamOut.hal
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+import android.hardware.audio.common@6.0;
+import IStream;
+import IStreamOutCallback;
+
+interface IStreamOut extends IStream {
+ /**
+ * Return the audio hardware driver estimated latency in milliseconds.
+ *
+ * @return latencyMs latency in milliseconds.
+ */
+ getLatency() generates (uint32_t latencyMs);
+
+ /**
+ * This method is used in situations where audio mixing is done in the
+ * hardware. It serves as a direct interface with the hardware,
+ * allowing the volume to be set directly rather than via the framework.
+ * This method may be used by streams producing multiple PCM outputs or
+ * using hardware accelerated codecs, such as MP3 or AAC.
+ * Optional method
+ *
+ * @param left left channel attenuation, 1.0f is unity, 0.0f is zero.
+ * @param right right channel attenuation, 1.0f is unity, 0.0f is zero.
+ * @return retval operation completion status.
+ * If a volume is outside [0,1], return INVALID_ARGUMENTS
+ */
+ setVolume(float left, float right) generates (Result retval);
+
+ /**
+ * Commands that can be executed on the driver writer thread.
+ */
+ enum WriteCommand : int32_t {
+ WRITE,
+ GET_PRESENTATION_POSITION,
+ GET_LATENCY
+ };
+
+ /**
+ * Data structure passed back to the client via status message queue
+ * of 'write' operation.
+ *
+ * Possible values of 'retval' field:
+ * - OK, write operation was successful;
+ * - INVALID_ARGUMENTS, stream was not configured properly;
+ * - INVALID_STATE, stream is in a state that doesn't allow writes;
+ * - INVALID_OPERATION, retrieving presentation position isn't supported.
+ */
+ struct WriteStatus {
+ Result retval;
+ WriteCommand replyTo; // discriminator
+ union Reply {
+ uint64_t written; // WRITE command, amount of bytes written, >= 0.
+ struct PresentationPosition { // same as generated by
+ uint64_t frames; // getPresentationPosition.
+ TimeSpec timeStamp;
+ } presentationPosition;
+ uint32_t latencyMs; // Same as generated by getLatency.
+ } reply;
+ };
+
+ /**
+ * Called when the metadata of the stream's source has been changed.
+ * @param sourceMetadata Description of the audio that is played by the clients.
+ */
+ updateSourceMetadata(SourceMetadata sourceMetadata);
+
+ /**
+ * Set up required transports for passing audio buffers to the driver.
+ *
+ * The transport consists of three message queues:
+ * -- command queue is used to instruct the writer thread what operation
+ * to perform;
+ * -- data queue is used for passing audio data from the client
+ * to the driver;
+ * -- status queue is used for reporting operation status
+ * (e.g. amount of bytes actually written or error code).
+ *
+ * The driver operates on a dedicated thread. The client must ensure that
+ * the thread is given an appropriate priority and assigned to correct
+ * scheduler and cgroup. For this purpose, the method returns identifiers
+ * of the driver thread.
+ *
+ * @param frameSize the size of a single frame, in bytes.
+ * @param framesCount the number of frames in a buffer.
+ * @return retval OK if both message queues were created successfully.
+ * INVALID_STATE if the method was already called.
+ * INVALID_ARGUMENTS if there was a problem setting up
+ * the queues.
+ * @return commandMQ a message queue used for passing commands.
+ * @return dataMQ a message queue used for passing audio data in the format
+ * specified at the stream opening.
+ * @return statusMQ a message queue used for passing status from the driver
+ * using WriteStatus structures.
+ * @return threadInfo identifiers of the driver's dedicated thread.
+ */
+ prepareForWriting(uint32_t frameSize, uint32_t framesCount)
+ generates (
+ Result retval,
+ fmq_sync<WriteCommand> commandMQ,
+ fmq_sync<uint8_t> dataMQ,
+ fmq_sync<WriteStatus> statusMQ,
+ ThreadInfo threadInfo);
+
+ /**
+ * Return the number of audio frames written by the audio DSP to DAC since
+ * the output has exited standby.
+ * Optional method
+ *
+ * @return retval operation completion status.
+ * @return dspFrames number of audio frames written.
+ */
+ getRenderPosition() generates (Result retval, uint32_t dspFrames);
+
+ /**
+ * Get the local time at which the next write to the audio driver will be
+ * presented. The units are microseconds, where the epoch is decided by the
+ * local audio HAL.
+ * Optional method
+ *
+ * @return retval operation completion status.
+ * @return timestampUs time of the next write.
+ */
+ getNextWriteTimestamp() generates (Result retval, int64_t timestampUs);
+
+ /**
+ * Set the callback interface for notifying completion of non-blocking
+ * write and drain.
+ *
+ * Calling this function implies that all future 'write' and 'drain' calls
+ * must be non-blocking and use the callback to signal completion.
+ *
+ * 'clearCallback' method needs to be called in order to release the local
+ * callback proxy on the server side and thus dereference the callback
+ * implementation on the client side.
+ *
+ * @return retval operation completion status.
+ */
+ setCallback(IStreamOutCallback callback) generates (Result retval);
+
+ /**
+ * Clears the callback previously set via 'setCallback' method.
+ *
+ * Warning: failure to call this method results in callback implementation
+ * on the client side being held until the HAL server termination.
+ *
+ * If no callback was previously set, the method should be a no-op
+ * and return OK.
+ *
+ * @return retval operation completion status: OK or NOT_SUPPORTED.
+ */
+ clearCallback() generates (Result retval);
+
+ /**
+ * Returns whether HAL supports pausing and resuming of streams.
+ *
+ * @return supportsPause true if pausing is supported.
+ * @return supportsResume true if resume is supported.
+ */
+ supportsPauseAndResume()
+ generates (bool supportsPause, bool supportsResume);
+
+ /**
+ * Notifies the audio driver to stop playback; however, the queued buffers
+ * are retained by the hardware. Useful for implementing pause/resume. The
+ * implementation may be empty if not supported, but it must be implemented
+ * for hardware with non-trivial latency. In the pause state, some audio
+ * hardware may still be using power. Client code may consider calling
+ * 'suspend' after a timeout to prevent that excess power usage.
+ *
+ * Implementation of this function is mandatory for offloaded playback.
+ *
+ * @return retval operation completion status.
+ */
+ pause() generates (Result retval);
+
+ /**
+ * Notifies the audio driver to resume playback following a pause.
+ * Returns error INVALID_STATE if called without matching pause.
+ *
+ * Implementation of this function is mandatory for offloaded playback.
+ *
+ * @return retval operation completion status.
+ */
+ resume() generates (Result retval);
+
+ /**
+ * Returns whether HAL supports draining of streams.
+ *
+ * @return supports true if draining is supported.
+ */
+ supportsDrain() generates (bool supports);
+
+ /**
+ * Requests notification when data buffered by the driver/hardware has been
+ * played. If 'setCallback' has previously been called to enable
+ * non-blocking mode, then 'drain' must not block, instead it must return
+ * quickly and completion of the drain is notified through the callback. If
+ * 'setCallback' has not been called, then 'drain' must block until
+ * completion.
+ *
+ * If 'type' is 'ALL', the drain completes when all previously written data
+ * has been played.
+ *
+ * If 'type' is 'EARLY_NOTIFY', the drain completes shortly before all data
+ * for the current track has played to allow time for the framework to
+ * perform a gapless track switch.
+ *
+ * Drain must return immediately on 'stop' and 'flush' calls.
+ *
+ * Implementation of this function is mandatory for offloaded playback.
+ *
+ * @param type type of drain.
+ * @return retval operation completion status.
+ */
+ drain(AudioDrain type) generates (Result retval);
+
+ /**
+ * Notifies the audio driver to flush the queued data. The stream must
+ * already be paused before calling 'flush'.
+ * Optional method
+ *
+ * Implementation of this function is mandatory for offloaded playback.
+ *
+ * @return retval operation completion status.
+ */
+ flush() generates (Result retval);
+
+ /**
+ * Return a recent count of the number of audio frames presented to an
+ * external observer. This excludes frames which have been written but are
+ * still in the pipeline. The count is not reset to zero when output enters
+ * standby. Also returns the value of CLOCK_MONOTONIC as of this
+ * presentation count. The returned count is expected to be 'recent', but
+ * does not need to be the most recent possible value. However, the
+ * associated time must correspond to whatever count is returned.
+ *
+ * Example: assume that N+M frames have been presented, where M is a 'small'
+ * number. Then it is permissible to return N instead of N+M, and the
+ * timestamp must correspond to N rather than N+M. The terms 'recent' and
+ * 'small' are not defined. They reflect the quality of the implementation.
+ *
+ * Optional method
+ *
+ * @return retval operation completion status.
+ * @return frames count of presented audio frames.
+ * @return timeStamp associated clock time.
+ */
+ getPresentationPosition()
+ generates (Result retval, uint64_t frames, TimeSpec timeStamp);
+
+ /**
+ * Selects a presentation for decoding from a next generation media stream
+ * (as defined per ETSI TS 103 190-2) and a program within the presentation.
+ * Optional method
+ *
+ * @param presentationId selected audio presentation.
+ * @param programId refinement for the presentation.
+ * @return retval operation completion status.
+ */
+ selectPresentation(int32_t presentationId, int32_t programId)
+ generates (Result retval);
+};
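For illustration only: a sketch of polling the presentation position, assuming the generated C++ bindings; the frame count and timestamp are only meaningful when retval is OK.

    // Hypothetical sketch around IStreamOut::getPresentationPosition.
    #include <android/hardware/audio/6.0/IStreamOut.h>
    #include <android/hardware/audio/common/6.0/types.h>

    using namespace ::android::hardware::audio::V6_0;
    using namespace ::android::hardware::audio::common::V6_0;
    using ::android::sp;

    bool queryPresentationPosition(const sp<IStreamOut>& out,
                                   uint64_t* frames, TimeSpec* timeStamp) {
        Result res = Result::NOT_SUPPORTED;
        out->getPresentationPosition([&](Result r, uint64_t f, const TimeSpec& t) {
            res = r;
            if (r == Result::OK) { *frames = f; *timeStamp = t; }
        });
        return res == Result::OK;
    }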
diff --git a/audio/6.0/IStreamOutCallback.hal b/audio/6.0/IStreamOutCallback.hal
new file mode 100644
index 0000000..e393d9a
--- /dev/null
+++ b/audio/6.0/IStreamOutCallback.hal
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+/**
+ * Asynchronous write callback interface.
+ */
+interface IStreamOutCallback {
+ /**
+ * Non-blocking write completed.
+ */
+ oneway onWriteReady();
+
+ /**
+ * Drain completed.
+ */
+ oneway onDrainReady();
+
+ /**
+ * Stream hit an error.
+ */
+ oneway onError();
+};
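For illustration only: a sketch of a client-side callback implementation for non-blocking writes and drains, registered through IStreamOut::setCallback and released with clearCallback, assuming the generated C++ bindings.

    // Hypothetical callback implementation; the HAL invokes these oneway methods.
    #include <android/hardware/audio/6.0/IStreamOutCallback.h>

    using namespace ::android::hardware::audio::V6_0;
    using ::android::hardware::Return;
    using ::android::hardware::Void;
    using ::android::sp;

    struct StreamOutCallback : public IStreamOutCallback {
        Return<void> onWriteReady() override { /* wake the writer thread */ return Void(); }
        Return<void> onDrainReady() override { /* gapless switch may start */ return Void(); }
        Return<void> onError() override { /* tear the stream down */ return Void(); }
    };

    // Typical use: out->setCallback(new StreamOutCallback()); ... out->clearCallback();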
diff --git a/audio/6.0/config/Android.bp b/audio/6.0/config/Android.bp
new file mode 100644
index 0000000..182dfcc
--- /dev/null
+++ b/audio/6.0/config/Android.bp
@@ -0,0 +1,7 @@
+
+xsd_config {
+ name: "audio_policy_configuration_V6_0",
+ srcs: ["audio_policy_configuration.xsd"],
+ package_name: "audio.policy.configuration.V6_0",
+}
+
diff --git a/audio/6.0/config/api/current.txt b/audio/6.0/config/api/current.txt
new file mode 100644
index 0000000..7aa147c
--- /dev/null
+++ b/audio/6.0/config/api/current.txt
@@ -0,0 +1,422 @@
+// Signature format: 2.0
+package audio.policy.configuration.V6_0 {
+
+ public class AttachedDevices {
+ ctor public AttachedDevices();
+ method public java.util.List<java.lang.String> getItem();
+ }
+
+ public enum AudioDevice {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_AMBIENT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_AUX_DIGITAL;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_BACK_MIC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_BLUETOOTH_BLE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_BUILTIN_MIC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_BUS;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_COMMUNICATION;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_DEFAULT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_ECHO_REFERENCE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_FM_TUNER;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_HDMI;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_HDMI_ARC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_IP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_LINE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_LOOPBACK;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_PROXY;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_SPDIF;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_STUB;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_TELEPHONY_RX;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_TV_TUNER;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_USB_ACCESSORY;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_USB_DEVICE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_USB_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_VOICE_CALL;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_IN_WIRED_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_NONE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_AUX_LINE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_BUS;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_DEFAULT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_EARPIECE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_ECHO_CANCELLER;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_FM;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_HDMI;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_HDMI_ARC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_HEARING_AID;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_IP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_LINE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_PROXY;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_SPDIF;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_SPEAKER;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_STUB;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_TELEPHONY_TX;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_USB_ACCESSORY;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_USB_DEVICE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_USB_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioDevice AUDIO_DEVICE_OUT_WIRED_HEADSET;
+ }
+
+ public enum AudioFormat {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADIF;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_ELD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_ERLC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_HE_V1;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_HE_V2;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_LC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_LD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_LTP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_MAIN;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_SCALABLE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_SSR;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ADTS_XHE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ELD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_ERLC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_HE_V1;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_HE_V2;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_LATM;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_LATM_HE_V1;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_LATM_HE_V2;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_LATM_LC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_LC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_LD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_LTP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_MAIN;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_SCALABLE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_SSR;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AAC_XHE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AC3;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AC4;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_ALAC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AMR_NB;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AMR_WB;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_AMR_WB_PLUS;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_APE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_APTX;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_APTX_ADAPTIVE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_APTX_HD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_APTX_TWSP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_CELT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_DOLBY_TRUEHD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_DSD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_DTS;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_DTS_HD;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_EVRC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_EVRCB;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_EVRCNW;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_EVRCWB;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_E_AC3;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_E_AC3_JOC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_FLAC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_HE_AAC_V1;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_HE_AAC_V2;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_IEC61937;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_LDAC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_LHDC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_LHDC_LL;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_MAT_1_0;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_MAT_2_0;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_MAT_2_1;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_MP2;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_MP3;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_OPUS;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_PCM_16_BIT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_PCM_32_BIT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_PCM_8_24_BIT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_PCM_8_BIT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_PCM_FLOAT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_QCELP;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_SBC;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_VORBIS;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_WMA;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioFormat AUDIO_FORMAT_WMA_PRO;
+ }
+
+ public class AudioPolicyConfiguration {
+ ctor public AudioPolicyConfiguration();
+ method public audio.policy.configuration.V6_0.GlobalConfiguration getGlobalConfiguration();
+ method public java.util.List<audio.policy.configuration.V6_0.Modules> getModules();
+ method public audio.policy.configuration.V6_0.SurroundSound getSurroundSound();
+ method public audio.policy.configuration.V6_0.Version getVersion();
+ method public java.util.List<audio.policy.configuration.V6_0.Volumes> getVolumes();
+ method public void setGlobalConfiguration(audio.policy.configuration.V6_0.GlobalConfiguration);
+ method public void setSurroundSound(audio.policy.configuration.V6_0.SurroundSound);
+ method public void setVersion(audio.policy.configuration.V6_0.Version);
+ }
+
+ public enum AudioUsage {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_ALARM;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_ASSISTANCE_SONIFICATION;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_ASSISTANT;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_GAME;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_MEDIA;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_NOTIFICATION;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_UNKNOWN;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_VIRTUAL_SOURCE;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION;
+ enum_constant public static final audio.policy.configuration.V6_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
+ }
+
+ public enum DeviceCategory {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.DeviceCategory DEVICE_CATEGORY_EARPIECE;
+ enum_constant public static final audio.policy.configuration.V6_0.DeviceCategory DEVICE_CATEGORY_EXT_MEDIA;
+ enum_constant public static final audio.policy.configuration.V6_0.DeviceCategory DEVICE_CATEGORY_HEADSET;
+ enum_constant public static final audio.policy.configuration.V6_0.DeviceCategory DEVICE_CATEGORY_HEARING_AID;
+ enum_constant public static final audio.policy.configuration.V6_0.DeviceCategory DEVICE_CATEGORY_SPEAKER;
+ }
+
+ public class DevicePorts {
+ ctor public DevicePorts();
+ method public java.util.List<audio.policy.configuration.V6_0.DevicePorts.DevicePort> getDevicePort();
+ }
+
+ public static class DevicePorts.DevicePort {
+ ctor public DevicePorts.DevicePort();
+ method public String getAddress();
+ method public java.util.List<audio.policy.configuration.V6_0.AudioFormat> getEncodedFormats();
+ method public audio.policy.configuration.V6_0.Gains getGains();
+ method public java.util.List<audio.policy.configuration.V6_0.Profile> getProfile();
+ method public audio.policy.configuration.V6_0.Role getRole();
+ method public String getTagName();
+ method public String getType();
+ method public boolean get_default();
+ method public void setAddress(String);
+ method public void setEncodedFormats(java.util.List<audio.policy.configuration.V6_0.AudioFormat>);
+ method public void setGains(audio.policy.configuration.V6_0.Gains);
+ method public void setRole(audio.policy.configuration.V6_0.Role);
+ method public void setTagName(String);
+ method public void setType(String);
+ method public void set_default(boolean);
+ }
+
+ public enum GainMode {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.GainMode AUDIO_GAIN_MODE_CHANNELS;
+ enum_constant public static final audio.policy.configuration.V6_0.GainMode AUDIO_GAIN_MODE_JOINT;
+ enum_constant public static final audio.policy.configuration.V6_0.GainMode AUDIO_GAIN_MODE_RAMP;
+ }
+
+ public class Gains {
+ ctor public Gains();
+ method public java.util.List<audio.policy.configuration.V6_0.Gains.Gain> getGain();
+ }
+
+ public static class Gains.Gain {
+ ctor public Gains.Gain();
+ method public String getChannel_mask();
+ method public int getDefaultValueMB();
+ method public int getMaxRampMs();
+ method public int getMaxValueMB();
+ method public int getMinRampMs();
+ method public int getMinValueMB();
+ method public audio.policy.configuration.V6_0.GainMode getMode();
+ method public String getName();
+ method public int getStepValueMB();
+ method public void setChannel_mask(String);
+ method public void setDefaultValueMB(int);
+ method public void setMaxRampMs(int);
+ method public void setMaxValueMB(int);
+ method public void setMinRampMs(int);
+ method public void setMinValueMB(int);
+ method public void setMode(audio.policy.configuration.V6_0.GainMode);
+ method public void setName(String);
+ method public void setStepValueMB(int);
+ }
+
+ public class GlobalConfiguration {
+ ctor public GlobalConfiguration();
+ method public boolean getSpeaker_drc_enabled();
+ method public void setSpeaker_drc_enabled(boolean);
+ }
+
+ public enum HalVersion {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.HalVersion _2_0;
+ enum_constant public static final audio.policy.configuration.V6_0.HalVersion _3_0;
+ }
+
+ public class MixPorts {
+ ctor public MixPorts();
+ method public java.util.List<audio.policy.configuration.V6_0.MixPorts.MixPort> getMixPort();
+ }
+
+ public static class MixPorts.MixPort {
+ ctor public MixPorts.MixPort();
+ method public String getFlags();
+ method public audio.policy.configuration.V6_0.Gains getGains();
+ method public long getMaxActiveCount();
+ method public long getMaxOpenCount();
+ method public String getName();
+ method public java.util.List<audio.policy.configuration.V6_0.AudioUsage> getPreferredUsage();
+ method public java.util.List<audio.policy.configuration.V6_0.Profile> getProfile();
+ method public audio.policy.configuration.V6_0.Role getRole();
+ method public void setFlags(String);
+ method public void setGains(audio.policy.configuration.V6_0.Gains);
+ method public void setMaxActiveCount(long);
+ method public void setMaxOpenCount(long);
+ method public void setName(String);
+ method public void setPreferredUsage(java.util.List<audio.policy.configuration.V6_0.AudioUsage>);
+ method public void setRole(audio.policy.configuration.V6_0.Role);
+ }
+
+ public enum MixType {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.MixType mix;
+ enum_constant public static final audio.policy.configuration.V6_0.MixType mux;
+ }
+
+ public class Modules {
+ ctor public Modules();
+ method public java.util.List<audio.policy.configuration.V6_0.Modules.Module> getModule();
+ }
+
+ public static class Modules.Module {
+ ctor public Modules.Module();
+ method public audio.policy.configuration.V6_0.AttachedDevices getAttachedDevices();
+ method public String getDefaultOutputDevice();
+ method public audio.policy.configuration.V6_0.DevicePorts getDevicePorts();
+ method public audio.policy.configuration.V6_0.HalVersion getHalVersion();
+ method public audio.policy.configuration.V6_0.MixPorts getMixPorts();
+ method public String getName();
+ method public audio.policy.configuration.V6_0.Routes getRoutes();
+ method public void setAttachedDevices(audio.policy.configuration.V6_0.AttachedDevices);
+ method public void setDefaultOutputDevice(String);
+ method public void setDevicePorts(audio.policy.configuration.V6_0.DevicePorts);
+ method public void setHalVersion(audio.policy.configuration.V6_0.HalVersion);
+ method public void setMixPorts(audio.policy.configuration.V6_0.MixPorts);
+ method public void setName(String);
+ method public void setRoutes(audio.policy.configuration.V6_0.Routes);
+ }
+
+ public class Profile {
+ ctor public Profile();
+ method public String getChannelMasks();
+ method public String getFormat();
+ method public String getName();
+ method public String getSamplingRates();
+ method public void setChannelMasks(String);
+ method public void setFormat(String);
+ method public void setName(String);
+ method public void setSamplingRates(String);
+ }
+
+ public class Reference {
+ ctor public Reference();
+ method public String getName();
+ method public java.util.List<java.lang.String> getPoint();
+ method public void setName(String);
+ }
+
+ public enum Role {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.Role sink;
+ enum_constant public static final audio.policy.configuration.V6_0.Role source;
+ }
+
+ public class Routes {
+ ctor public Routes();
+ method public java.util.List<audio.policy.configuration.V6_0.Routes.Route> getRoute();
+ }
+
+ public static class Routes.Route {
+ ctor public Routes.Route();
+ method public String getSink();
+ method public String getSources();
+ method public audio.policy.configuration.V6_0.MixType getType();
+ method public void setSink(String);
+ method public void setSources(String);
+ method public void setType(audio.policy.configuration.V6_0.MixType);
+ }
+
+ public enum Stream {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_ACCESSIBILITY;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_ALARM;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_BLUETOOTH_SCO;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_DTMF;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_ENFORCED_AUDIBLE;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_MUSIC;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_NOTIFICATION;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_PATCH;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_REROUTING;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_RING;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_SYSTEM;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_TTS;
+ enum_constant public static final audio.policy.configuration.V6_0.Stream AUDIO_STREAM_VOICE_CALL;
+ }
+
+ public class SurroundFormats {
+ ctor public SurroundFormats();
+ method public java.util.List<audio.policy.configuration.V6_0.SurroundFormats.Format> getFormat();
+ }
+
+ public static class SurroundFormats.Format {
+ ctor public SurroundFormats.Format();
+ method public audio.policy.configuration.V6_0.AudioFormat getName();
+ method public java.util.List<audio.policy.configuration.V6_0.AudioFormat> getSubformats();
+ method public void setName(audio.policy.configuration.V6_0.AudioFormat);
+ method public void setSubformats(java.util.List<audio.policy.configuration.V6_0.AudioFormat>);
+ }
+
+ public class SurroundSound {
+ ctor public SurroundSound();
+ method public audio.policy.configuration.V6_0.SurroundFormats getFormats();
+ method public void setFormats(audio.policy.configuration.V6_0.SurroundFormats);
+ }
+
+ public enum Version {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V6_0.Version _1_0;
+ }
+
+ public class Volume {
+ ctor public Volume();
+ method public audio.policy.configuration.V6_0.DeviceCategory getDeviceCategory();
+ method public java.util.List<java.lang.String> getPoint();
+ method public String getRef();
+ method public audio.policy.configuration.V6_0.Stream getStream();
+ method public void setDeviceCategory(audio.policy.configuration.V6_0.DeviceCategory);
+ method public void setRef(String);
+ method public void setStream(audio.policy.configuration.V6_0.Stream);
+ }
+
+ public class Volumes {
+ ctor public Volumes();
+ method public java.util.List<audio.policy.configuration.V6_0.Reference> getReference();
+ method public java.util.List<audio.policy.configuration.V6_0.Volume> getVolume();
+ }
+
+ public class XmlParser {
+ ctor public XmlParser();
+ method public static audio.policy.configuration.V6_0.AudioPolicyConfiguration read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ }
+
+}
+
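[Editor's note, not part of the change] The generated audio.policy.configuration.V6_0 API listed above gives a typed reader for audio_policy_configuration.xml. A minimal sketch of how a host-side tool might use it, assuming the generated classes are on the classpath; the class name PolicyConfigDump and the file path passed in args[0] are hypothetical:

    import java.io.FileInputStream;
    import audio.policy.configuration.V6_0.AudioPolicyConfiguration;
    import audio.policy.configuration.V6_0.Modules;
    import audio.policy.configuration.V6_0.XmlParser;

    public class PolicyConfigDump {
        public static void main(String[] args) throws Exception {
            // args[0] is a path to an audio_policy_configuration.xml (hypothetical usage).
            try (FileInputStream in = new FileInputStream(args[0])) {
                AudioPolicyConfiguration config = XmlParser.read(in);
                // Walk the parsed modules and print their name and HAL version.
                for (Modules modules : config.getModules()) {
                    for (Modules.Module module : modules.getModule()) {
                        System.out.println("Module " + module.getName()
                                + " halVersion=" + module.getHalVersion().getRawName());
                    }
                }
            }
        }
    }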
diff --git a/audio/6.0/config/api/last_current.txt b/audio/6.0/config/api/last_current.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/audio/6.0/config/api/last_current.txt
diff --git a/audio/6.0/config/api/last_removed.txt b/audio/6.0/config/api/last_removed.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/audio/6.0/config/api/last_removed.txt
diff --git a/audio/6.0/config/api/removed.txt b/audio/6.0/config/api/removed.txt
new file mode 100644
index 0000000..d802177
--- /dev/null
+++ b/audio/6.0/config/api/removed.txt
@@ -0,0 +1 @@
+// Signature format: 2.0
diff --git a/audio/6.0/config/audio_policy_configuration.xsd b/audio/6.0/config/audio_policy_configuration.xsd
new file mode 100644
index 0000000..0dc89bb
--- /dev/null
+++ b/audio/6.0/config/audio_policy_configuration.xsd
@@ -0,0 +1,624 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- TODO: define a targetNamespace. Note that it will break backward compatibility -->

+<xs:schema version="2.0"
+ elementFormDefault="qualified"
+ attributeFormDefault="unqualified"
+ xmlns:xs="http://www.w3.org/2001/XMLSchema">
+ <!-- List the config versions supported by audio policy. -->
+ <xs:simpleType name="version">
+ <xs:restriction base="xs:decimal">
+ <xs:enumeration value="1.0"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:simpleType name="halVersion">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Version of the interface the hal implements.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:restriction base="xs:decimal">
+ <!-- List of HAL versions supported by the framework. -->
+ <xs:enumeration value="2.0"/>
+ <xs:enumeration value="3.0"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:element name="audioPolicyConfiguration">
+ <xs:complexType>
+ <xs:sequence>
+ <xs:element name="globalConfiguration" type="globalConfiguration"/>
+ <xs:element name="modules" type="modules" maxOccurs="unbounded"/>
+ <xs:element name="volumes" type="volumes" maxOccurs="unbounded"/>
+ <xs:element name="surroundSound" type="surroundSound" minOccurs="0" />
+ </xs:sequence>
+ <xs:attribute name="version" type="version"/>
+ </xs:complexType>
+ <xs:key name="moduleNameKey">
+ <xs:selector xpath="modules/module"/>
+ <xs:field xpath="@name"/>
+ </xs:key>
+ <xs:unique name="volumeTargetUniqueness">
+ <xs:selector xpath="volumes/volume"/>
+ <xs:field xpath="@stream"/>
+ <xs:field xpath="@deviceCategory"/>
+ </xs:unique>
+ <xs:key name="volumeCurveNameKey">
+ <xs:selector xpath="volumes/reference"/>
+ <xs:field xpath="@name"/>
+ </xs:key>
+ <xs:keyref name="volumeCurveRef" refer="volumeCurveNameKey">
+ <xs:selector xpath="volumes/volume"/>
+ <xs:field xpath="@ref"/>
+ </xs:keyref>
+ </xs:element>
+ <xs:complexType name="globalConfiguration">
+ <xs:attribute name="speaker_drc_enabled" type="xs:boolean" use="required"/>
+ </xs:complexType>
+ <xs:complexType name="modules">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ There should be one section per audio HW module present on the platform.
+ Each <module/> contains two mandatory attributes: “halVersion” and “name”.
+ The module "name" is the same as in previous .conf file.
+ Each module must contain the following sections:
+ - <devicePorts/>: a list of device descriptors for all
+ input and output devices accessible via this module.
+ This contains both permanently attached devices and removable devices.
+ - <mixPorts/>: listing all output and input streams exposed by the audio HAL
+ - <routes/>: list of possible connections between input
+ and output devices or between stream and devices.
+ A <route/> is defined by a set of 3 attributes:
+ -"type": mux|mix means all sources are mutual exclusive (mux) or can be mixed (mix)
+ -"sink": the sink involved in this route
+ -"sources": all the sources than can be connected to the sink via this route
+ - <attachedDevices/>: permanently attached devices.
+ The attachedDevices section is a list of devices names.
+ Their names correspond to device names defined in "devicePorts" section.
+ - <defaultOutputDevice/> is the device to be used when no policy rule applies
+ </xs:documentation>
+ </xs:annotation>
+ <xs:sequence>
+ <xs:element name="module" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:sequence>
+ <xs:element name="attachedDevices" type="attachedDevices" minOccurs="0">
+ <xs:unique name="attachedDevicesUniqueness">
+ <xs:selector xpath="item"/>
+ <xs:field xpath="."/>
+ </xs:unique>
+ </xs:element>
+ <xs:element name="defaultOutputDevice" type="xs:token" minOccurs="0"/>
+ <xs:element name="mixPorts" type="mixPorts" minOccurs="0"/>
+ <xs:element name="devicePorts" type="devicePorts" minOccurs="0"/>
+ <xs:element name="routes" type="routes" minOccurs="0"/>
+ </xs:sequence>
+ <xs:attribute name="name" type="xs:string" use="required"/>
+ <xs:attribute name="halVersion" type="halVersion" use="required"/>
+ </xs:complexType>
+ <xs:unique name="mixPortNameUniqueness">
+ <xs:selector xpath="mixPorts/mixPort"/>
+ <xs:field xpath="@name"/>
+ </xs:unique>
+ <xs:key name="devicePortNameKey">
+ <xs:selector xpath="devicePorts/devicePort"/>
+ <xs:field xpath="@tagName"/>
+ </xs:key>
+ <xs:unique name="devicePortUniqueness">
+ <xs:selector xpath="devicePorts/devicePort"/>
+ <xs:field xpath="@type"/>
+ <xs:field xpath="@address"/>
+ </xs:unique>
+ <xs:keyref name="defaultOutputDeviceRef" refer="devicePortNameKey">
+ <xs:selector xpath="defaultOutputDevice"/>
+ <xs:field xpath="."/>
+ </xs:keyref>
+ <xs:keyref name="attachedDeviceRef" refer="devicePortNameKey">
+ <xs:selector xpath="attachedDevices/item"/>
+ <xs:field xpath="."/>
+ </xs:keyref>
+ <!-- The following 3 constraints try to make sure each sink port
+ is referenced in one and only one route. -->
+ <xs:key name="routeSinkKey">
+ <!-- predicate [@type='sink'] does not work in xsd 1.0 -->
+ <xs:selector xpath="devicePorts/devicePort|mixPorts/mixPort"/>
+ <xs:field xpath="@tagName|@name"/>
+ </xs:key>
+ <xs:keyref name="routeSinkRef" refer="routeSinkKey">
+ <xs:selector xpath="routes/route"/>
+ <xs:field xpath="@sink"/>
+ </xs:keyref>
+ <xs:unique name="routeUniqueness">
+ <xs:selector xpath="routes/route"/>
+ <xs:field xpath="@sink"/>
+ </xs:unique>
+ </xs:element>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="attachedDevices">
+ <xs:sequence>
+ <xs:element name="item" type="xs:token" minOccurs="0" maxOccurs="unbounded"/>
+ </xs:sequence>
+ </xs:complexType>
+ <!-- TODO: separate values by space for better xsd validations. -->
+ <xs:simpleType name="audioInOutFlags">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ "|" separated list of audio_output_flags_t or audio_input_flags_t.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:restriction base="xs:string">
+ <xs:pattern value="|[_A-Z]+(\|[_A-Z]+)*"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:simpleType name="role">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="sink"/>
+ <xs:enumeration value="source"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:complexType name="mixPorts">
+ <xs:sequence>
+ <xs:element name="mixPort" minOccurs="0" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:sequence>
+ <xs:element name="profile" type="profile" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="gains" type="gains" minOccurs="0"/>
+ </xs:sequence>
+ <xs:attribute name="name" type="xs:token" use="required"/>
+ <xs:attribute name="role" type="role" use="required"/>
+ <xs:attribute name="flags" type="audioInOutFlags"/>
+ <xs:attribute name="maxOpenCount" type="xs:unsignedInt"/>
+ <xs:attribute name="maxActiveCount" type="xs:unsignedInt"/>
+ <xs:attribute name="preferredUsage" type="audioUsageList">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ When choosing the mixPort of an audio track, the audioPolicy
+ first considers the mixPorts whose preferredUsage includes
+ the track's preferred AudioUsage.
+ If none support the track format, the other mixPorts are considered.
+ Eg: a <mixPort preferredUsage="AUDIO_USAGE_MEDIA" /> will receive
+ the audio of all apps playing with a MEDIA usage.
+ It may receive audio from ALARM if there is no compatible
+ <mixPort preferredUsage="AUDIO_USAGE_ALARM" />.
+ </xs:documentation>
+ </xs:annotation>
+ </xs:attribute>
+ </xs:complexType>
+ <xs:unique name="mixPortProfileUniqueness">
+ <xs:selector xpath="profile"/>
+ <xs:field xpath="format"/>
+ <xs:field xpath="samplingRate"/>
+ <xs:field xpath="channelMasks"/>
+ </xs:unique>
+ <xs:unique name="mixPortGainUniqueness">
+ <xs:selector xpath="gains/gain"/>
+ <xs:field xpath="@name"/>
+ </xs:unique>
+ </xs:element>
+ </xs:sequence>
+ </xs:complexType>
+ <!-- Enum values of audio_device_t in audio.h
+ TODO: generate from hidl to avoid manual sync.
+ TODO: separate source and sink in the xml for better xsd validations. -->
+ <xs:simpleType name="audioDevice">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_DEVICE_NONE"/>
+
+ <xs:enumeration value="AUDIO_DEVICE_OUT_EARPIECE"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_WIRED_HEADPHONE"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_AUX_DIGITAL"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_HDMI"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_USB_ACCESSORY"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_USB_DEVICE"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_REMOTE_SUBMIX"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_TELEPHONY_TX"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_LINE"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_HDMI_ARC"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_SPDIF"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_FM"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_AUX_LINE"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_SPEAKER_SAFE"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_IP"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_BUS"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_PROXY"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_USB_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_HEARING_AID"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_ECHO_CANCELLER"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_DEFAULT"/>
+ <xs:enumeration value="AUDIO_DEVICE_OUT_STUB"/>
+
+ <!-- Due to the xml format, IN types can not be separated from OUT types -->
+ <xs:enumeration value="AUDIO_DEVICE_IN_COMMUNICATION"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_AMBIENT"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_BUILTIN_MIC"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_WIRED_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_AUX_DIGITAL"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_HDMI"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_VOICE_CALL"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_TELEPHONY_RX"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_BACK_MIC"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_REMOTE_SUBMIX"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_USB_ACCESSORY"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_USB_DEVICE"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_FM_TUNER"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_TV_TUNER"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_LINE"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_SPDIF"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_A2DP"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_LOOPBACK"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_IP"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_BUS"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_PROXY"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_USB_HEADSET"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_BLUETOOTH_BLE"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_HDMI_ARC"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_ECHO_REFERENCE"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_DEFAULT"/>
+ <xs:enumeration value="AUDIO_DEVICE_IN_STUB"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:simpleType name="vendorExtension">
+ <!-- Vendor extension names must be prefixed by "VX_" to distinguish them from AOSP values.
+ Vendors are encouraged to namespace their module names to avoid conflicts.
+ Example for a hypothetical Google virtual reality device:
+ <devicePort tagName="VR" type="VX_GOOGLE_VR" role="sink">
+ -->
+ <xs:restriction base="xs:string">
+ <xs:pattern value="VX_[_a-zA-Z0-9]+"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:simpleType name="extendableAudioDevice">
+ <xs:union memberTypes="audioDevice vendorExtension"/>
+ </xs:simpleType>
+ <!-- Enum values of audio_format_t in audio.h
+ TODO: generate from hidl to avoid manual sync. -->
+ <xs:simpleType name="audioFormat">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_FORMAT_PCM_16_BIT" />
+ <xs:enumeration value="AUDIO_FORMAT_PCM_8_BIT"/>
+ <xs:enumeration value="AUDIO_FORMAT_PCM_32_BIT"/>
+ <xs:enumeration value="AUDIO_FORMAT_PCM_8_24_BIT"/>
+ <xs:enumeration value="AUDIO_FORMAT_PCM_FLOAT"/>
+ <xs:enumeration value="AUDIO_FORMAT_PCM_24_BIT_PACKED"/>
+ <xs:enumeration value="AUDIO_FORMAT_MP3"/>
+ <xs:enumeration value="AUDIO_FORMAT_AMR_NB"/>
+ <xs:enumeration value="AUDIO_FORMAT_AMR_WB"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_MAIN"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_LC"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_SSR"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_LTP"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_HE_V1"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_SCALABLE"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ERLC"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_LD"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_HE_V2"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ELD"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_MAIN"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LC"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_SSR"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LTP"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_HE_V1"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_SCALABLE"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_ERLC"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_LD"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_HE_V2"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_ELD"/>
+ <xs:enumeration value="AUDIO_FORMAT_VORBIS"/>
+ <xs:enumeration value="AUDIO_FORMAT_HE_AAC_V1"/>
+ <xs:enumeration value="AUDIO_FORMAT_HE_AAC_V2"/>
+ <xs:enumeration value="AUDIO_FORMAT_OPUS"/>
+ <xs:enumeration value="AUDIO_FORMAT_AC3"/>
+ <xs:enumeration value="AUDIO_FORMAT_E_AC3"/>
+ <xs:enumeration value="AUDIO_FORMAT_DTS"/>
+ <xs:enumeration value="AUDIO_FORMAT_DTS_HD"/>
+ <xs:enumeration value="AUDIO_FORMAT_IEC61937"/>
+ <xs:enumeration value="AUDIO_FORMAT_DOLBY_TRUEHD"/>
+ <xs:enumeration value="AUDIO_FORMAT_EVRC"/>
+ <xs:enumeration value="AUDIO_FORMAT_EVRCB"/>
+ <xs:enumeration value="AUDIO_FORMAT_EVRCWB"/>
+ <xs:enumeration value="AUDIO_FORMAT_EVRCNW"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADIF"/>
+ <xs:enumeration value="AUDIO_FORMAT_WMA"/>
+ <xs:enumeration value="AUDIO_FORMAT_WMA_PRO"/>
+ <xs:enumeration value="AUDIO_FORMAT_AMR_WB_PLUS"/>
+ <xs:enumeration value="AUDIO_FORMAT_MP2"/>
+ <xs:enumeration value="AUDIO_FORMAT_QCELP"/>
+ <xs:enumeration value="AUDIO_FORMAT_DSD"/>
+ <xs:enumeration value="AUDIO_FORMAT_FLAC"/>
+ <xs:enumeration value="AUDIO_FORMAT_ALAC"/>
+ <xs:enumeration value="AUDIO_FORMAT_APE"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS"/>
+ <xs:enumeration value="AUDIO_FORMAT_SBC"/>
+ <xs:enumeration value="AUDIO_FORMAT_APTX"/>
+ <xs:enumeration value="AUDIO_FORMAT_APTX_HD"/>
+ <xs:enumeration value="AUDIO_FORMAT_AC4"/>
+ <xs:enumeration value="AUDIO_FORMAT_LDAC"/>
+ <xs:enumeration value="AUDIO_FORMAT_E_AC3_JOC"/>
+ <xs:enumeration value="AUDIO_FORMAT_MAT_1_0"/>
+ <xs:enumeration value="AUDIO_FORMAT_MAT_2_0"/>
+ <xs:enumeration value="AUDIO_FORMAT_MAT_2_1"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_XHE"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_ADTS_XHE"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_LATM"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_LATM_LC"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_LATM_HE_V1"/>
+ <xs:enumeration value="AUDIO_FORMAT_AAC_LATM_HE_V2"/>
+ <xs:enumeration value="AUDIO_FORMAT_CELT"/>
+ <xs:enumeration value="AUDIO_FORMAT_APTX_ADAPTIVE"/>
+ <xs:enumeration value="AUDIO_FORMAT_LHDC"/>
+ <xs:enumeration value="AUDIO_FORMAT_LHDC_LL"/>
+ <xs:enumeration value="AUDIO_FORMAT_APTX_TWSP"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:simpleType name="extendableAudioFormat">
+ <xs:union memberTypes="audioFormat vendorExtension"/>
+ </xs:simpleType>
+ <!-- Enum values of audio::common::4_0::AudioUsage
+ TODO: generate from HIDL to avoid manual sync. -->
+ <xs:simpleType name="audioUsage">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_USAGE_UNKNOWN" />
+ <xs:enumeration value="AUDIO_USAGE_MEDIA" />
+ <xs:enumeration value="AUDIO_USAGE_VOICE_COMMUNICATION" />
+ <xs:enumeration value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING" />
+ <xs:enumeration value="AUDIO_USAGE_ALARM" />
+ <xs:enumeration value="AUDIO_USAGE_NOTIFICATION" />
+ <xs:enumeration value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE" />
+ <xs:enumeration value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY" />
+ <xs:enumeration value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE" />
+ <xs:enumeration value="AUDIO_USAGE_ASSISTANCE_SONIFICATION" />
+ <xs:enumeration value="AUDIO_USAGE_GAME" />
+ <xs:enumeration value="AUDIO_USAGE_VIRTUAL_SOURCE" />
+ <xs:enumeration value="AUDIO_USAGE_ASSISTANT" />
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:simpleType name="audioUsageList">
+ <xs:list itemType="audioUsage"/>
+ </xs:simpleType>
+ <!-- TODO: Change to a space separated list so that xsd can enforce correctness. -->
+ <xs:simpleType name="samplingRates">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="[0-9]+(,[0-9]+)*"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <!-- TODO: Change to a space separated list so that xsd can enforce correctness. -->
+ <xs:simpleType name="channelMask">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Comma (",") separated list of channel flags
+ from audio_channel_mask_t.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:restriction base="xs:string">
+ <xs:pattern value="[_A-Z][_A-Z0-9]*(,[_A-Z][_A-Z0-9]*)*"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:complexType name="profile">
+ <xs:attribute name="name" type="xs:token" use="optional"/>
+ <xs:attribute name="format" type="extendableAudioFormat" use="optional"/>
+ <xs:attribute name="samplingRates" type="samplingRates" use="optional"/>
+ <xs:attribute name="channelMasks" type="channelMask" use="optional"/>
+ </xs:complexType>
+ <xs:simpleType name="gainMode">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_GAIN_MODE_JOINT"/>
+ <xs:enumeration value="AUDIO_GAIN_MODE_CHANNELS"/>
+ <xs:enumeration value="AUDIO_GAIN_MODE_RAMP"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:complexType name="gains">
+ <xs:sequence>
+ <xs:element name="gain" minOccurs="0" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:attribute name="name" type="xs:token" use="required"/>
+ <xs:attribute name="mode" type="gainMode" use="required"/>
+ <xs:attribute name="channel_mask" type="channelMask" use="optional"/>
+ <xs:attribute name="minValueMB" type="xs:int" use="optional"/>
+ <xs:attribute name="maxValueMB" type="xs:int" use="optional"/>
+ <xs:attribute name="defaultValueMB" type="xs:int" use="optional"/>
+ <xs:attribute name="stepValueMB" type="xs:int" use="optional"/>
+ <xs:attribute name="minRampMs" type="xs:int" use="optional"/>
+ <xs:attribute name="maxRampMs" type="xs:int" use="optional"/>
+ </xs:complexType>
+ </xs:element>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="devicePorts">
+ <xs:sequence>
+ <xs:element name="devicePort" minOccurs="0" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:sequence>
+ <xs:element name="profile" type="profile" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="gains" type="gains" minOccurs="0"/>
+ </xs:sequence>
+ <xs:attribute name="tagName" type="xs:token" use="required"/>
+ <xs:attribute name="type" type="extendableAudioDevice" use="required"/>
+ <xs:attribute name="role" type="role" use="required"/>
+ <xs:attribute name="address" type="xs:string" use="optional" default=""/>
+ <!-- Note that XSD 1.0 can not check that only one device of a given type is marked as default. -->
+ <xs:attribute name="default" type="xs:boolean" use="optional">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ The default device will be used if multiple have the same type
+ and no explicit route request exists for a specific device of
+ that type.
+ </xs:documentation>
+ </xs:annotation>
+ </xs:attribute>
+ <xs:attribute name="encodedFormats" type="audioFormatsList" use="optional"
+ default="" />
+ </xs:complexType>
+ <xs:unique name="devicePortProfileUniqueness">
+ <xs:selector xpath="profile"/>
+ <xs:field xpath="format"/>
+ <xs:field xpath="samplingRate"/>
+ <xs:field xpath="channelMasks"/>
+ </xs:unique>
+ <xs:unique name="devicePortGainUniqueness">
+ <xs:selector xpath="gains/gain"/>
+ <xs:field xpath="@name"/>
+ </xs:unique>
+ </xs:element>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:simpleType name="mixType">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="mix"/>
+ <xs:enumeration value="mux"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:complexType name="routes">
+ <xs:sequence>
+ <xs:element name="route" minOccurs="0" maxOccurs="unbounded">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ List all available sources for a given sink.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:complexType>
+ <xs:attribute name="type" type="mixType" use="required"/>
+ <xs:attribute name="sink" type="xs:string" use="required"/>
+ <xs:attribute name="sources" type="xs:string" use="required"/>
+ </xs:complexType>
+ </xs:element>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="volumes">
+ <xs:sequence>
+ <xs:element name="volume" type="volume" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="reference" type="reference" minOccurs="0" maxOccurs="unbounded">
+ </xs:element>
+ </xs:sequence>
+ </xs:complexType>
+ <!-- TODO: Always require a ref for better xsd validation.
+ Currently a volume could have neither points nor a ref,
+ as this can not be forbidden by xsd 1.0.-->
+ <xs:simpleType name="volumePoint">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Comma separated pair of numbers.
+ The first one is the framework level (between 0 and 100).
+ The second one is the volume to send to the HAL.
+ The framework will interpolate volumes not specified.
+ There MUST be at least 2 points specified.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:restriction base="xs:string">
+ <xs:pattern value="([0-9]{1,2}|100),-?[0-9]+"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <!-- Enum values of audio_stream_type_t in audio-base.h
+ TODO: generate from hidl to avoid manual sync. -->
+ <xs:simpleType name="stream">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_STREAM_VOICE_CALL"/>
+ <xs:enumeration value="AUDIO_STREAM_SYSTEM"/>
+ <xs:enumeration value="AUDIO_STREAM_RING"/>
+ <xs:enumeration value="AUDIO_STREAM_MUSIC"/>
+ <xs:enumeration value="AUDIO_STREAM_ALARM"/>
+ <xs:enumeration value="AUDIO_STREAM_NOTIFICATION"/>
+ <xs:enumeration value="AUDIO_STREAM_BLUETOOTH_SCO"/>
+ <xs:enumeration value="AUDIO_STREAM_ENFORCED_AUDIBLE"/>
+ <xs:enumeration value="AUDIO_STREAM_DTMF"/>
+ <xs:enumeration value="AUDIO_STREAM_TTS"/>
+ <xs:enumeration value="AUDIO_STREAM_ACCESSIBILITY"/>
+ <xs:enumeration value="AUDIO_STREAM_REROUTING"/>
+ <xs:enumeration value="AUDIO_STREAM_PATCH"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <!-- Enum values of device_category from Volume.h.
+ TODO: generate from hidl to avoid manual sync. -->
+ <xs:simpleType name="deviceCategory">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="DEVICE_CATEGORY_HEADSET"/>
+ <xs:enumeration value="DEVICE_CATEGORY_SPEAKER"/>
+ <xs:enumeration value="DEVICE_CATEGORY_EARPIECE"/>
+ <xs:enumeration value="DEVICE_CATEGORY_EXT_MEDIA"/>
+ <xs:enumeration value="DEVICE_CATEGORY_HEARING_AID"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:complexType name="volume">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Volume section defines a volume curve for a given use case and device category.
+ It contains a list of points of this curve expressing the attenuation in Millibels
+ for a given volume index from 0 to 100.
+ <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-9600</point>
+ <point>100,0</point>
+ </volume>
+
+ It may also reference a reference/@name to avoid duplicating curves.
+ <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+ ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+ <point>0,-9600</point>
+ <point>100,0</point>
+ </reference>
+ </xs:documentation>
+ </xs:annotation>
+ <xs:sequence>
+ <xs:element name="point" type="volumePoint" minOccurs="0" maxOccurs="unbounded"/>
+ </xs:sequence>
+ <xs:attribute name="stream" type="stream"/>
+ <xs:attribute name="deviceCategory" type="deviceCategory"/>
+ <xs:attribute name="ref" type="xs:token" use="optional"/>
+ </xs:complexType>
+ <xs:complexType name="reference">
+ <xs:sequence>
+ <xs:element name="point" type="volumePoint" minOccurs="2" maxOccurs="unbounded"/>
+ </xs:sequence>
+ <xs:attribute name="name" type="xs:token" use="required"/>
+ </xs:complexType>
+ <xs:complexType name="surroundSound">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Surround Sound section provides configuration related to handling of
+ multi-channel formats.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:sequence>
+ <xs:element name="formats" type="surroundFormats"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:simpleType name="audioFormatsList">
+ <xs:list itemType="audioFormat" />
+ </xs:simpleType>
+ <xs:complexType name="surroundFormats">
+ <xs:sequence>
+ <xs:element name="format" minOccurs="0" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:attribute name="name" type="audioFormat" use="required"/>
+ <xs:attribute name="subformats" type="audioFormatsList" />
+ </xs:complexType>
+ </xs:element>
+ </xs:sequence>
+ </xs:complexType>
+</xs:schema>
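[Editor's note, not part of the change] The volume and volumePoint documentation in the schema above describes curves as "index,millibel" pairs that the framework interpolates between. A minimal sketch of that idea in Java, assuming simple linear interpolation (the actual AudioPolicyManager algorithm may differ), plus the standard conversion from millibels (1/100 dB) to a linear amplitude gain:

    public final class VolumeCurve {
        // Points as parsed from <point>index,millibel</point>, ordered by index (0..100).
        private final int[] indices;
        private final int[] millibels;

        public VolumeCurve(int[] indices, int[] millibels) {
            this.indices = indices;
            this.millibels = millibels;
        }

        /** Linearly interpolate the attenuation in millibels for a framework volume index. */
        public float attenuationMb(int index) {
            if (index <= indices[0]) return millibels[0];
            for (int i = 1; i < indices.length; i++) {
                if (index <= indices[i]) {
                    float t = (index - indices[i - 1]) / (float) (indices[i] - indices[i - 1]);
                    return millibels[i - 1] + t * (millibels[i] - millibels[i - 1]);
                }
            }
            return millibels[millibels.length - 1];
        }

        /** Convert millibels (1/100 dB) to a linear amplitude gain: 10^(mB / 2000). */
        public static float millibelsToAmplitude(float mb) {
            return (float) Math.pow(10.0, mb / 2000.0);
        }
    }

For example, a curve with the two points "0,-9600" and "100,0" yields about -4800 mB (an amplitude gain of roughly 0.004) at index 50 under this simplified interpolation.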
diff --git a/audio/6.0/types.hal b/audio/6.0/types.hal
new file mode 100644
index 0000000..1a704f8
--- /dev/null
+++ b/audio/6.0/types.hal
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@6.0;
+
+import android.hardware.audio.common@6.0;
+
+enum Result : int32_t {
+ OK,
+ NOT_INITIALIZED,
+ INVALID_ARGUMENTS,
+ INVALID_STATE,
+ /**
+ * Methods marked as "Optional method" must return this result value
+ * if the operation is not supported by the HAL.
+ */
+ NOT_SUPPORTED
+};
+
+@export(name="audio_drain_type_t", value_prefix="AUDIO_DRAIN_")
+enum AudioDrain : int32_t {
+ /** drain() returns when all data has been played. */
+ ALL,
+ /**
+ * drain() returns a short time before all data from the current track has
+ * been played to give time for gapless track switch.
+ */
+ EARLY_NOTIFY
+};
+
+/**
+ * A substitute for POSIX timespec.
+ */
+struct TimeSpec {
+ uint64_t tvSec; // seconds
+ uint64_t tvNSec; // nanoseconds
+};
+
+struct ParameterValue {
+ string key;
+ string value;
+};
+
+enum MmapBufferFlag : uint32_t {
+ NONE = 0x0,
+ /**
+ * Set when the buffer can be securely shared with untrusted applications
+ * through the AAudio exclusive mode.
+ * Only set this flag if applications are restricted from accessing the
+ * memory surrounding the audio data buffer by a kernel mechanism.
+ * See Linux kernel's dma_buf.
+ */
+ APPLICATION_SHAREABLE = 0x1,
+};
+
+/**
+ * Mmap buffer descriptor returned by IStream.createMmapBuffer().
+ * Used by streams opened in mmap mode.
+ */
+struct MmapBufferInfo {
+ /** Mmap memory buffer */
+ memory sharedMemory;
+ /** Total buffer size in frames */
+ uint32_t bufferSizeFrames;
+ /** Transfer size granularity in frames */
+ uint32_t burstSizeFrames;
+ /** Attributes describing the buffer. */
+ bitfield<MmapBufferFlag> flags;
+};
+
+/**
+ * Mmap buffer read/write position returned by IStream.getMmapPosition().
+ * Used by streams opened in mmap mode.
+ */
+struct MmapPosition {
+ int64_t timeNanoseconds; // time stamp in ns, CLOCK_MONOTONIC
+ int32_t positionFrames; // increasing 32 bit frame count reset when IStream.stop() is called
+};
+
+/**
+ * The message queue flags used to synchronize reads and writes from
+ * message queues used by StreamIn and StreamOut.
+ */
+enum MessageQueueFlagBits : uint32_t {
+ NOT_EMPTY = 1 << 0,
+ NOT_FULL = 1 << 1
+};
+
+/*
+ * Microphone information
+ *
+ */
+
+/**
+ * A 3D point used to represent position or orientation of a microphone.
+ *
+ * Position: Coordinates of the microphone's capsule, in meters, from the
+ * bottom-left-back corner of the bounding box of the android device in natural
+ * orientation (PORTRAIT for phones, LANDSCAPE for tablets, tvs, etc).
+ * The orientation must match the one reported by the API Display.getRotation().
+ *
+ * Orientation: Normalized vector to signal the main orientation of the
+ * microphone's capsule. Magnitude = sqrt(x^2 + y^2 + z^2) = 1
+ */
+struct AudioMicrophoneCoordinate {
+ float x;
+ float y;
+ float z;
+};
+
+/**
+ * Enum to identify the type of channel mapping for active microphones.
+ * Used channels further identify if the microphone has any significant
+ * processing (e.g. high pass filtering, dynamic compression).
+ * Simple processing such as constant gain adjustment must be reported as DIRECT.
+ */
+enum AudioMicrophoneChannelMapping : uint32_t {
+ UNUSED = 0, /* Channel not used */
+ DIRECT = 1, /* Channel used and signal not processed */
+ PROCESSED = 2, /* Channel used and signal has some process */
+};
+
+/**
+ * Enum to identify locations of microphones in regards to the body of the
+ * android device.
+ */
+enum AudioMicrophoneLocation : uint32_t {
+ UNKNOWN = 0,
+ MAINBODY = 1,
+ MAINBODY_MOVABLE = 2,
+ PERIPHERAL = 3,
+};
+
+/**
+ * Identifier to help group related microphones together
+ * e.g. microphone arrays should belong to the same group
+ */
+typedef int32_t AudioMicrophoneGroup;
+
+/**
+ * Enum with standard polar patterns of microphones
+ */
+enum AudioMicrophoneDirectionality : uint32_t {
+ UNKNOWN = 0,
+ OMNI = 1,
+ BI_DIRECTIONAL = 2,
+ CARDIOID = 3,
+ HYPER_CARDIOID = 4,
+ SUPER_CARDIOID = 5,
+};
+
+/**
+ * A (frequency, level) pair. Used to represent frequency response.
+ */
+struct AudioFrequencyResponsePoint {
+ /** In Hz */
+ float frequency;
+ /** In dB */
+ float level;
+};
+
+/**
+ * Structure used by the HAL to describe microphone's characteristics
+ * Used by StreamIn and Device
+ */
+struct MicrophoneInfo {
+ /** Unique alphanumeric id for microphone. Guaranteed to be the same
+ * even after rebooting.
+ */
+ string deviceId;
+ /**
+ * Device specific information
+ */
+ DeviceAddress deviceAddress;
+ /** Each element of the vector must describe the channel with the same
+ * index.
+ */
+ vec<AudioMicrophoneChannelMapping> channelMapping;
+ /** Location of the microphone in regard to the body of the device */
+ AudioMicrophoneLocation location;
+ /** Identifier to help group related microphones together
+ * e.g. microphone arrays should belong to the same group
+ */
+ AudioMicrophoneGroup group;
+ /** Index of this microphone within the group.
+ * (group, index) must be unique within the same device.
+ */
+ uint32_t indexInTheGroup;
+ /** Level in dBFS produced by a 1000 Hz tone at 94 dB SPL */
+ float sensitivity;
+ /** Level in dB of the max SPL supported at 1000 Hz */
+ float maxSpl;
+ /** Level in dB of the min SPL supported at 1000 Hz */
+ float minSpl;
+ /** Standard polar pattern of the microphone */
+ AudioMicrophoneDirectionality directionality;
+ /** Frequency response points of the microphone, ordered from low to high
+ * frequencies.
+ * Levels are in dB, relative to the level at 1000 Hz.
+ */
+ vec<AudioFrequencyResponsePoint> frequencyResponse;
+ /** Position of the microphone's capsule in meters, from the
+ * bottom-left-back corner of the bounding box of device.
+ */
+ AudioMicrophoneCoordinate position;
+ /** Normalized point to signal the main orientation of the microphone's
+ * capsule. sqrt(x^2 + y^2 + z^2) = 1
+ */
+ AudioMicrophoneCoordinate orientation;
+};
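+
+/*
+ * Illustration only: a phone with a two-microphone array on its main body
+ * would typically report two MicrophoneInfo entries that share group = 0,
+ * use indexInTheGroup = 0 and 1, set location = MAINBODY, and carry
+ * per-capsule position and orientation coordinates; a detachable headset
+ * microphone would use a different group and location = PERIPHERAL.
+ */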
+
+/**
+ * Constants used by the HAL to determine how to select microphones and process those inputs in
+ * order to optimize for capture in the specified direction.
+ *
+ * MicrophoneDirection Constants are defined in MicrophoneDirection.java.
+ */
+@export(name="audio_microphone_direction_t", value_prefix="MIC_DIRECTION_")
+enum MicrophoneDirection : int32_t {
+ /**
+ * Don't do any directionality processing of the activated microphone(s).
+ */
+ UNSPECIFIED = 0,
+ /**
+ * Optimize capture for audio coming from the screen-side of the device.
+ */
+ FRONT = 1,
+ /**
+ * Optimize capture for audio coming from the side of the device opposite the screen.
+ */
+ BACK = 2,
+ /**
+ * Optimize capture for audio coming from an off-device microphone.
+ */
+ EXTERNAL = 3,
+};
diff --git a/audio/common/6.0/Android.bp b/audio/common/6.0/Android.bp
new file mode 100644
index 0000000..1a4e054
--- /dev/null
+++ b/audio/common/6.0/Android.bp
@@ -0,0 +1,17 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.audio.common@6.0",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ ],
+ interfaces: [
+ "android.hidl.safe_union@1.0",
+ ],
+ gen_java: false,
+ gen_java_constants: true,
+}
diff --git a/audio/common/6.0/types.hal b/audio/common/6.0/types.hal
new file mode 100644
index 0000000..132f86d
--- /dev/null
+++ b/audio/common/6.0/types.hal
@@ -0,0 +1,1032 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.common@6.0;
+
+import android.hidl.safe_union@1.0;
+
+/*
+ *
+ * IDs and Handles
+ *
+ */
+
+/**
+ * Handle type for identifying audio sources and sinks.
+ */
+typedef int32_t AudioIoHandle;
+
+/**
+ * Handle for an audio HW module, used by functions or structures that
+ * reference a module.
+ */
+typedef int32_t AudioModuleHandle;
+
+/**
+ * Each port has a unique ID or handle allocated by policy manager.
+ */
+typedef int32_t AudioPortHandle;
+
+/**
+ * Each patch is identified by a handle at the interface used to create that
+ * patch. For instance, when a patch is created by the audio HAL, the HAL
+ * allocates and returns a handle. This handle is unique to a given audio HAL
+ * hardware module. But the same patch receives another system wide unique
+ * handle allocated by the framework. This unique handle is used for all
+ * transactions inside the framework.
+ */
+typedef int32_t AudioPatchHandle;
+
+/**
+ * A HW synchronization source returned by the audio HAL.
+ */
+typedef uint32_t AudioHwSync;
+
+/**
+ * Each port has a unique ID or handle allocated by policy manager.
+ */
+@export(name="")
+enum AudioHandleConsts : int32_t {
+ AUDIO_IO_HANDLE_NONE = 0,
+ AUDIO_MODULE_HANDLE_NONE = 0,
+ AUDIO_PORT_HANDLE_NONE = 0,
+ AUDIO_PATCH_HANDLE_NONE = 0,
+};
+
+/**
+ * Commonly used structure for passing unique identifiers (UUID).
+ * For the definition of UUID, refer to ITU-T X.667 spec.
+ */
+struct Uuid {
+ uint32_t timeLow;
+ uint16_t timeMid;
+ uint16_t versionAndTimeHigh;
+ uint16_t variantAndClockSeqHigh;
+ uint8_t[6] node;
+};
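+
+/*
+ * Worked example (arbitrary value, shown only to illustrate the field
+ * boundaries): the textual UUID "12345678-1234-5678-9abc-def012345678"
+ * maps to timeLow = 0x12345678, timeMid = 0x1234,
+ * versionAndTimeHigh = 0x5678, variantAndClockSeqHigh = 0x9abc and
+ * node = {0xde, 0xf0, 0x12, 0x34, 0x56, 0x78}.
+ */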
+
+
+/*
+ *
+ * Audio streams
+ *
+ */
+
+/**
+ * Audio stream type describing the intended use case of a stream.
+ */
+@export(name="audio_stream_type_t", value_prefix="AUDIO_STREAM_")
+enum AudioStreamType : int32_t {
+ // These values must be kept in sync with
+ // frameworks/base/media/java/android/media/AudioSystem.java
+ DEFAULT = -1,
+ MIN = 0,
+ VOICE_CALL = 0,
+ SYSTEM = 1,
+ RING = 2,
+ MUSIC = 3,
+ ALARM = 4,
+ NOTIFICATION = 5,
+ BLUETOOTH_SCO = 6,
+ ENFORCED_AUDIBLE = 7, // Sounds that cannot be muted by user and must be
+ // routed to speaker
+ DTMF = 8,
+ TTS = 9, // Transmitted Through Speaker. Plays over speaker
+ // only, silent on other devices
+ ACCESSIBILITY = 10, // For accessibility talk back prompts
+};
+
+@export(name="audio_source_t", value_prefix="AUDIO_SOURCE_")
+enum AudioSource : int32_t {
+ // These values must be kept in sync with
+ // frameworks/base/media/java/android/media/MediaRecorder.java,
+ // frameworks/av/services/audiopolicy/AudioPolicyService.cpp,
+ // system/media/audio_effects/include/audio_effects/audio_effects_conf.h
+ DEFAULT = 0,
+ MIC = 1,
+ VOICE_UPLINK = 2,
+ VOICE_DOWNLINK = 3,
+ VOICE_CALL = 4,
+ CAMCORDER = 5,
+ VOICE_RECOGNITION = 6,
+ VOICE_COMMUNICATION = 7,
+ /**
+ * Source for the mix to be presented remotely. An example of remote
+ * presentation is Wifi Display where a dongle attached to a TV can be used
+ * to play the mix captured by this audio source.
+ */
+ REMOTE_SUBMIX = 8,
+ /**
+ * Source for unprocessed sound. Usage examples include level measurement
+ * and raw signal analysis.
+ */
+ UNPROCESSED = 9,
+ /**
+ * Source for capturing audio meant to be processed in real time and played back for live
+ * performance (e.g karaoke). The capture path will minimize latency and coupling with
+ * playback path.
+ */
+ VOICE_PERFORMANCE = 10,
+ /**
+ * Source for an echo canceller to capture the reference signal to be cancelled.
+ * The echo reference signal will be captured as close as possible to the DAC in order
+ * to include all post processing applied to the playback path.
+ */
+ ECHO_REFERENCE = 1997,
+ FM_TUNER = 1998,
+ HOTWORD = 1999,
+};
+
+typedef int32_t AudioSession;
+/**
+ * Special audio session values.
+ */
+@export(name="audio_session_t", value_prefix="AUDIO_SESSION_")
+enum AudioSessionConsts : int32_t {
+ /**
+ * Session for effects attached to a particular output stream
+ * (value must be less than 0)
+ */
+ OUTPUT_STAGE = -1,
+ /**
+ * Session for effects applied to output mix. These effects can
+ * be moved by audio policy manager to another output stream
+ * (value must be 0)
+ */
+ OUTPUT_MIX = 0,
+ /**
+ * Application does not specify an explicit session ID to be used, and
+ * requests a new session ID to be allocated. Corresponds to
+ * AudioManager.AUDIO_SESSION_ID_GENERATE and
+ * AudioSystem.AUDIO_SESSION_ALLOCATE.
+ */
+ ALLOCATE = 0,
+ /**
+ * For use with AudioRecord::start(), this indicates no trigger session.
+ * It is also used with output tracks and patch tracks, which never have a
+ * session.
+ */
+ NONE = 0
+};
+
+/**
+ * Audio format is a 32-bit word that consists of:
+ * main format field (upper 8 bits)
+ * sub format field (lower 24 bits).
+ *
+ * The main format indicates the main codec type. The sub format field indicates
+ * options and parameters for each format. The sub format is mainly used for
+ * recording to indicate, for instance, the requested bitrate or profile. It
+ * can also be used for certain formats to give information not present in the
+ * encoded audio stream (e.g. octet alignment for AMR).
+ */
+@export(name="audio_format_t", value_prefix="AUDIO_FORMAT_")
+enum AudioFormat : uint32_t {
+ INVALID = 0xFFFFFFFFUL,
+ DEFAULT = 0,
+ PCM = 0x00000000UL,
+ MP3 = 0x01000000UL,
+ AMR_NB = 0x02000000UL,
+ AMR_WB = 0x03000000UL,
+ AAC = 0x04000000UL,
+ /** Deprecated, Use AAC_HE_V1 */
+ HE_AAC_V1 = 0x05000000UL,
+ /** Deprecated, Use AAC_HE_V2 */
+ HE_AAC_V2 = 0x06000000UL,
+ VORBIS = 0x07000000UL,
+ OPUS = 0x08000000UL,
+ AC3 = 0x09000000UL,
+ E_AC3 = 0x0A000000UL,
+ DTS = 0x0B000000UL,
+ DTS_HD = 0x0C000000UL,
+ /** IEC61937 is encoded audio wrapped in 16-bit PCM. */
+ IEC61937 = 0x0D000000UL,
+ DOLBY_TRUEHD = 0x0E000000UL,
+ EVRC = 0x10000000UL,
+ EVRCB = 0x11000000UL,
+ EVRCWB = 0x12000000UL,
+ EVRCNW = 0x13000000UL,
+ AAC_ADIF = 0x14000000UL,
+ WMA = 0x15000000UL,
+ WMA_PRO = 0x16000000UL,
+ AMR_WB_PLUS = 0x17000000UL,
+ MP2 = 0x18000000UL,
+ QCELP = 0x19000000UL,
+ DSD = 0x1A000000UL,
+ FLAC = 0x1B000000UL,
+ ALAC = 0x1C000000UL,
+ APE = 0x1D000000UL,
+ AAC_ADTS = 0x1E000000UL,
+ SBC = 0x1F000000UL,
+ APTX = 0x20000000UL,
+ APTX_HD = 0x21000000UL,
+ AC4 = 0x22000000UL,
+ LDAC = 0x23000000UL,
+ /** Dolby Metadata-enhanced Audio Transmission */
+ MAT = 0x24000000UL,
+ AAC_LATM = 0x25000000UL,
+ CELT = 0x26000000UL,
+ APTX_ADAPTIVE = 0x27000000UL,
+ LHDC = 0x28000000UL,
+ LHDC_LL = 0x29000000UL,
+ APTX_TWSP = 0x2A000000UL,
+
+ /** Deprecated */
+ MAIN_MASK = 0xFF000000UL,
+ SUB_MASK = 0x00FFFFFFUL,
+
+ /* Subformats */
+ PCM_SUB_16_BIT = 0x1, // PCM signed 16 bits
+ PCM_SUB_8_BIT = 0x2, // PCM unsigned 8 bits
+ PCM_SUB_32_BIT = 0x3, // PCM signed .31 fixed point
+ PCM_SUB_8_24_BIT = 0x4, // PCM signed 8.23 fixed point
+ PCM_SUB_FLOAT = 0x5, // PCM single-precision float pt
+ PCM_SUB_24_BIT_PACKED = 0x6, // PCM signed .23 fix pt (3 bytes)
+
+ MP3_SUB_NONE = 0x0,
+
+ AMR_SUB_NONE = 0x0,
+
+ AAC_SUB_MAIN = 0x1,
+ AAC_SUB_LC = 0x2,
+ AAC_SUB_SSR = 0x4,
+ AAC_SUB_LTP = 0x8,
+ AAC_SUB_HE_V1 = 0x10,
+ AAC_SUB_SCALABLE = 0x20,
+ AAC_SUB_ERLC = 0x40,
+ AAC_SUB_LD = 0x80,
+ AAC_SUB_HE_V2 = 0x100,
+ AAC_SUB_ELD = 0x200,
+ AAC_SUB_XHE = 0x300,
+
+ VORBIS_SUB_NONE = 0x0,
+
+ E_AC3_SUB_JOC = 0x1,
+
+ MAT_SUB_1_0 = 0x1,
+ MAT_SUB_2_0 = 0x2,
+ MAT_SUB_2_1 = 0x3,
+
+ /* Aliases */
+ /** note != AudioFormat.ENCODING_PCM_16BIT */
+ PCM_16_BIT = (PCM | PCM_SUB_16_BIT),
+ /** note != AudioFormat.ENCODING_PCM_8BIT */
+ PCM_8_BIT = (PCM | PCM_SUB_8_BIT),
+ PCM_32_BIT = (PCM | PCM_SUB_32_BIT),
+ PCM_8_24_BIT = (PCM | PCM_SUB_8_24_BIT),
+ PCM_FLOAT = (PCM | PCM_SUB_FLOAT),
+ PCM_24_BIT_PACKED = (PCM | PCM_SUB_24_BIT_PACKED),
+ AAC_MAIN = (AAC | AAC_SUB_MAIN),
+ AAC_LC = (AAC | AAC_SUB_LC),
+ AAC_SSR = (AAC | AAC_SUB_SSR),
+ AAC_LTP = (AAC | AAC_SUB_LTP),
+ AAC_HE_V1 = (AAC | AAC_SUB_HE_V1),
+ AAC_SCALABLE = (AAC | AAC_SUB_SCALABLE),
+ AAC_ERLC = (AAC | AAC_SUB_ERLC),
+ AAC_LD = (AAC | AAC_SUB_LD),
+ AAC_HE_V2 = (AAC | AAC_SUB_HE_V2),
+ AAC_ELD = (AAC | AAC_SUB_ELD),
+ AAC_XHE = (AAC | AAC_SUB_XHE),
+ AAC_ADTS_MAIN = (AAC_ADTS | AAC_SUB_MAIN),
+ AAC_ADTS_LC = (AAC_ADTS | AAC_SUB_LC),
+ AAC_ADTS_SSR = (AAC_ADTS | AAC_SUB_SSR),
+ AAC_ADTS_LTP = (AAC_ADTS | AAC_SUB_LTP),
+ AAC_ADTS_HE_V1 = (AAC_ADTS | AAC_SUB_HE_V1),
+ AAC_ADTS_SCALABLE = (AAC_ADTS | AAC_SUB_SCALABLE),
+ AAC_ADTS_ERLC = (AAC_ADTS | AAC_SUB_ERLC),
+ AAC_ADTS_LD = (AAC_ADTS | AAC_SUB_LD),
+ AAC_ADTS_HE_V2 = (AAC_ADTS | AAC_SUB_HE_V2),
+ AAC_ADTS_ELD = (AAC_ADTS | AAC_SUB_ELD),
+ AAC_ADTS_XHE = (AAC_ADTS | AAC_SUB_XHE),
+ E_AC3_JOC = (E_AC3 | E_AC3_SUB_JOC),
+ MAT_1_0 = (MAT | MAT_SUB_1_0),
+ MAT_2_0 = (MAT | MAT_SUB_2_0),
+ MAT_2_1 = (MAT | MAT_SUB_2_1),
+ AAC_LATM_LC = (AAC_LATM | AAC_SUB_LC),
+ AAC_LATM_HE_V1 = (AAC_LATM | AAC_SUB_HE_V1),
+ AAC_LATM_HE_V2 = (AAC_LATM | AAC_SUB_HE_V2),
+};
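+
+/*
+ * Worked example (illustration only): AAC_LC above is the composition of the
+ * main format AAC (upper 8 bits) with the sub format AAC_SUB_LC (lower
+ * 24 bits). A client holding a raw 32-bit value can split it the same way,
+ * e.g. in C++ (type and enumerator names assumed to match the generated
+ * android::hardware::audio::common::V6_0 bindings):
+ *
+ *   uint32_t raw = static_cast<uint32_t>(AudioFormat::AAC_LC); // 0x04000002
+ *   uint32_t mainFormat = raw & 0xFF000000u; // 0x04000000 == AAC
+ *   uint32_t subFormat  = raw & 0x00FFFFFFu; // 0x00000002 == AAC_SUB_LC
+ */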
+
+/**
+ * Usage of these values highlights places in the code that rely on 2- or
+ * 8-channel assumptions.
+ */
+@export(name="")
+enum FixedChannelCount : int32_t {
+ FCC_2 = 2, // This is typically due to legacy implementation of stereo I/O
+ FCC_8 = 8 // This is typically due to audio mixer and resampler limitations
+};
+
+/**
+ * A channel mask per se only defines the presence or absence of a channel, not
+ * the order.
+ *
+ * The channel order convention is that channels are interleaved in order from
+ * least significant channel mask bit to most significant channel mask bit,
+ * with unused bits skipped. For example for stereo, LEFT would be first,
+ * followed by RIGHT.
+ * Any exceptions to this convention are noted at the appropriate API.
+ *
+ * AudioChannelMask is an opaque type and its internal layout should not be
+ * assumed as it may change in the future. Instead, always use functions
+ * to examine it.
+ *
+ * These are the current representations:
+ *
+ * REPRESENTATION_POSITION
+ * is a channel mask representation for position assignment. Each low-order
+ * bit corresponds to the spatial position of a transducer (output), or
+ * interpretation of channel (input). The user of a channel mask needs to
+ * know the context of whether it is for output or input. The constants
+ * OUT_* or IN_* apply to the bits portion. It is not permitted for no bits
+ * to be set.
+ *
+ * REPRESENTATION_INDEX
+ * is a channel mask representation for index assignment. Each low-order
+ * bit corresponds to a selected channel. There is no platform
+ * interpretation of the various bits. There is no concept of output or
+ * input. It is not permitted for no bits to be set.
+ *
+ * All other representations are reserved for future use.
+ *
+ * Warning: current representation distinguishes between input and output, but
+ * this will not be the case in future revisions of the platform. Wherever there
+ * is an ambiguity between input and output that is currently resolved by
+ * checking the channel mask, the implementer should look for ways to fix it
+ * with additional information outside of the mask.
+ */
+@export(name="", value_prefix="AUDIO_CHANNEL_")
+enum AudioChannelMask : uint32_t {
+ /** must be 0 for compatibility */
+ REPRESENTATION_POSITION = 0,
+ /** 1 is reserved for future use */
+ REPRESENTATION_INDEX = 2,
+ /* 3 is reserved for future use */
+
+ /** These can be a complete value of AudioChannelMask */
+ NONE = 0x0,
+ INVALID = 0xC0000000,
+
+ /*
+ * These can be the bits portion of an AudioChannelMask
+ * with representation REPRESENTATION_POSITION.
+ */
+
+ /** output channels */
+ OUT_FRONT_LEFT = 0x1,
+ OUT_FRONT_RIGHT = 0x2,
+ OUT_FRONT_CENTER = 0x4,
+ OUT_LOW_FREQUENCY = 0x8,
+ OUT_BACK_LEFT = 0x10,
+ OUT_BACK_RIGHT = 0x20,
+ OUT_FRONT_LEFT_OF_CENTER = 0x40,
+ OUT_FRONT_RIGHT_OF_CENTER = 0x80,
+ OUT_BACK_CENTER = 0x100,
+ OUT_SIDE_LEFT = 0x200,
+ OUT_SIDE_RIGHT = 0x400,
+ OUT_TOP_CENTER = 0x800,
+ OUT_TOP_FRONT_LEFT = 0x1000,
+ OUT_TOP_FRONT_CENTER = 0x2000,
+ OUT_TOP_FRONT_RIGHT = 0x4000,
+ OUT_TOP_BACK_LEFT = 0x8000,
+ OUT_TOP_BACK_CENTER = 0x10000,
+ OUT_TOP_BACK_RIGHT = 0x20000,
+ OUT_TOP_SIDE_LEFT = 0x40000,
+ OUT_TOP_SIDE_RIGHT = 0x80000,
+
+ /**
+ * Haptic channel characteristics are specific to a device and
+ * only used to play device specific resources (eg: ringtones).
+ * The HAL can freely map A and B to haptic controllers; the
+ * framework shall not interpret those values and must forward them
+ * unchanged from the device audio assets.
+ */
+ OUT_HAPTIC_A = 0x20000000,
+ OUT_HAPTIC_B = 0x10000000,
+
+ OUT_MONO = OUT_FRONT_LEFT,
+ OUT_STEREO = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT),
+ OUT_2POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_LOW_FREQUENCY),
+ OUT_2POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
+ OUT_2POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
+ OUT_LOW_FREQUENCY),
+ OUT_3POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER |
+ OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
+ OUT_3POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER |
+ OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
+ OUT_LOW_FREQUENCY),
+ OUT_QUAD = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_BACK_LEFT | OUT_BACK_RIGHT),
+ OUT_QUAD_BACK = OUT_QUAD,
+ /** like OUT_QUAD_BACK with *_SIDE_* instead of *_BACK_* */
+ OUT_QUAD_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
+ OUT_SURROUND = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_FRONT_CENTER | OUT_BACK_CENTER),
+ OUT_PENTA = (OUT_QUAD | OUT_FRONT_CENTER),
+ OUT_5POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
+ OUT_BACK_LEFT | OUT_BACK_RIGHT),
+ OUT_5POINT1_BACK = OUT_5POINT1,
+ /** like OUT_5POINT1_BACK with *_SIDE_* instead of *_BACK_* */
+ OUT_5POINT1_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
+ OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
+ OUT_5POINT1POINT2 = (OUT_5POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
+ OUT_5POINT1POINT4 = (OUT_5POINT1 |
+ OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
+ OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
+ OUT_6POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
+ OUT_BACK_LEFT | OUT_BACK_RIGHT |
+ OUT_BACK_CENTER),
+ /** matches the correct AudioFormat.CHANNEL_OUT_7POINT1_SURROUND */
+ OUT_7POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
+ OUT_BACK_LEFT | OUT_BACK_RIGHT |
+ OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
+ OUT_7POINT1POINT2 = (OUT_7POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
+ OUT_7POINT1POINT4 = (OUT_7POINT1 |
+ OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
+ OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
+ OUT_MONO_HAPTIC_A = (OUT_FRONT_LEFT | OUT_HAPTIC_A),
+ OUT_STEREO_HAPTIC_A = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_HAPTIC_A),
+ OUT_HAPTIC_AB = (OUT_HAPTIC_A | OUT_HAPTIC_B),
+ OUT_MONO_HAPTIC_AB = (OUT_FRONT_LEFT | OUT_HAPTIC_A | OUT_HAPTIC_B),
+ OUT_STEREO_HAPTIC_AB = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
+ OUT_HAPTIC_A | OUT_HAPTIC_B),
+ // Note that the 2.0 OUT_ALL* have been moved to helper functions
+
+ /* These are bits only, not complete values */
+
+ /** input channels */
+ IN_LEFT = 0x4,
+ IN_RIGHT = 0x8,
+ IN_FRONT = 0x10,
+ IN_BACK = 0x20,
+ IN_LEFT_PROCESSED = 0x40,
+ IN_RIGHT_PROCESSED = 0x80,
+ IN_FRONT_PROCESSED = 0x100,
+ IN_BACK_PROCESSED = 0x200,
+ IN_PRESSURE = 0x400,
+ IN_X_AXIS = 0x800,
+ IN_Y_AXIS = 0x1000,
+ IN_Z_AXIS = 0x2000,
+ IN_BACK_LEFT = 0x10000,
+ IN_BACK_RIGHT = 0x20000,
+ IN_CENTER = 0x40000,
+ IN_LOW_FREQUENCY = 0x100000,
+ IN_TOP_LEFT = 0x200000,
+ IN_TOP_RIGHT = 0x400000,
+
+ IN_VOICE_UPLINK = 0x4000,
+ IN_VOICE_DNLINK = 0x8000,
+
+ IN_MONO = IN_FRONT,
+ IN_STEREO = (IN_LEFT | IN_RIGHT),
+ IN_FRONT_BACK = (IN_FRONT | IN_BACK),
+ IN_6 = (IN_LEFT | IN_RIGHT |
+ IN_FRONT | IN_BACK |
+ IN_LEFT_PROCESSED | IN_RIGHT_PROCESSED),
+ IN_2POINT0POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
+ IN_2POINT1POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT |
+ IN_LOW_FREQUENCY),
+ IN_3POINT0POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
+ IN_3POINT1POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT |
+ IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY),
+ IN_5POINT1 = (IN_LEFT | IN_CENTER | IN_RIGHT |
+ IN_BACK_LEFT | IN_BACK_RIGHT | IN_LOW_FREQUENCY),
+ IN_VOICE_UPLINK_MONO = (IN_VOICE_UPLINK | IN_MONO),
+ IN_VOICE_DNLINK_MONO = (IN_VOICE_DNLINK | IN_MONO),
+ IN_VOICE_CALL_MONO = (IN_VOICE_UPLINK_MONO |
+ IN_VOICE_DNLINK_MONO),
+ // Note that the 2.0 IN_ALL* have been moved to helper functions
+
+ COUNT_MAX = 30,
+ INDEX_HDR = REPRESENTATION_INDEX << COUNT_MAX,
+ INDEX_MASK_1 = INDEX_HDR | ((1 << 1) - 1),
+ INDEX_MASK_2 = INDEX_HDR | ((1 << 2) - 1),
+ INDEX_MASK_3 = INDEX_HDR | ((1 << 3) - 1),
+ INDEX_MASK_4 = INDEX_HDR | ((1 << 4) - 1),
+ INDEX_MASK_5 = INDEX_HDR | ((1 << 5) - 1),
+ INDEX_MASK_6 = INDEX_HDR | ((1 << 6) - 1),
+ INDEX_MASK_7 = INDEX_HDR | ((1 << 7) - 1),
+ INDEX_MASK_8 = INDEX_HDR | ((1 << 8) - 1),
+ INDEX_MASK_9 = INDEX_HDR | ((1 << 9) - 1),
+ INDEX_MASK_10 = INDEX_HDR | ((1 << 10) - 1),
+ INDEX_MASK_11 = INDEX_HDR | ((1 << 11) - 1),
+ INDEX_MASK_12 = INDEX_HDR | ((1 << 12) - 1),
+ INDEX_MASK_13 = INDEX_HDR | ((1 << 13) - 1),
+ INDEX_MASK_14 = INDEX_HDR | ((1 << 14) - 1),
+ INDEX_MASK_15 = INDEX_HDR | ((1 << 15) - 1),
+ INDEX_MASK_16 = INDEX_HDR | ((1 << 16) - 1),
+ INDEX_MASK_17 = INDEX_HDR | ((1 << 17) - 1),
+ INDEX_MASK_18 = INDEX_HDR | ((1 << 18) - 1),
+ INDEX_MASK_19 = INDEX_HDR | ((1 << 19) - 1),
+ INDEX_MASK_20 = INDEX_HDR | ((1 << 20) - 1),
+ INDEX_MASK_21 = INDEX_HDR | ((1 << 21) - 1),
+ INDEX_MASK_22 = INDEX_HDR | ((1 << 22) - 1),
+ INDEX_MASK_23 = INDEX_HDR | ((1 << 23) - 1),
+ INDEX_MASK_24 = INDEX_HDR | ((1 << 24) - 1),
+};
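+
+/*
+ * Worked example (illustration only): the same two-channel stream can be
+ * described by either representation. With REPRESENTATION_POSITION, stereo
+ * output is OUT_STEREO = OUT_FRONT_LEFT | OUT_FRONT_RIGHT = 0x3, which names
+ * the speaker positions. With REPRESENTATION_INDEX, a two-channel mask is
+ * INDEX_MASK_2 = (REPRESENTATION_INDEX << COUNT_MAX) | 0x3
+ *              = (2 << 30) | 0x3 = 0x80000003,
+ * which only states that channels 0 and 1 are present, with no positional
+ * meaning attached to them.
+ */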
+
+/**
+ * Major modes for a mobile device. The current mode setting affects audio
+ * routing.
+ */
+@export(name="audio_mode_t", value_prefix="AUDIO_MODE_")
+enum AudioMode : int32_t {
+ NORMAL = 0,
+ RINGTONE = 1,
+ /** Calls handled by the telephony stack (Eg: PSTN). */
+ IN_CALL = 2,
+ /** Calls handled by apps (Eg: Hangout). */
+ IN_COMMUNICATION = 3,
+};
+
+@export(name="", value_prefix="AUDIO_DEVICE_")
+enum AudioDevice : uint32_t {
+ NONE = 0x0,
+ /** reserved bits */
+ BIT_IN = 0x80000000,
+ BIT_DEFAULT = 0x40000000,
+ /** output devices */
+ OUT_EARPIECE = 0x1,
+ OUT_SPEAKER = 0x2,
+ OUT_WIRED_HEADSET = 0x4,
+ OUT_WIRED_HEADPHONE = 0x8,
+ OUT_BLUETOOTH_SCO = 0x10,
+ OUT_BLUETOOTH_SCO_HEADSET = 0x20,
+ OUT_BLUETOOTH_SCO_CARKIT = 0x40,
+ OUT_BLUETOOTH_A2DP = 0x80,
+ OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
+ OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
+ OUT_AUX_DIGITAL = 0x400,
+ OUT_HDMI = OUT_AUX_DIGITAL,
+ /** uses an analog connection (multiplexed over the USB pins for instance) */
+ OUT_ANLG_DOCK_HEADSET = 0x800,
+ OUT_DGTL_DOCK_HEADSET = 0x1000,
+ /** USB accessory mode: Android device is USB device and dock is USB host */
+ OUT_USB_ACCESSORY = 0x2000,
+ /** USB host mode: Android device is USB host and dock is USB device */
+ OUT_USB_DEVICE = 0x4000,
+ OUT_REMOTE_SUBMIX = 0x8000,
+ /** Telephony voice TX path */
+ OUT_TELEPHONY_TX = 0x10000,
+ /** Analog jack with line impedance detected */
+ OUT_LINE = 0x20000,
+ /** HDMI Audio Return Channel */
+ OUT_HDMI_ARC = 0x40000,
+ /** S/PDIF out */
+ OUT_SPDIF = 0x80000,
+ /** FM transmitter out */
+ OUT_FM = 0x100000,
+ /** Line out for av devices */
+ OUT_AUX_LINE = 0x200000,
+ /** limited-output speaker device for acoustic safety */
+ OUT_SPEAKER_SAFE = 0x400000,
+ OUT_IP = 0x800000,
+ /** audio bus implemented by the audio system (e.g an MOST stereo channel) */
+ OUT_BUS = 0x1000000,
+ OUT_PROXY = 0x2000000,
+ OUT_USB_HEADSET = 0x4000000,
+ OUT_HEARING_AID = 0x8000000,
+ OUT_ECHO_CANCELLER = 0x10000000,
+ OUT_DEFAULT = BIT_DEFAULT,
+ // Note that the 2.0 OUT_ALL* have been moved to helper functions
+
+ /** input devices */
+ IN_COMMUNICATION = BIT_IN | 0x1,
+ IN_AMBIENT = BIT_IN | 0x2,
+ IN_BUILTIN_MIC = BIT_IN | 0x4,
+ IN_BLUETOOTH_SCO_HEADSET = BIT_IN | 0x8,
+ IN_WIRED_HEADSET = BIT_IN | 0x10,
+ IN_AUX_DIGITAL = BIT_IN | 0x20,
+ IN_HDMI = IN_AUX_DIGITAL,
+ /** Telephony voice RX path */
+ IN_VOICE_CALL = BIT_IN | 0x40,
+ IN_TELEPHONY_RX = IN_VOICE_CALL,
+ IN_BACK_MIC = BIT_IN | 0x80,
+ IN_REMOTE_SUBMIX = BIT_IN | 0x100,
+ IN_ANLG_DOCK_HEADSET = BIT_IN | 0x200,
+ IN_DGTL_DOCK_HEADSET = BIT_IN | 0x400,
+ IN_USB_ACCESSORY = BIT_IN | 0x800,
+ IN_USB_DEVICE = BIT_IN | 0x1000,
+ /** FM tuner input */
+ IN_FM_TUNER = BIT_IN | 0x2000,
+ /** TV tuner input */
+ IN_TV_TUNER = BIT_IN | 0x4000,
+ /** Analog jack with line impedance detected */
+ IN_LINE = BIT_IN | 0x8000,
+ /** S/PDIF in */
+ IN_SPDIF = BIT_IN | 0x10000,
+ IN_BLUETOOTH_A2DP = BIT_IN | 0x20000,
+ IN_LOOPBACK = BIT_IN | 0x40000,
+ IN_IP = BIT_IN | 0x80000,
+ /** audio bus implemented by the audio system (e.g an MOST stereo channel) */
+ IN_BUS = BIT_IN | 0x100000,
+ IN_PROXY = BIT_IN | 0x1000000,
+ IN_USB_HEADSET = BIT_IN | 0x2000000,
+ IN_BLUETOOTH_BLE = BIT_IN | 0x4000000,
+ IN_ECHO_REFERENCE = BIT_IN | 0x10000000,
+ IN_DEFAULT = BIT_IN | BIT_DEFAULT,
+
+ // Note that the 2.0 IN_ALL* have been moved to helper functions
+};
+
+/**
+ * IEEE 802 MAC address.
+ */
+typedef uint8_t[6] MacAddress;
+
+/**
+ * Specifies a device address in case when several devices of the same type
+ * can be connected (e.g. BT A2DP, USB).
+ */
+struct DeviceAddress {
+ AudioDevice device; // discriminator
+ union Address {
+ MacAddress mac; // used for BLUETOOTH_A2DP_*
+ uint8_t[4] ipv4; // used for IP
+ struct Alsa {
+ int32_t card;
+ int32_t device;
+ } alsa; // used for USB_*
+ } address;
+ /** Arbitrary BUS device unique address. Should not be interpreted by the framework. */
+ string busAddress;
+ /** Arbitrary REMOTE_SUBMIX device unique address. Should not be interpreted by the HAL. */
+ string rSubmixAddress;
+};
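+
+/*
+ * Illustration only (a sketch using the generated C++ types, not a normative
+ * example): a USB headset enumerated as ALSA card 1, device 0 could be
+ * described as
+ *
+ *   DeviceAddress addr;
+ *   addr.device = AudioDevice::OUT_USB_HEADSET;
+ *   addr.address.alsa.card = 1;
+ *   addr.address.alsa.device = 0;
+ *
+ * while a Bluetooth A2DP sink would set device to OUT_BLUETOOTH_A2DP and
+ * fill address.mac instead.
+ */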
+
+/**
+ * The audio output flags serve two purposes:
+ *
+ * - when an AudioTrack is created they indicate a "wish" to be connected to an
+ * output stream with attributes corresponding to the specified flags;
+ *
+ * - when present in an output profile descriptor listed for a particular audio
+ * hardware module, they indicate that an output stream can be opened that
+ * supports the attributes indicated by the flags.
+ *
+ * The audio policy manager will try to match the flags in the request
+ * (when getOutput() is called) to an available output stream.
+ */
+@export(name="audio_output_flags_t", value_prefix="AUDIO_OUTPUT_FLAG_")
+enum AudioOutputFlag : int32_t {
+ NONE = 0x0, // no attributes
+ DIRECT = 0x1, // this output directly connects a track
+ // to one output stream: no software mixer
+ PRIMARY = 0x2, // this output is the primary output of the device. It is
+ // unique and must be present. It is opened by default and
+ // receives routing, audio mode and volume controls related
+ // to voice calls.
+ FAST = 0x4, // output supports "fast tracks", defined elsewhere
+ DEEP_BUFFER = 0x8, // use deep audio buffers
+ COMPRESS_OFFLOAD = 0x10, // offload playback of compressed streams to
+ // hardware codec
+ NON_BLOCKING = 0x20, // use non-blocking write
+ HW_AV_SYNC = 0x40, // output uses a hardware A/V sync
+ TTS = 0x80, // output for streams transmitted through speaker at a
+ // sample rate high enough to accommodate lower-range
+ // ultrasonic p/b
+ RAW = 0x100, // minimize signal processing
+ SYNC = 0x200, // synchronize I/O streams
+ IEC958_NONAUDIO = 0x400, // Audio stream contains compressed audio in SPDIF
+ // data bursts, not PCM.
+ DIRECT_PCM = 0x2000, // Audio stream containing PCM data that needs
+ // to pass through compress path for DSP post proc.
+ MMAP_NOIRQ = 0x4000, // output operates in MMAP no IRQ mode.
+ VOIP_RX = 0x8000, // preferred output for VoIP calls.
+ /** preferred output for call music */
+ INCALL_MUSIC = 0x10000,
+};
+
+/**
+ * The audio input flags are analogous to audio output flags.
+ * Currently they are used only when an AudioRecord is created,
+ * to indicate a preference to be connected to an input stream with
+ * attributes corresponding to the specified flags.
+ */
+@export(name="audio_input_flags_t", value_prefix="AUDIO_INPUT_FLAG_")
+enum AudioInputFlag : int32_t {
+ NONE = 0x0, // no attributes
+ FAST = 0x1, // prefer an input that supports "fast tracks"
+ HW_HOTWORD = 0x2, // prefer an input that captures from hw hotword source
+ RAW = 0x4, // minimize signal processing
+ SYNC = 0x8, // synchronize I/O streams
+ MMAP_NOIRQ = 0x10, // input operates in MMAP no IRQ mode.
+ VOIP_TX = 0x20, // preferred input for VoIP calls.
+ HW_AV_SYNC = 0x40, // input connected to an output that uses a hardware A/V sync
+};
+
+@export(name="audio_usage_t", value_prefix="AUDIO_USAGE_")
+enum AudioUsage : int32_t {
+ // These values must be kept in sync with
+ // frameworks/base/media/java/android/media/AudioAttributes.java
+ // Note that not all framework values are exposed
+ UNKNOWN = 0,
+ MEDIA = 1,
+ VOICE_COMMUNICATION = 2,
+ VOICE_COMMUNICATION_SIGNALLING = 3,
+ ALARM = 4,
+ NOTIFICATION = 5,
+ NOTIFICATION_TELEPHONY_RINGTONE = 6,
+ ASSISTANCE_ACCESSIBILITY = 11,
+ ASSISTANCE_NAVIGATION_GUIDANCE = 12,
+ ASSISTANCE_SONIFICATION = 13,
+ GAME = 14,
+ VIRTUAL_SOURCE = 15,
+ ASSISTANT = 16,
+};
+
+/** Type of audio generated by an application. */
+@export(name="audio_content_type_t", value_prefix="AUDIO_CONTENT_TYPE_")
+enum AudioContentType : uint32_t {
+ // Do not change these values without updating their counterparts
+ // in frameworks/base/media/java/android/media/AudioAttributes.java
+ UNKNOWN = 0,
+ SPEECH = 1,
+ MUSIC = 2,
+ MOVIE = 3,
+ SONIFICATION = 4,
+};
+
+/**
+ * Additional information about the stream passed to hardware decoders.
+ */
+struct AudioOffloadInfo {
+ uint32_t sampleRateHz;
+ bitfield<AudioChannelMask> channelMask;
+ AudioFormat format;
+ AudioStreamType streamType;
+ uint32_t bitRatePerSecond;
+ int64_t durationMicroseconds; // -1 if unknown
+ bool hasVideo;
+ bool isStreaming;
+ uint32_t bitWidth;
+ uint32_t bufferSize;
+ AudioUsage usage;
+};
+
+/**
+ * Commonly used audio stream configuration parameters.
+ */
+struct AudioConfig {
+ uint32_t sampleRateHz;
+ bitfield<AudioChannelMask> channelMask;
+ AudioFormat format;
+ AudioOffloadInfo offloadInfo;
+ uint64_t frameCount;
+};
+
+/** Metadata of a playback track for a StreamOut. */
+struct PlaybackTrackMetadata {
+ AudioUsage usage;
+ AudioContentType contentType;
+ /**
+ * Positive linear gain applied to the track samples. 0 means muted, 1 means no attenuation,
+ * 2 means double amplification, and so on.
+ * Must not be negative.
+ */
+ float gain;
+};
+
+/** Metadata of the source of a StreamOut. */
+struct SourceMetadata {
+ vec<PlaybackTrackMetadata> tracks;
+};
+
+/** Metadata of a record track for a StreamIn. */
+struct RecordTrackMetadata {
+ AudioSource source;
+ /**
+ * Positive linear gain applied to the track samples. 0 means muted, 1 means no attenuation,
+ * 2 means double amplification, and so on.
+ * Must not be negative.
+ */
+ float gain;
+ /**
+ * Indicates the destination of an input stream, can be left unspecified.
+ */
+ safe_union Destination {
+ Monostate unspecified;
+ DeviceAddress device;
+ };
+ Destination destination;
+};
+
+/** Metadata of the sink of a StreamIn. */
+struct SinkMetadata {
+ vec<RecordTrackMetadata> tracks;
+};
+
+
+/*
+ *
+ * Volume control
+ *
+ */
+
+/**
+ * Type of gain control exposed by an audio port.
+ */
+@export(name="", value_prefix="AUDIO_GAIN_MODE_")
+enum AudioGainMode : uint32_t {
+ JOINT = 0x1, // supports joint channel gain control
+ CHANNELS = 0x2, // supports separate channel gain control
+ RAMP = 0x4 // supports gain ramps
+};
+
+/**
+ * An audio_gain struct is a representation of a gain stage.
+ * A gain stage is always attached to an audio port.
+ */
+struct AudioGain {
+ bitfield<AudioGainMode> mode;
+ bitfield<AudioChannelMask> channelMask; // channels whose gain can be controlled
+ int32_t minValue; // minimum gain value in millibels
+ int32_t maxValue; // maximum gain value in millibels
+ int32_t defaultValue; // default gain value in millibels
+ uint32_t stepValue; // gain step in millibels
+ uint32_t minRampMs; // minimum ramp duration in ms
+ uint32_t maxRampMs; // maximum ramp duration in ms
+};
+
+/**
+ * The gain configuration structure is used to get or set the gain values of a
+ * given port.
+ */
+struct AudioGainConfig {
+ int32_t index; // index of the corresponding AudioGain in AudioPort.gains
+ AudioGainMode mode;
+ AudioChannelMask channelMask; // channels to which the gain values apply
+ /**
+ * 4 = sizeof(AudioChannelMask),
+ * 8 is not "FCC_8", so it won't need to be changed for > 8 channels.
+ * Gain values in millibels for each channel ordered from LSb to MSb in
+ * channel mask. The number of values is 1 in joint mode or
+ * popcount(channel_mask).
+ */
+ int32_t[4 * 8] values;
+ uint32_t rampDurationMs; // ramp duration in ms
+};
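+
+/*
+ * Worked example (illustration only): for a stereo port
+ * (channelMask = OUT_STEREO) with mode = CHANNELS, values[0] and values[1]
+ * carry the left and right gains in millibels (e.g. -600 for -6 dB) and the
+ * remaining entries are ignored; with mode = JOINT only values[0] is used
+ * and applies to both channels.
+ */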
+
+
+/*
+ *
+ * Routing control
+ *
+ */
+
+/*
+ * Types defined here are used to describe an audio source or sink at internal
+ * framework interfaces (audio policy, patch panel) or at the audio HAL.
+ * Sinks and sources are grouped under the concept of an “audio port”,
+ * representing an audio end point at the edge of the system managed by the
+ * module exposing the interface.
+ */
+
+/** Audio port role: either source or sink */
+@export(name="audio_port_role_t", value_prefix="AUDIO_PORT_ROLE_")
+enum AudioPortRole : int32_t {
+ NONE,
+ SOURCE,
+ SINK,
+};
+
+/**
+ * Audio port type indicates if it is a session (e.g AudioTrack), a mix (e.g
+ * PlaybackThread output) or a physical device (e.g OUT_SPEAKER)
+ */
+@export(name="audio_port_type_t", value_prefix="AUDIO_PORT_TYPE_")
+enum AudioPortType : int32_t {
+ NONE,
+ DEVICE,
+ MIX,
+ SESSION,
+};
+
+/**
+ * Extension for audio port configuration structure when the audio port is a
+ * hardware device.
+ */
+struct AudioPortConfigDeviceExt {
+ AudioModuleHandle hwModule; // module the device is attached to
+ AudioDevice type; // device type (e.g OUT_SPEAKER)
+ uint8_t[32] address; // device address. "" if N/A
+};
+
+/**
+ * Extension for audio port configuration structure when the audio port is an
+ * audio session.
+ */
+struct AudioPortConfigSessionExt {
+ AudioSession session;
+};
+
+/**
+ * Flags indicating which fields are to be considered in AudioPortConfig.
+ */
+@export(name="", value_prefix="AUDIO_PORT_CONFIG_")
+enum AudioPortConfigMask : uint32_t {
+ SAMPLE_RATE = 0x1,
+ CHANNEL_MASK = 0x2,
+ FORMAT = 0x4,
+ GAIN = 0x8,
+};
+
+/**
+ * Audio port configuration structure used to specify a particular configuration
+ * of an audio port.
+ */
+struct AudioPortConfig {
+ AudioPortHandle id;
+ bitfield<AudioPortConfigMask> configMask;
+ uint32_t sampleRateHz;
+ bitfield<AudioChannelMask> channelMask;
+ AudioFormat format;
+ AudioGainConfig gain;
+ AudioPortType type; // type is used as a discriminator for Ext union
+ AudioPortRole role; // role is used as a discriminator for UseCase union
+ union Ext {
+ AudioPortConfigDeviceExt device;
+ struct AudioPortConfigMixExt {
+ AudioModuleHandle hwModule; // module the stream is attached to
+ AudioIoHandle ioHandle; // I/O handle of the input/output stream
+ union UseCase {
+ AudioStreamType stream;
+ AudioSource source;
+ } useCase;
+ } mix;
+ AudioPortConfigSessionExt session;
+ } ext;
+};
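+
+/*
+ * Illustration only: to request 48 kHz on a speaker device port, a caller
+ * could set id to the port handle, configMask = SAMPLE_RATE,
+ * sampleRateHz = 48000, type = DEVICE, role = SINK, and fill ext.device with
+ * the module handle and type = OUT_SPEAKER; fields not flagged in configMask
+ * are ignored.
+ */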
+
+/**
+ * Extension for audio port structure when the audio port is a hardware device.
+ */
+struct AudioPortDeviceExt {
+ AudioModuleHandle hwModule; // module the device is attached to
+ AudioDevice type;
+ /** 32 byte string identifying the port. */
+ uint8_t[32] address;
+};
+
+/**
+ * Latency class of the audio mix.
+ */
+@export(name="audio_mix_latency_class_t", value_prefix="AUDIO_LATENCY_")
+enum AudioMixLatencyClass : int32_t {
+ LOW,
+ NORMAL
+};
+
+struct AudioPortMixExt {
+ AudioModuleHandle hwModule; // module the stream is attached to
+ AudioIoHandle ioHandle; // I/O handle of the stream
+ AudioMixLatencyClass latencyClass;
+};
+
+/**
+ * Extension for audio port structure when the audio port is an audio session.
+ */
+struct AudioPortSessionExt {
+ AudioSession session;
+};
+
+struct AudioPort {
+ AudioPortHandle id;
+ AudioPortRole role;
+ string name;
+ vec<uint32_t> sampleRates;
+ vec<bitfield<AudioChannelMask>> channelMasks;
+ vec<AudioFormat> formats;
+ vec<AudioGain> gains;
+ AudioPortConfig activeConfig; // current audio port configuration
+ AudioPortType type; // type is used as a discriminator
+ union Ext {
+ AudioPortDeviceExt device;
+ AudioPortMixExt mix;
+ AudioPortSessionExt session;
+ } ext;
+};
+
+struct ThreadInfo {
+ int64_t pid;
+ int64_t tid;
+};
diff --git a/audio/common/all-versions/copyHAL.sh b/audio/common/all-versions/copyHAL.sh
new file mode 100755
index 0000000..d07012f
--- /dev/null
+++ b/audio/common/all-versions/copyHAL.sh
@@ -0,0 +1,204 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+if (echo "$@" |grep -qe -h); then
+ echo "This script will generate a new HAL version identical to an other one."
+ echo "This helps creating the boilerplate for a new version."
+ echo
+ echo "USAGE: $0 [BASE_VERSION] [NEW_VERSION]"
+ echo " BASE_VERSION default value is the highest version currently existing"
+ echo " NEW_VERSION default value is BASE_VERSION + 1"
+ echo
+ echo "Example: to generate a V6.0 by copying V5, do: $0 5.0 6.0"
+ exit
+fi
+readonly HAL_DIRECTORY=hardware/interfaces/audio
+readonly HAL_VTS_DIRECTORY=core/all-versions/vts/functional
+readonly HAL_VTS_FILE=AudioPrimaryHidlHalTest.cpp
+readonly HAL_SERVICE_DIRECTORY=common/all-versions/default/service/
+readonly HAL_SERVICE_CPP=service.cpp
+
+readonly FWK_DIRECTORY=frameworks/av/media/libaudiohal
+readonly IMPL_DIRECTORY=impl
+readonly IMPL_FACTORYHAL=$IMPL_DIRECTORY/include/libaudiohal/FactoryHalHidl.h
+
+readonly VTS_DIRECTORY=test/vts-testcase/hal/audio
+readonly VTS_LIST=test/vts/tools/build/tasks/list/vts_test_lib_hidl_package_list.mk
+readonly WATCHDOG=frameworks/base/services/core/java/com/android/server/Watchdog.cpp
+readonly GSI_CURRENT=build/make/target/product/gsi/current.txt
+
+readonly BASE_VERSION=${1:-$(ls $ANDROID_BUILD_TOP/$HAL_DIRECTORY | grep -E '[0-9]+\.[0-9]+' |
+ sort -n |tail -n1)}
+readonly BASE_MAJOR_VERSION=${BASE_VERSION%.*}
+readonly BASE_MINOR_VERSION=${BASE_VERSION##*.}
+
+readonly NEW_VERSION="${2:-$((${BASE_MAJOR_VERSION} + 1)).0}"
+readonly NEW_MAJOR_VERSION=${NEW_VERSION%.*}
+readonly NEW_MINOR_VERSION=${NEW_VERSION##*.}
+
+
+readonly BASE_VERSION_REGEX="${BASE_MAJOR_VERSION}[._]${BASE_MINOR_VERSION}"
+readonly NEW_VERSION_REGEX="${NEW_MAJOR_VERSION}[._]${NEW_MINOR_VERSION}"
+
+readonly BASE_VERSION_ESCAPE="${BASE_MAJOR_VERSION}\.${BASE_MINOR_VERSION}"
+readonly BASE_VERSION_UNDERSCORE="${BASE_MAJOR_VERSION}_${BASE_MINOR_VERSION}"
+readonly NEW_VERSION_UNDERSCORE="${NEW_MAJOR_VERSION}_${NEW_MINOR_VERSION}"
+updateVersion() {
+ if [ $1 == "-e" ]; then
+ local -r REGEX="$2"; shift 2
+ else
+ local -r REGEX="$BASE_VERSION_REGEX"
+ fi
+ awk -i inplace -e "{if (!/$REGEX/) print; else {
+ if (original_before) print
+ if (original_after) original_line=\$0;
+
+ gsub(/$BASE_VERSION_ESCAPE/,\"$NEW_VERSION\");
+ gsub(/$BASE_VERSION_UNDERSCORE/,\"$NEW_VERSION_UNDERSCORE\");
+ gsub(/MAJOR_VERSION=$BASE_MAJOR_VERSION/,
+ \"MAJOR_VERSION=$NEW_MAJOR_VERSION\");
+ gsub(/MINOR_VERSION=$BASE_MINOR_VERSION/,
+ \"MINOR_VERSION=$NEW_MINOR_VERSION\");
+
+ print
+ if (original_after) print original_line
+ }}" "$@"
+}
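+# Example of what updateVersion does (illustration only): a matching line such
+# as
+#   "android.hardware.audio@5.0",
+# is rewritten in place to
+#   "android.hardware.audio@6.0",
+# Passing -v original_before=1 also keeps the untouched 5.0 line above the
+# rewritten one (original_after=1 keeps it below), which is how entries for
+# the previous HAL versions are preserved in the *.bp files and service.cpp.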
+
+updateAudioVersion() {
+ updateVersion -e "audio.*$BASE_VERSION_REGEX" "$@"
+}
+
+updateLicenceDates() {
+ # Update the date on the first 2 lines
+ sed -i "1,2 s/20[0-9][0-9]/$(date +"%Y")/g" "$@"
+}
+
+echo "Creating new audio HAL V$NEW_VERSION based on V$BASE_VERSION"
+echo "Press Ctrl-C to cancel, Enter to continue"
+read
+
+MODIFIED=
+runIfNeeded() {
+ local -r TARGET=$1; shift
+ cd $ANDROID_BUILD_TOP/$TARGET
+ if grep -q -r "audio.*$NEW_VERSION_REGEX"; then
+ echo " Skipping $TARGET as already up to date"
+ else
+ echo " Updating $PWD"
+ MODIFIED+=$'\n - '$TARGET
+ "$@"
+ fi
+}
+
+createHALVersion() {
+ local -r DIRS=". common effect"
+ local COPY=
+ echo "Copy $BASE_VERSION to $NEW_VERSION in $DIRS"
+ for DIR in $DIRS; do
+ cp -Tar $DIR/$BASE_VERSION $DIR/$NEW_VERSION
+ COPY+=" $DIR/$NEW_VERSION"
+ done
+
+ echo "Replacing $BASE_VERSION by $NEW_VERSION in the copied files"
+ updateVersion $(find $COPY -type f)
+ updateLicenceDates $(find $COPY -type f)
+
+ echo "Update implementation and VTS generic code"
+ local -r FILES="*/all-versions/default/Android.bp */all-versions/vts/functional/Android.bp"
+ updateVersion -v original_before=1 -v RS= -v ORS='\n\n' $FILES
+ sed -i '${/^$/d}' $FILES # Remove \n at the end of the files
+
+ updateVersion -v original_before=1 $HAL_SERVICE_DIRECTORY/Android.bp
+
+ updateVersion -e "audio::.*$BASE_VERSION_REGEX" -v original_after=1 \
+ $HAL_SERVICE_DIRECTORY/$HAL_SERVICE_CPP
+ updateVersion -e "audio\/.*$BASE_VERSION_REGEX" -v original_before=1 \
+ $HAL_SERVICE_DIRECTORY/$HAL_SERVICE_CPP
+
+ local -r HAL_VTS_PATH=$HAL_VTS_DIRECTORY/$NEW_VERSION/$HAL_VTS_FILE
+ mkdir -p $(dirname $HAL_VTS_PATH)
+ cat > $HAL_VTS_PATH <<EOF
+/*
+ * Copyright (C) $(date +"%Y") The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// pull in all the <= $BASE_VERSION tests
+#include "$BASE_VERSION/$(basename AudioPrimaryHidlHalTest.cpp)"
+EOF
+
+ echo "New HAL version $NEW_VERSION successfully created"
+}
+
+echo "Creating new audio HAL definition, default impl and VTS"
+runIfNeeded $HAL_DIRECTORY createHALVersion
+
+
+createFrameworkAdapter() {
+ updateVersion -v original_before=1 Android.bp
+ updateVersion -v original_before=1 -v RS= -v ORS='\n\n' $IMPL_FACTORYHAL/Android.bp
+ updateVersion -v original_after=1 $IMPL_FACTORYHAL
+}
+echo "Now creating the framework adapter version"
+runIfNeeded $FWK_DIRECTORY createFrameworkAdapter
+
+createVTSXML() {
+ cp -Tar V$BASE_VERSION_UNDERSCORE V$NEW_VERSION_UNDERSCORE
+ cp -Tar effect/{V$BASE_VERSION_UNDERSCORE,V$NEW_VERSION_UNDERSCORE}
+ local -r FILES=$(find {.,effect}/V$NEW_VERSION_UNDERSCORE -type f)
+ updateVersion $FILES
+ updateLicenceDates $FILES
+}
+echo "Now update VTS XML"
+runIfNeeded $VTS_DIRECTORY createVTSXML
+
+echo "Now register new VTS"
+runIfNeeded $(dirname $VTS_LIST) updateAudioVersion -v original_before=1 $(basename $VTS_LIST)
+
+echo "Now update watchdog"
+runIfNeeded $(dirname $WATCHDOG) updateAudioVersion -v original_before=1 $(basename $WATCHDOG)
+
+echo "Now update GSI current.txt"
+runIfNeeded $(dirname $GSI_CURRENT) update-vndk-list.sh
+
+if ! [ "$MODIFIED" ]; then
+ echo
+ echo "$NEW_VERSION already exist, nothing to do"
+ exit
+fi
+
+cat << EOF
+
+All files generated successfully. Please submit a patch in all of these directories: $MODIFIED
+
+-----------------------------------------------------------
+WHAT WAS *NOT* DONE, and what you need to do now:
+ 1) You need to choose if the new HAL is optional or not for new devices.
+ Either add or replace $BASE_VERSION by $NEW_VERSION in
+ compatibility_matrices/compatibility_matrix.current.xml
+ Do not forget to update both the "audio" and "audio.effects" HALs.
+
+ 2) Then you need to choose a device to update its audio HAL implementation:
+ a) Update the HAL manifest of your device: open your device manifest.xml
+ and replace $BASE_VERSION by $NEW_VERSION for both
+ - android.hardware.audio
+ - android.hardware.audio.effect
+ b) Go to your device device.mk (usually next to the manifest) and replace:
+ - android.hardware.audio@$BASE_VERSION-impl by
+ android.hardware.audio@$NEW_VERSION-impl
+ - android.hardware.audio.effect@$BASE_VERSION-impl by
+ android.hardware.audio.effect@$NEW_VERSION-impl
+EOF
diff --git a/audio/common/all-versions/default/Android.bp b/audio/common/all-versions/default/Android.bp
index c0bd34c..0eb4a71 100644
--- a/audio/common/all-versions/default/Android.bp
+++ b/audio/common/all-versions/default/Android.bp
@@ -69,7 +69,6 @@
cc_library_shared {
name: "android.hardware.audio.common@2.0-util",
defaults: ["android.hardware.audio.common-util_default"],
-
shared_libs: [
"android.hardware.audio.common@2.0",
],
@@ -83,7 +82,6 @@
cc_library_shared {
name: "android.hardware.audio.common@4.0-util",
defaults: ["android.hardware.audio.common-util_default"],
-
shared_libs: [
"android.hardware.audio.common@4.0",
],
@@ -97,7 +95,6 @@
cc_library_shared {
name: "android.hardware.audio.common@5.0-util",
defaults: ["android.hardware.audio.common-util_default"],
-
shared_libs: [
"android.hardware.audio.common@5.0",
],
@@ -107,3 +104,16 @@
"-include common/all-versions/VersionMacro.h",
]
}
+
+cc_library_shared {
+ name: "android.hardware.audio.common@6.0-util",
+ defaults: ["android.hardware.audio.common-util_default"],
+ shared_libs: [
+ "android.hardware.audio.common@6.0",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=6",
+ "-DMINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
diff --git a/audio/common/all-versions/default/service/Android.bp b/audio/common/all-versions/default/service/Android.bp
new file mode 100644
index 0000000..4565730
--- /dev/null
+++ b/audio/common/all-versions/default/service/Android.bp
@@ -0,0 +1,51 @@
+cc_binary {
+ name: "android.hardware.audio.service",
+
+ init_rc: ["android.hardware.audio.service.rc"],
+ relative_install_path: "hw",
+ vendor: true,
+ // Only support 32 bit as the binary must always be installed at the same
+ // location for init to start it and the build system does not support
+ // having two binaries installable to the same location even if they are
+ // not installed in the same build.
+ compile_multilib: "32",
+ srcs: ["service.cpp"],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+
+ shared_libs: [
+ "libcutils",
+ "libbinder",
+ "libhidlbase",
+ "liblog",
+ "libutils",
+ "libhardware",
+ "android.hardware.audio@2.0",
+ "android.hardware.audio@4.0",
+ "android.hardware.audio@5.0",
+ "android.hardware.audio@6.0",
+ "android.hardware.audio.common@2.0",
+ "android.hardware.audio.common@4.0",
+ "android.hardware.audio.common@5.0",
+ "android.hardware.audio.common@6.0",
+ "android.hardware.audio.effect@2.0",
+ "android.hardware.audio.effect@4.0",
+ "android.hardware.audio.effect@5.0",
+ "android.hardware.audio.effect@6.0",
+ "android.hardware.bluetooth.a2dp@1.0",
+ "android.hardware.bluetooth.audio@2.0",
+ "android.hardware.soundtrigger@2.0",
+ "android.hardware.soundtrigger@2.1",
+ "android.hardware.soundtrigger@2.2",
+ ],
+}
+
+// Legacy service name, use android.hardware.audio.service instead
+phony {
+ name: "android.hardware.audio@2.0-service",
+ required: ["android.hardware.audio.service"],
+}
diff --git a/audio/common/all-versions/default/service/Android.mk b/audio/common/all-versions/default/service/Android.mk
deleted file mode 100644
index 236f1fd..0000000
--- a/audio/common/all-versions/default/service/Android.mk
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-LOCAL_PATH := $(call my-dir)
-
-#
-# Service
-#
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := android.hardware.audio@2.0-service
-LOCAL_INIT_RC := android.hardware.audio@2.0-service.rc
-LOCAL_MODULE_RELATIVE_PATH := hw
-LOCAL_PROPRIETARY_MODULE := true
-LOCAL_SRC_FILES := \
- service.cpp
-
-LOCAL_CFLAGS := -Wall -Werror
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libbinder \
- libhidlbase \
- liblog \
- libutils \
- libhardware \
- android.hardware.audio@2.0 \
- android.hardware.audio@4.0 \
- android.hardware.audio@5.0 \
- android.hardware.audio.common@2.0 \
- android.hardware.audio.common@4.0 \
- android.hardware.audio.common@5.0 \
- android.hardware.audio.effect@2.0 \
- android.hardware.audio.effect@4.0 \
- android.hardware.audio.effect@5.0 \
- android.hardware.bluetooth.a2dp@1.0 \
- android.hardware.bluetooth.audio@2.0 \
- android.hardware.soundtrigger@2.0 \
- android.hardware.soundtrigger@2.1 \
- android.hardware.soundtrigger@2.2
-
-# Can not switch to Android.bp until AUDIOSERVER_MULTILIB
-# is deprecated as build config variable are not supported
-ifeq ($(strip $(AUDIOSERVER_MULTILIB)),)
-LOCAL_MULTILIB := 32
-else
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-endif
-
-include $(BUILD_EXECUTABLE)
diff --git a/audio/common/all-versions/default/service/android.hardware.audio@2.0-service.rc b/audio/common/all-versions/default/service/android.hardware.audio.service.rc
similarity index 81%
rename from audio/common/all-versions/default/service/android.hardware.audio@2.0-service.rc
rename to audio/common/all-versions/default/service/android.hardware.audio.service.rc
index 72b4d19..63d2542 100644
--- a/audio/common/all-versions/default/service/android.hardware.audio@2.0-service.rc
+++ b/audio/common/all-versions/default/service/android.hardware.audio.service.rc
@@ -1,4 +1,4 @@
-service vendor.audio-hal-2-0 /vendor/bin/hw/android.hardware.audio@2.0-service
+service vendor.audio-hal /vendor/bin/hw/android.hardware.audio.service
class hal
user audioserver
# media gid needed for /dev/fm (radio) and for /data/misc/media (tee)
diff --git a/audio/common/all-versions/default/service/service.cpp b/audio/common/all-versions/default/service/service.cpp
index 8a7b2ea..2730f3b 100644
--- a/audio/common/all-versions/default/service/service.cpp
+++ b/audio/common/all-versions/default/service/service.cpp
@@ -19,9 +19,11 @@
#include <android/hardware/audio/2.0/IDevicesFactory.h>
#include <android/hardware/audio/4.0/IDevicesFactory.h>
#include <android/hardware/audio/5.0/IDevicesFactory.h>
+#include <android/hardware/audio/6.0/IDevicesFactory.h>
#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
#include <android/hardware/audio/effect/5.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/6.0/IEffectsFactory.h>
#include <android/hardware/bluetooth/a2dp/1.0/IBluetoothAudioOffload.h>
#include <android/hardware/bluetooth/audio/2.0/IBluetoothAudioProvidersFactory.h>
#include <android/hardware/soundtrigger/2.0/ISoundTriggerHw.h>
@@ -36,6 +38,15 @@
using namespace android::hardware;
using android::OK;
+/** Try to register the provided factories in the provided order.
+ * If any registers successfully, do not register any other and return false.
+ * If all fail, return true.
+ */
+template <class... Factories>
+bool registerPassthroughServiceImplementations() {
+ return ((registerPassthroughServiceImplementation<Factories>() != OK) && ...);
+}
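+// For example, registerPassthroughServiceImplementations<V6_0::IDevicesFactory,
+// V5_0::IDevicesFactory>() expands to
+//   (registerPassthroughServiceImplementation<V6_0::IDevicesFactory>() != OK) &&
+//   (registerPassthroughServiceImplementation<V5_0::IDevicesFactory>() != OK)
+// so the first registration that succeeds short-circuits the chain, and the
+// whole expression is true only when every registration failed.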
+
int main(int /* argc */, char* /* argv */ []) {
::android::ProcessState::initWithDriver("/dev/vndbinder");
// start a threadpool for vndbinder interactions
@@ -50,30 +61,36 @@
}
configureRpcThreadpool(16, true /*callerWillJoin*/);
- bool fail = registerPassthroughServiceImplementation<audio::V5_0::IDevicesFactory>() != OK &&
- registerPassthroughServiceImplementation<audio::V4_0::IDevicesFactory>() != OK &&
- registerPassthroughServiceImplementation<audio::V2_0::IDevicesFactory>() != OK;
- LOG_ALWAYS_FATAL_IF(fail, "Could not register audio core API 2, 4 nor 5");
+ // Keep versions on a separate line for easier parsing
+ // clang-format off
+ LOG_ALWAYS_FATAL_IF((registerPassthroughServiceImplementations<
+ audio::V6_0::IDevicesFactory,
+ audio::V5_0::IDevicesFactory,
+ audio::V4_0::IDevicesFactory,
+ audio::V2_0::IDevicesFactory>()),
+ "Could not register audio core API");
- fail = registerPassthroughServiceImplementation<audio::effect::V5_0::IEffectsFactory>() != OK &&
- registerPassthroughServiceImplementation<audio::effect::V4_0::IEffectsFactory>() != OK &&
- registerPassthroughServiceImplementation<audio::effect::V2_0::IEffectsFactory>() != OK,
- LOG_ALWAYS_FATAL_IF(fail, "Could not register audio effect API 2, 4 nor 5");
+ LOG_ALWAYS_FATAL_IF((registerPassthroughServiceImplementations<
+ audio::effect::V6_0::IEffectsFactory,
+ audio::effect::V5_0::IEffectsFactory,
+ audio::effect::V4_0::IEffectsFactory,
+ audio::effect::V2_0::IEffectsFactory>()),
+ "Could not register audio effect API");
+ // clang-format on
- fail = registerPassthroughServiceImplementation<soundtrigger::V2_2::ISoundTriggerHw>() != OK &&
- registerPassthroughServiceImplementation<soundtrigger::V2_1::ISoundTriggerHw>() != OK &&
- registerPassthroughServiceImplementation<soundtrigger::V2_0::ISoundTriggerHw>() != OK,
- ALOGW_IF(fail, "Could not register soundtrigger API 2.0, 2.1 nor 2.2");
+ ALOGW_IF((registerPassthroughServiceImplementations<soundtrigger::V2_2::ISoundTriggerHw,
+ soundtrigger::V2_1::ISoundTriggerHw,
+ soundtrigger::V2_0::ISoundTriggerHw>()),
+ "Could not register soundtrigger API");
- fail = registerPassthroughServiceImplementation<
- bluetooth::audio::V2_0::IBluetoothAudioProvidersFactory>() != OK;
- ALOGW_IF(fail, "Could not register Bluetooth Audio API 2.0");
+ ALOGW_IF(registerPassthroughServiceImplementations<
+ bluetooth::audio::V2_0::IBluetoothAudioProvidersFactory>(),
+ "Could not register Bluetooth audio API");
// remove the old HIDL when Bluetooth Audio Hal V2 has offloading supported
- fail =
- registerPassthroughServiceImplementation<bluetooth::a2dp::V1_0::IBluetoothAudioOffload>() !=
- OK;
- ALOGW_IF(fail, "Could not register Bluetooth audio offload 1.0");
+ ALOGW_IF(registerPassthroughServiceImplementations<
+ bluetooth::a2dp::V1_0::IBluetoothAudioOffload>(),
+ "Could not register Bluetooth audio offload API");
joinRpcThreadpool();
}
diff --git a/audio/core/all-versions/default/Android.bp b/audio/core/all-versions/default/Android.bp
index 000ce18..007ad85 100644
--- a/audio/core/all-versions/default/Android.bp
+++ b/audio/core/all-versions/default/Android.bp
@@ -45,13 +45,11 @@
cc_library_shared {
name: "android.hardware.audio@2.0-impl",
defaults: ["android.hardware.audio-impl_default"],
-
shared_libs: [
"android.hardware.audio@2.0",
"android.hardware.audio.common@2.0",
"android.hardware.audio.common@2.0-util",
],
-
cflags: [
"-DMAJOR_VERSION=2",
"-DMINOR_VERSION=0",
@@ -68,7 +66,6 @@
"android.hardware.audio.common@4.0",
"android.hardware.audio.common@4.0-util",
],
-
cflags: [
"-DMAJOR_VERSION=4",
"-DMINOR_VERSION=0",
@@ -79,16 +76,29 @@
cc_library_shared {
name: "android.hardware.audio@5.0-impl",
defaults: ["android.hardware.audio-impl_default"],
-
shared_libs: [
"android.hardware.audio@5.0",
"android.hardware.audio.common@5.0",
"android.hardware.audio.common@5.0-util",
],
-
cflags: [
"-DMAJOR_VERSION=5",
"-DMINOR_VERSION=0",
"-include common/all-versions/VersionMacro.h",
]
}
+
+cc_library_shared {
+ name: "android.hardware.audio@6.0-impl",
+ defaults: ["android.hardware.audio-impl_default"],
+ shared_libs: [
+ "android.hardware.audio@6.0",
+ "android.hardware.audio.common@6.0",
+ "android.hardware.audio.common@6.0-util",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=6",
+ "-DMINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
diff --git a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
new file mode 100644
index 0000000..6314ea7
--- /dev/null
+++ b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// pull in all the <= 5.0 tests
+#include "5.0/AudioPrimaryHidlHalTest.cpp"
diff --git a/audio/core/all-versions/vts/functional/Android.bp b/audio/core/all-versions/vts/functional/Android.bp
index 88fdb5a..3286ecb 100644
--- a/audio/core/all-versions/vts/functional/Android.bp
+++ b/audio/core/all-versions/vts/functional/Android.bp
@@ -82,3 +82,20 @@
"-include common/all-versions/VersionMacro.h",
]
}
+
+cc_test {
+ name: "VtsHalAudioV6_0TargetTest",
+ defaults: ["VtsHalAudioTargetTest_defaults"],
+ srcs: [
+ "6.0/AudioPrimaryHidlHalTest.cpp",
+ ],
+ static_libs: [
+ "android.hardware.audio@6.0",
+ "android.hardware.audio.common@6.0",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=6",
+ "-DMINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
diff --git a/audio/effect/5.0/xml/Android.bp b/audio/effect/5.0/xml/Android.bp
index 967135c..eb2bcee 100644
--- a/audio/effect/5.0/xml/Android.bp
+++ b/audio/effect/5.0/xml/Android.bp
@@ -1,6 +1,6 @@
xsd_config {
- name: "audio_effects_conf",
+ name: "audio_effects_conf_V5_0",
srcs: ["audio_effects_conf.xsd"],
package_name: "audio.effects.V5_0",
}
diff --git a/audio/effect/6.0/Android.bp b/audio/effect/6.0/Android.bp
new file mode 100644
index 0000000..b6184f3
--- /dev/null
+++ b/audio/effect/6.0/Android.bp
@@ -0,0 +1,33 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.audio.effect@6.0",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IAcousticEchoCancelerEffect.hal",
+ "IAutomaticGainControlEffect.hal",
+ "IBassBoostEffect.hal",
+ "IDownmixEffect.hal",
+ "IEffect.hal",
+ "IEffectBufferProviderCallback.hal",
+ "IEffectsFactory.hal",
+ "IEnvironmentalReverbEffect.hal",
+ "IEqualizerEffect.hal",
+ "ILoudnessEnhancerEffect.hal",
+ "INoiseSuppressionEffect.hal",
+ "IPresetReverbEffect.hal",
+ "IVirtualizerEffect.hal",
+ "IVisualizerEffect.hal",
+ ],
+ interfaces: [
+ "android.hardware.audio.common@6.0",
+ "android.hidl.base@1.0",
+ "android.hidl.safe_union@1.0",
+ ],
+ gen_java: false,
+ gen_java_constants: true,
+}
diff --git a/audio/effect/6.0/IAcousticEchoCancelerEffect.hal b/audio/effect/6.0/IAcousticEchoCancelerEffect.hal
new file mode 100644
index 0000000..f7d769f
--- /dev/null
+++ b/audio/effect/6.0/IAcousticEchoCancelerEffect.hal
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IAcousticEchoCancelerEffect extends IEffect {
+ /**
+ * Sets echo delay value in milliseconds.
+ */
+ setEchoDelay(uint32_t echoDelayMs) generates (Result retval);
+
+ /**
+ * Gets echo delay value in milliseconds.
+ */
+ getEchoDelay() generates (Result retval, uint32_t echoDelayMs);
+};
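For illustration only (not part of this change): a minimal client-side sketch of the set/get pattern above, assuming the standard hidl-gen C++ proxy bindings.

    #include <android/hardware/audio/effect/6.0/IAcousticEchoCancelerEffect.h>

    using ::android::sp;
    using ::android::hardware::audio::effect::V6_0::IAcousticEchoCancelerEffect;
    using ::android::hardware::audio::effect::V6_0::Result;

    void tuneAec(const sp<IAcousticEchoCancelerEffect>& aec) {
        aec->setEchoDelay(30);  // echo delay in milliseconds
        // Methods with more than one result are generated with a callback.
        aec->getEchoDelay([](Result r, uint32_t delayMs) {
            // r is the completion status, delayMs the value read back
        });
    }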
diff --git a/audio/effect/6.0/IAutomaticGainControlEffect.hal b/audio/effect/6.0/IAutomaticGainControlEffect.hal
new file mode 100644
index 0000000..d264cd4
--- /dev/null
+++ b/audio/effect/6.0/IAutomaticGainControlEffect.hal
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IAutomaticGainControlEffect extends IEffect {
+ /**
+ * Sets target level in millibels.
+ */
+ setTargetLevel(int16_t targetLevelMb) generates (Result retval);
+
+ /**
+ * Gets target level.
+ */
+ getTargetLevel() generates (Result retval, int16_t targetLevelMb);
+
+ /**
+ * Sets gain in the compression range in millibels.
+ */
+ setCompGain(int16_t compGainMb) generates (Result retval);
+
+ /**
+ * Gets gain in the compression range.
+ */
+ getCompGain() generates (Result retval, int16_t compGainMb);
+
+ /**
+ * Enables or disables limiter.
+ */
+ setLimiterEnabled(bool enabled) generates (Result retval);
+
+ /**
+ * Returns whether limiter is enabled.
+ */
+ isLimiterEnabled() generates (Result retval, bool enabled);
+
+ struct AllProperties {
+ int16_t targetLevelMb;
+ int16_t compGainMb;
+ bool limiterEnabled;
+ };
+
+ /**
+ * Sets all properties at once.
+ */
+ setAllProperties(AllProperties properties) generates (Result retval);
+
+ /**
+ * Gets all properties at once.
+ */
+ getAllProperties() generates (Result retval, AllProperties properties);
+};
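For illustration only (not part of this change): a sketch of the AllProperties batch-update pattern, assuming the hidl-gen C++ bindings and an already-obtained interface handle.

    #include <android/hardware/audio/effect/6.0/IAutomaticGainControlEffect.h>

    using ::android::sp;
    using ::android::hardware::audio::effect::V6_0::IAutomaticGainControlEffect;

    void configureAgc(const sp<IAutomaticGainControlEffect>& agc) {
        IAutomaticGainControlEffect::AllProperties props;
        props.targetLevelMb = -300;   // -3 dB target level, in millibels
        props.compGainMb = 900;       // +9 dB compression gain, in millibels
        props.limiterEnabled = true;
        agc->setAllProperties(props); // applies all three settings in one call
    }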
diff --git a/audio/effect/6.0/IBassBoostEffect.hal b/audio/effect/6.0/IBassBoostEffect.hal
new file mode 100644
index 0000000..91c4092
--- /dev/null
+++ b/audio/effect/6.0/IBassBoostEffect.hal
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IBassBoostEffect extends IEffect {
+ /**
+ * Returns whether setting bass boost strength is supported.
+ */
+ isStrengthSupported() generates (Result retval, bool strengthSupported);
+
+ enum StrengthRange : uint16_t {
+ MIN = 0,
+ MAX = 1000
+ };
+
+ /**
+ * Sets bass boost strength.
+ *
+ * @param strength strength of the effect. The valid range for strength
+ * is [0, 1000], where 0 per mille designates the
+ * mildest effect and 1000 per mille designates the
+ * strongest.
+ * @return retval operation completion status.
+ */
+ setStrength(uint16_t strength) generates (Result retval);
+
+ /**
+ * Gets bass boost strength.
+ */
+ getStrength() generates (Result retval, uint16_t strength);
+};
diff --git a/audio/effect/6.0/IDownmixEffect.hal b/audio/effect/6.0/IDownmixEffect.hal
new file mode 100644
index 0000000..e3a38e2
--- /dev/null
+++ b/audio/effect/6.0/IDownmixEffect.hal
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IDownmixEffect extends IEffect {
+ enum Type : int32_t {
+ STRIP, // throw away the extra channels
+ FOLD // mix the extra channels with FL/FR
+ };
+
+ /**
+ * Sets the current downmix preset.
+ */
+ setType(Type preset) generates (Result retval);
+
+ /**
+ * Gets the current downmix preset.
+ */
+ getType() generates (Result retval, Type preset);
+};
diff --git a/audio/effect/6.0/IEffect.hal b/audio/effect/6.0/IEffect.hal
new file mode 100644
index 0000000..b35afee
--- /dev/null
+++ b/audio/effect/6.0/IEffect.hal
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffectBufferProviderCallback;
+
+interface IEffect {
+ /**
+ * Initialize the effect engine. All configurations return to default.
+ *
+ * @return retval operation completion status.
+ */
+ @entry
+ init() generates (Result retval);
+
+ /**
+ * Apply new audio parameters configurations for input and output buffers.
+ * The provider callbacks may be empty, but in this case the buffer
+ * must be provided in the EffectConfig structure.
+ *
+ * @param config configuration descriptor.
+ * @param inputBufferProvider optional buffer provider reference.
+ * @param outputBufferProvider optional buffer provider reference.
+ * @return retval operation completion status.
+ */
+ setConfig(EffectConfig config,
+ IEffectBufferProviderCallback inputBufferProvider,
+ IEffectBufferProviderCallback outputBufferProvider)
+ generates (Result retval);
+
+ /**
+ * Reset the effect engine. Keep configuration but resets state and buffer
+ * content.
+ *
+ * @return retval operation completion status.
+ */
+ reset() generates (Result retval);
+
+ /**
+ * Enable processing.
+ *
+ * @return retval operation completion status.
+ */
+ @callflow(next={"prepareForProcessing"})
+ enable() generates (Result retval);
+
+ /**
+ * Disable processing.
+ *
+ * @return retval operation completion status.
+ */
+ @callflow(next={"close"})
+ disable() generates (Result retval);
+
+ /**
+ * Set the rendering device the audio output path is connected to. The
+ * effect implementation must set EFFECT_FLAG_DEVICE_IND flag in its
+ * descriptor to receive this command when the device changes.
+ *
+ * Note: this method is only supported for effects inserted into
+ * the output chain.
+ *
+ * @param device output device specification.
+ * @return retval operation completion status.
+ */
+ setDevice(bitfield<AudioDevice> device) generates (Result retval);
+
+ /**
+ * Set and get volume. Used by audio framework to delegate volume control to
+ * effect engine. The effect implementation must set EFFECT_FLAG_VOLUME_CTRL
+ * flag in its descriptor to receive this command. The effect engine must
+ * return the volume that should be applied before the effect is
+ * processed. The overall volume (the volume actually applied by the effect
+ * engine multiplied by the returned value) should match the value indicated
+ * in the command.
+ *
+ * @param volumes vector containing volume for each channel defined in
+ * EffectConfig for output buffer expressed in 8.24 fixed
+ * point format.
+ * @return result updated volume values.
+ * @return retval operation completion status.
+ */
+ setAndGetVolume(vec<uint32_t> volumes)
+ generates (Result retval, vec<uint32_t> result);
+
+ /**
+ * Notify the effect of the volume change. The effect implementation must
+ * set EFFECT_FLAG_VOLUME_IND flag in its descriptor to receive this
+ * command.
+ *
+ * @param volumes vector containing volume for each channel defined in
+ * EffectConfig for output buffer expressed in 8.24 fixed
+ * point format.
+ * @return retval operation completion status.
+ */
+ volumeChangeNotification(vec<uint32_t> volumes)
+ generates (Result retval);
+
+ /**
+ * Set the audio mode. The effect implementation must set
+ * EFFECT_FLAG_AUDIO_MODE_IND flag in its descriptor to receive this command
+ * when the audio mode changes.
+ *
+ * @param mode desired audio mode.
+ * @return retval operation completion status.
+ */
+ setAudioMode(AudioMode mode) generates (Result retval);
+
+ /**
+ * Apply new audio parameters configurations for input and output buffers of
+ * reverse stream. An example of reverse stream is the echo reference
+ * supplied to an Acoustic Echo Canceler.
+ *
+ * @param config configuration descriptor.
+ * @param inputBufferProvider optional buffer provider reference.
+ * @param outputBufferProvider optional buffer provider reference.
+ * @return retval operation completion status.
+ */
+ setConfigReverse(EffectConfig config,
+ IEffectBufferProviderCallback inputBufferProvider,
+ IEffectBufferProviderCallback outputBufferProvider)
+ generates (Result retval);
+
+ /**
+ * Set the capture device the audio input path is connected to. The effect
+ * implementation must set EFFECT_FLAG_DEVICE_IND flag in its descriptor to
+ * receive this command when the device changes.
+ *
+ * Note: this method is only supported for effects inserted into
+ * the input chain.
+ *
+ * @param device input device specification.
+ * @return retval operation completion status.
+ */
+ setInputDevice(bitfield<AudioDevice> device) generates (Result retval);
+
+ /**
+ * Read audio parameters configurations for input and output buffers.
+ *
+ * @return retval operation completion status.
+ * @return config configuration descriptor.
+ */
+ getConfig() generates (Result retval, EffectConfig config);
+
+ /**
+ * Read audio parameters configurations for input and output buffers of
+ * reverse stream.
+ *
+ * @return retval operation completion status.
+ * @return config configuration descriptor.
+ */
+ getConfigReverse() generates (Result retval, EffectConfig config);
+
+ /**
+ * Queries for supported combinations of main and auxiliary channels
+ * (e.g. for a multi-microphone noise suppressor).
+ *
+ * @param maxConfigs maximum number of the combinations to return.
+ * @return retval absence of the feature support is indicated using
+ * NOT_SUPPORTED code. RESULT_TOO_BIG is returned if
+ * the number of supported combinations exceeds 'maxConfigs'.
+ * @return result list of configuration descriptors.
+ */
+ getSupportedAuxChannelsConfigs(uint32_t maxConfigs)
+ generates (Result retval, vec<EffectAuxChannelsConfig> result);
+
+ /**
+ * Retrieves the current configuration of main and auxiliary channels.
+ *
+ * @return retval absence of the feature support is indicated using
+ * NOT_SUPPORTED code.
+ * @return result configuration descriptor.
+ */
+ getAuxChannelsConfig()
+ generates (Result retval, EffectAuxChannelsConfig result);
+
+ /**
+ * Sets the current configuration of main and auxiliary channels.
+ *
+ * @return retval operation completion status; absence of the feature
+ * support is indicated using NOT_SUPPORTED code.
+ */
+ setAuxChannelsConfig(EffectAuxChannelsConfig config)
+ generates (Result retval);
+
+ /**
+ * Set the audio source the capture path is configured for (Camcorder, voice
+ * recognition...).
+ *
+ * Note: this method is only supported for effects inserted into
+ * the input chain.
+ *
+ * @param source source descriptor.
+ * @return retval operation completion status.
+ */
+ setAudioSource(AudioSource source) generates (Result retval);
+
+ /**
+ * This command indicates if the playback thread the effect is attached to
+ * is offloaded or not, and updates the I/O handle of the playback thread
+ * the effect is attached to.
+ *
+ * @param param effect offload descriptor.
+ * @return retval operation completion status.
+ */
+ offload(EffectOffloadParameter param) generates (Result retval);
+
+ /**
+ * Returns the effect descriptor.
+ *
+ * @return retval operation completion status.
+ * @return descriptor effect descriptor.
+ */
+ getDescriptor() generates (Result retval, EffectDescriptor descriptor);
+
+ /**
+ * Set up required transports for passing audio buffers to the effect.
+ *
+ * The transport consists of shared memory and a message queue for reporting
+ * effect processing operation status. The shared memory is set up
+ * separately using 'setProcessBuffers' method.
+ *
+ * Processing is requested by setting 'REQUEST_PROCESS' or
+ * 'REQUEST_PROCESS_REVERSE' EventFlags associated with the status message
+ * queue. The result of processing may be one of the following:
+ * OK if there were no errors during processing;
+ * INVALID_ARGUMENTS if audio buffers are invalid;
+ * INVALID_STATE if the engine has finished the disable phase;
+ * NOT_INITIALIZED if the audio buffers were not set;
+ * NOT_SUPPORTED if the requested processing type is not supported by
+ * the effect.
+ *
+ * @return retval OK if both message queues were created successfully.
+ * INVALID_STATE if the method was already called.
+ * INVALID_ARGUMENTS if there was a problem setting up
+ * the queue.
+ * @return statusMQ a message queue used for passing status from the effect.
+ */
+ @callflow(next={"setProcessBuffers"})
+ prepareForProcessing() generates (Result retval, fmq_sync<Result> statusMQ);
+
+ /**
+ * Set up input and output buffers for processing audio data. The effect
+ * may modify both the input and the output buffer during the operation.
+ * Buffers may be set multiple times during effect lifetime.
+ *
+ * The input and the output buffer may be reused between different effects,
+ * and the input buffer may be used as an output buffer. Buffers are
+ * distinguished using 'AudioBuffer.id' field.
+ *
+ * @param inBuffer input audio buffer.
+ * @param outBuffer output audio buffer.
+ * @return retval OK if both buffers were mapped successfully.
+ * INVALID_ARGUMENTS if there was a problem with mapping
+ * any of the buffers.
+ */
+ setProcessBuffers(AudioBuffer inBuffer, AudioBuffer outBuffer)
+ generates (Result retval);
+
+ /**
+ * Execute a vendor specific command on the effect. The command code
+ * and data, as well as result data are not interpreted by Android
+ * Framework and are passed as-is between the application and the effect.
+ *
+ * The effect must use standard POSIX.1-2001 error codes for the operation
+ * completion status.
+ *
+ * Use this method only if the effect is provided by a third party, and
+ * there is no interface defined for it. This method only works for effects
+ * implemented in software.
+ *
+ * @param commandId the ID of the command.
+ * @param data command data.
+ * @param resultMaxSize maximum size in bytes of the result; can be 0.
+ * @return status command completion status.
+ * @return result result data.
+ */
+ command(uint32_t commandId, vec<uint8_t> data, uint32_t resultMaxSize)
+ generates (int32_t status, vec<uint8_t> result);
+
+ /**
+ * Set a vendor-specific parameter and apply it immediately. The parameter
+ * code and data are not interpreted by Android Framework and are passed
+ * as-is between the application and the effect.
+ *
+ * The effect must use INVALID_ARGUMENTS return code if the parameter ID is
+ * unknown or if provided parameter data is invalid. If the effect does not
+ * support setting vendor-specific parameters, it must return NOT_SUPPORTED.
+ *
+ * Use this method only if the effect is provided by a third party, and
+ * there is no interface defined for it. This method only works for effects
+ * implemented in software.
+ *
+ * @param parameter identifying data of the parameter.
+ * @param value the value of the parameter.
+ * @return retval operation completion status.
+ */
+ setParameter(vec<uint8_t> parameter, vec<uint8_t> value)
+ generates (Result retval);
+
+ /**
+ * Get a vendor-specific parameter value. The parameter code and returned
+ * data are not interpreted by Android Framework and are passed as-is
+ * between the application and the effect.
+ *
+ * The effect must use INVALID_ARGUMENTS return code if the parameter ID is
+ * unknown. If the effect does not support setting vendor-specific
+ * parameters, it must return NOT_SUPPORTED.
+ *
+ * Use this method only if the effect is provided by a third party, and
+ * there is no interface defined for it. This method only works for effects
+ * implemented in software.
+ *
+ * @param parameter identifying data of the parameter.
+ * @param valueMaxSize maximum size in bytes of the value.
+ * @return retval operation completion status.
+ * @return result the value of the parameter.
+ */
+ getParameter(vec<uint8_t> parameter, uint32_t valueMaxSize)
+ generates (Result retval, vec<uint8_t> value);
+
+ /**
+ * Get supported configs for a vendor-specific feature. The configs returned
+ * are not interpreted by Android Framework and are passed as-is between the
+ * application and the effect.
+ *
+ * The effect must use INVALID_ARGUMENTS return code if the feature ID is
+ * unknown. If the effect does not support getting vendor-specific feature
+ * configs, it must return NOT_SUPPORTED. If the feature is supported but
+ * the total number of supported configurations exceeds the maximum number
+ * indicated by the caller, the method must return RESULT_TOO_BIG.
+ *
+ * Use this method only if the effect is provided by a third party, and
+ * there is no interface defined for it. This method only works for effects
+ * implemented in software.
+ *
+ * @param featureId feature identifier.
+ * @param maxConfigs maximum number of configs to return.
+ * @param configSize size of each config in bytes.
+ * @return retval operation completion status.
+ * @return configsCount number of configs returned.
+ * @return configsData data for all the configs returned.
+ */
+ getSupportedConfigsForFeature(
+ uint32_t featureId,
+ uint32_t maxConfigs,
+ uint32_t configSize) generates (
+ Result retval,
+ uint32_t configsCount,
+ vec<uint8_t> configsData);
+
+ /**
+ * Get the current config for a vendor-specific feature. The config returned
+ * is not interpreted by Android Framework and is passed as-is between the
+ * application and the effect.
+ *
+ * The effect must use INVALID_ARGUMENTS return code if the feature ID is
+ * unknown. If the effect does not support getting vendor-specific
+ * feature configs, it must return NOT_SUPPORTED.
+ *
+ * Use this method only if the effect is provided by a third party, and
+ * there is no interface defined for it. This method only works for effects
+ * implemented in software.
+ *
+ * @param featureId feature identifier.
+ * @param configSize size of the config in bytes.
+ * @return retval operation completion status.
+ * @return configData config data.
+ */
+ getCurrentConfigForFeature(uint32_t featureId, uint32_t configSize)
+ generates (Result retval, vec<uint8_t> configData);
+
+ /**
+ * Set the current config for a vendor-specific feature. The config data
+ * is not interpreted by Android Framework and is passed as-is between the
+ * application and the effect.
+ *
+ * The effect must use INVALID_ARGUMENTS return code if the feature ID is
+ * unknown. If the effect does not support getting vendor-specific
+ * feature configs, it must return NOT_SUPPORTED.
+ *
+ * Use this method only if the effect is provided by a third party, and
+ * there is no interface defined for it. This method only works for effects
+ * implemented in software.
+ *
+ * @param featureId feature identifier.
+ * @param configData config data.
+ * @return retval operation completion status.
+ */
+ setCurrentConfigForFeature(uint32_t featureId, vec<uint8_t> configData)
+ generates (Result retval);
+
+ /**
+ * Called by the framework to deinitialize the effect and free up
+ * all the currently allocated resources. It is recommended to close
+ * the effect on the client side as soon as it becomes unused.
+ *
+ * @return retval OK in case of success.
+ * INVALID_STATE if the effect was already closed.
+ */
+ @exit
+ close() generates (Result retval);
+};
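For illustration only (not part of this change): a rough client-side sketch of the processing setup described by 'prepareForProcessing' and 'setProcessBuffers', assuming the hidl-gen C++ bindings; shared-memory allocation and the EventFlag handshake are only outlined in comments.

    #include <memory>

    #include <android/hardware/audio/effect/6.0/IEffect.h>
    #include <fmq/MessageQueue.h>

    using ::android::sp;
    using ::android::hardware::MessageQueue;
    using ::android::hardware::kSynchronizedReadWrite;
    using namespace ::android::hardware::audio::effect::V6_0;

    using StatusMQ = MessageQueue<Result, kSynchronizedReadWrite>;

    void startProcessing(const sp<IEffect>& effect,
                         const AudioBuffer& in, const AudioBuffer& out) {
        effect->enable();
        std::unique_ptr<StatusMQ> statusMQ;
        effect->prepareForProcessing([&](Result r, const auto& mqDesc) {
            if (r == Result::OK) statusMQ.reset(new StatusMQ(mqDesc));
        });
        // 'in' and 'out' wrap shared memory allocated elsewhere (see AudioBuffer).
        effect->setProcessBuffers(in, out);
        // Processing is then requested by raising REQUEST_PROCESS on the event
        // flag associated with statusMQ and reading the Result written back.
    }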
diff --git a/audio/effect/6.0/IEffectBufferProviderCallback.hal b/audio/effect/6.0/IEffectBufferProviderCallback.hal
new file mode 100644
index 0000000..097528d
--- /dev/null
+++ b/audio/effect/6.0/IEffectBufferProviderCallback.hal
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+/**
+ * This callback interface contains functions that can be used by the effect
+ * engine 'process' function to exchange input and output audio buffers.
+ */
+interface IEffectBufferProviderCallback {
+ /**
+ * Called to retrieve a buffer from which the 'process' function should
+ * read data.
+ *
+ * @return buffer audio buffer for processing
+ */
+ getBuffer() generates (AudioBuffer buffer);
+
+ /**
+ * Called to provide a buffer with the data written by 'process' function.
+ *
+ * @param buffer audio buffer for processing
+ */
+ putBuffer(AudioBuffer buffer);
+};
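For illustration only (not part of this change): a sketch of implementing this callback interface on the client side, assuming the hidl-gen C++ bindings.

    #include <android/hardware/audio/effect/6.0/IEffectBufferProviderCallback.h>

    using ::android::hardware::Return;
    using ::android::hardware::Void;
    using namespace ::android::hardware::audio::effect::V6_0;

    struct BufferProvider : public IEffectBufferProviderCallback {
        AudioBuffer mBuffer;  // assumed to wrap shared memory set up elsewhere

        Return<void> getBuffer(getBuffer_cb _hidl_cb) override {
            _hidl_cb(mBuffer);   // hand the effect a buffer to read from
            return Void();
        }
        Return<void> putBuffer(const AudioBuffer& buffer) override {
            mBuffer = buffer;    // accept the buffer written by 'process'
            return Void();
        }
    };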
diff --git a/audio/effect/6.0/IEffectsFactory.hal b/audio/effect/6.0/IEffectsFactory.hal
new file mode 100644
index 0000000..e08b2de
--- /dev/null
+++ b/audio/effect/6.0/IEffectsFactory.hal
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IEffectsFactory {
+ /**
+ * Returns descriptors of different effects in all loaded libraries.
+ *
+ * @return retval operation completion status.
+ * @return result list of effect descriptors.
+ */
+ getAllDescriptors() generates(Result retval, vec<EffectDescriptor> result);
+
+ /**
+ * Returns a descriptor of a particular effect.
+ *
+ * @return retval operation completion status.
+ * @return result effect descriptor.
+ */
+ getDescriptor(Uuid uid) generates(Result retval, EffectDescriptor result);
+
+ /**
+ * Creates an effect engine of the specified type. To release the effect
+ * engine, it is necessary to release references to the returned effect
+ * object.
+ *
+ * @param uid effect uuid.
+ * @param session audio session to which this effect instance will be
+ * attached. All effects created with the same session ID
+ * are connected in series and process the same signal
+ * stream.
+ * @param ioHandle identifies the output or input stream this effect is
+ * directed to in audio HAL.
+ * @return retval operation completion status.
+ * @return result the interface for the created effect.
+ * @return effectId the unique ID of the effect to be used with
+ * IStream::addEffect and IStream::removeEffect methods.
+ */
+ createEffect(Uuid uid, AudioSession session, AudioIoHandle ioHandle)
+ generates (Result retval, IEffect result, uint64_t effectId);
+};
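For illustration only (not part of this change): a sketch of instantiating an effect through the factory, assuming the hidl-gen C++ bindings; 'kUuid', 'session' and 'ioHandle' are placeholders the caller would obtain elsewhere (e.g. from getAllDescriptors() and the audio framework).

    #include <android/hardware/audio/effect/6.0/IEffectsFactory.h>

    using ::android::sp;
    using ::android::hardware::audio::common::V6_0::Uuid;
    using namespace ::android::hardware::audio::effect::V6_0;

    void createEffectFor(const Uuid& kUuid, int32_t session, int32_t ioHandle) {
        sp<IEffectsFactory> factory = IEffectsFactory::getService();
        if (factory == nullptr) return;
        factory->createEffect(kUuid, session, ioHandle,
                [](Result r, const sp<IEffect>& effect, uint64_t effectId) {
                    // 'effect' controls the new engine; 'effectId' is the value
                    // to pass to IStream::addEffect()/removeEffect().
                });
    }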
diff --git a/audio/effect/6.0/IEnvironmentalReverbEffect.hal b/audio/effect/6.0/IEnvironmentalReverbEffect.hal
new file mode 100644
index 0000000..8887533
--- /dev/null
+++ b/audio/effect/6.0/IEnvironmentalReverbEffect.hal
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IEnvironmentalReverbEffect extends IEffect {
+ /**
+ * Sets whether the effect should be bypassed.
+ */
+ setBypass(bool bypass) generates (Result retval);
+
+ /**
+ * Gets whether the effect should be bypassed.
+ */
+ getBypass() generates (Result retval, bool bypass);
+
+ enum ParamRange : int16_t {
+ ROOM_LEVEL_MIN = -6000,
+ ROOM_LEVEL_MAX = 0,
+ ROOM_HF_LEVEL_MIN = -4000,
+ ROOM_HF_LEVEL_MAX = 0,
+ DECAY_TIME_MIN = 100,
+ DECAY_TIME_MAX = 20000,
+ DECAY_HF_RATIO_MIN = 100,
+ DECAY_HF_RATIO_MAX = 1000,
+ REFLECTIONS_LEVEL_MIN = -6000,
+ REFLECTIONS_LEVEL_MAX = 0,
+ REFLECTIONS_DELAY_MIN = 0,
+ REFLECTIONS_DELAY_MAX = 65,
+ REVERB_LEVEL_MIN = -6000,
+ REVERB_LEVEL_MAX = 0,
+ REVERB_DELAY_MIN = 0,
+ REVERB_DELAY_MAX = 65,
+ DIFFUSION_MIN = 0,
+ DIFFUSION_MAX = 1000,
+ DENSITY_MIN = 0,
+ DENSITY_MAX = 1000
+ };
+
+ /**
+ * Sets the room level.
+ */
+ setRoomLevel(int16_t roomLevel) generates (Result retval);
+
+ /**
+ * Gets the room level.
+ */
+ getRoomLevel() generates (Result retval, int16_t roomLevel);
+
+ /**
+ * Sets the room high frequencies level.
+ */
+ setRoomHfLevel(int16_t roomHfLevel) generates (Result retval);
+
+ /**
+ * Gets the room high frequencies level.
+ */
+ getRoomHfLevel() generates (Result retval, int16_t roomHfLevel);
+
+ /**
+ * Sets the room decay time.
+ */
+ setDecayTime(uint32_t decayTime) generates (Result retval);
+
+ /**
+ * Gets the room decay time.
+ */
+ getDecayTime() generates (Result retval, uint32_t decayTime);
+
+ /**
+ * Sets the ratio of high frequencies decay.
+ */
+ setDecayHfRatio(int16_t decayHfRatio) generates (Result retval);
+
+ /**
+ * Gets the ratio of high frequencies decay.
+ */
+ getDecayHfRatio() generates (Result retval, int16_t decayHfRatio);
+
+ /**
+ * Sets the level of reflections in the room.
+ */
+ setReflectionsLevel(int16_t reflectionsLevel) generates (Result retval);
+
+ /**
+ * Gets the level of reflections in the room.
+ */
+ getReflectionsLevel() generates (Result retval, int16_t reflectionsLevel);
+
+ /**
+ * Sets the reflections delay in the room.
+ */
+ setReflectionsDelay(uint32_t reflectionsDelay) generates (Result retval);
+
+ /**
+ * Gets the reflections delay in the room.
+ */
+ getReflectionsDelay() generates (Result retval, uint32_t reflectionsDelay);
+
+ /**
+ * Sets the reverb level of the room.
+ */
+ setReverbLevel(int16_t reverbLevel) generates (Result retval);
+
+ /**
+ * Gets the reverb level of the room.
+ */
+ getReverbLevel() generates (Result retval, int16_t reverbLevel);
+
+ /**
+ * Sets the reverb delay of the room.
+ */
+ setReverbDelay(uint32_t reverbDelay) generates (Result retval);
+
+ /**
+ * Gets the reverb delay of the room.
+ */
+ getReverbDelay() generates (Result retval, uint32_t reverbDelay);
+
+ /**
+ * Sets room diffusion.
+ */
+ setDiffusion(int16_t diffusion) generates (Result retval);
+
+ /**
+ * Gets room diffusion.
+ */
+ getDiffusion() generates (Result retval, int16_t diffusion);
+
+ /**
+ * Sets room wall density.
+ */
+ setDensity(int16_t density) generates (Result retval);
+
+ /**
+ * Gets room wall density.
+ */
+ getDensity() generates (Result retval, int16_t density);
+
+ struct AllProperties {
+ int16_t roomLevel; // in millibels, range -6000 to 0
+ int16_t roomHfLevel; // in millibels, range -4000 to 0
+ uint32_t decayTime; // in milliseconds, range 100 to 20000
+ int16_t decayHfRatio; // in permilles, range 100 to 1000
+ int16_t reflectionsLevel; // in millibels, range -6000 to 0
+ uint32_t reflectionsDelay; // in milliseconds, range 0 to 65
+ int16_t reverbLevel; // in millibels, range -6000 to 0
+ uint32_t reverbDelay; // in milliseconds, range 0 to 65
+ int16_t diffusion; // in permilles, range 0 to 1000
+ int16_t density; // in permilles, range 0 to 1000
+ };
+
+ /**
+ * Sets all properties at once.
+ */
+ setAllProperties(AllProperties properties) generates (Result retval);
+
+ /**
+ * Gets all properties at once.
+ */
+ getAllProperties() generates (Result retval, AllProperties properties);
+};
diff --git a/audio/effect/6.0/IEqualizerEffect.hal b/audio/effect/6.0/IEqualizerEffect.hal
new file mode 100644
index 0000000..962d518
--- /dev/null
+++ b/audio/effect/6.0/IEqualizerEffect.hal
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IEqualizerEffect extends IEffect {
+ /**
+ * Gets the number of frequency bands that the equalizer supports.
+ */
+ getNumBands() generates (Result retval, uint16_t numBands);
+
+ /**
+ * Returns the minimum and maximum band levels supported.
+ */
+ getLevelRange()
+ generates (Result retval, int16_t minLevel, int16_t maxLevel);
+
+ /**
+ * Sets the gain for the given equalizer band.
+ */
+ setBandLevel(uint16_t band, int16_t level) generates (Result retval);
+
+ /**
+ * Gets the gain for the given equalizer band.
+ */
+ getBandLevel(uint16_t band) generates (Result retval, int16_t level);
+
+ /**
+ * Gets the center frequency of the given band, in milliHertz.
+ */
+ getBandCenterFrequency(uint16_t band)
+ generates (Result retval, uint32_t centerFreqmHz);
+
+ /**
+ * Gets the frequency range of the given frequency band, in milliHertz.
+ */
+ getBandFrequencyRange(uint16_t band)
+ generates (Result retval, uint32_t minFreqmHz, uint32_t maxFreqmHz);
+
+ /**
+ * Gets the band that has the most effect on the given frequency
+ * in milliHertz.
+ */
+ getBandForFrequency(uint32_t freqmHz)
+ generates (Result retval, uint16_t band);
+
+ /**
+ * Gets the names of all presets the equalizer supports.
+ */
+ getPresetNames() generates (Result retval, vec<string> names);
+
+ /**
+ * Sets the current preset using the index of the preset in the names
+ * vector returned via 'getPresetNames'.
+ */
+ setCurrentPreset(uint16_t preset) generates (Result retval);
+
+ /**
+ * Gets the current preset.
+ */
+ getCurrentPreset() generates (Result retval, uint16_t preset);
+
+ struct AllProperties {
+ uint16_t curPreset;
+ vec<int16_t> bandLevels;
+ };
+
+ /**
+ * Sets all properties at once.
+ */
+ setAllProperties(AllProperties properties) generates (Result retval);
+
+ /**
+ * Gets all properties at once.
+ */
+ getAllProperties() generates (Result retval, AllProperties properties);
+};
diff --git a/audio/effect/6.0/ILoudnessEnhancerEffect.hal b/audio/effect/6.0/ILoudnessEnhancerEffect.hal
new file mode 100644
index 0000000..73dc818
--- /dev/null
+++ b/audio/effect/6.0/ILoudnessEnhancerEffect.hal
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface ILoudnessEnhancerEffect extends IEffect {
+ /**
+ * Sets target gain expressed in millibels.
+ */
+ setTargetGain(int32_t targetGainMb) generates (Result retval);
+
+ /**
+ * Gets target gain expressed in millibels.
+ */
+ getTargetGain() generates (Result retval, int32_t targetGainMb);
+};
diff --git a/audio/effect/6.0/INoiseSuppressionEffect.hal b/audio/effect/6.0/INoiseSuppressionEffect.hal
new file mode 100644
index 0000000..16dee57
--- /dev/null
+++ b/audio/effect/6.0/INoiseSuppressionEffect.hal
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface INoiseSuppressionEffect extends IEffect {
+ enum Level : int32_t {
+ LOW,
+ MEDIUM,
+ HIGH
+ };
+
+ /**
+ * Sets suppression level.
+ */
+ setSuppressionLevel(Level level) generates (Result retval);
+
+ /**
+ * Gets suppression level.
+ */
+ getSuppressionLevel() generates (Result retval, Level level);
+
+ enum Type : int32_t {
+ SINGLE_CHANNEL,
+ MULTI_CHANNEL
+ };
+
+ /**
+ * Set suppression type.
+ */
+ setSuppressionType(Type type) generates (Result retval);
+
+ /**
+ * Get suppression type.
+ */
+ getSuppressionType() generates (Result retval, Type type);
+
+ struct AllProperties {
+ Level level;
+ Type type;
+ };
+
+ /**
+ * Sets all properties at once.
+ */
+ setAllProperties(AllProperties properties) generates (Result retval);
+
+ /**
+ * Gets all properties at once.
+ */
+ getAllProperties() generates (Result retval, AllProperties properties);
+};
diff --git a/audio/effect/6.0/IPresetReverbEffect.hal b/audio/effect/6.0/IPresetReverbEffect.hal
new file mode 100644
index 0000000..d00c8af
--- /dev/null
+++ b/audio/effect/6.0/IPresetReverbEffect.hal
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IPresetReverbEffect extends IEffect {
+ enum Preset : int32_t {
+ NONE, // no reverb or reflections
+ SMALLROOM, // a small room less than five meters in length
+ MEDIUMROOM, // a medium room with a length of ten meters or less
+ LARGEROOM, // a large-sized room suitable for live performances
+ MEDIUMHALL, // a medium-sized hall
+ LARGEHALL, // a large-sized hall suitable for a full orchestra
+ PLATE, // synthesis of the traditional plate reverb
+ LAST = PLATE
+ };
+
+ /**
+ * Sets the current preset.
+ */
+ setPreset(Preset preset) generates (Result retval);
+
+ /**
+ * Gets the current preset.
+ */
+ getPreset() generates (Result retval, Preset preset);
+};
diff --git a/audio/effect/6.0/IVirtualizerEffect.hal b/audio/effect/6.0/IVirtualizerEffect.hal
new file mode 100644
index 0000000..5967b33
--- /dev/null
+++ b/audio/effect/6.0/IVirtualizerEffect.hal
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IVirtualizerEffect extends IEffect {
+ /**
+ * Returns whether setting virtualization strength is supported.
+ */
+ isStrengthSupported() generates (bool strengthSupported);
+
+ enum StrengthRange : uint16_t {
+ MIN = 0,
+ MAX = 1000
+ };
+
+ /**
+ * Sets virtualization strength.
+ *
+ * @param strength strength of the effect. The valid range for strength
+ * is [0, 1000], where 0 per mille designates the
+ * mildest effect and 1000 per mille designates the
+ * strongest.
+ * @return retval operation completion status.
+ */
+ setStrength(uint16_t strength) generates (Result retval);
+
+ /**
+ * Gets virtualization strength.
+ */
+ getStrength() generates (Result retval, uint16_t strength);
+
+ struct SpeakerAngle {
+ /** Speaker channel mask */
+ bitfield<AudioChannelMask> mask;
+ // all angles are expressed in degrees and
+ // are relative to the listener.
+ int16_t azimuth; // 0 is the direction the listener faces
+ // 180 is behind the listener
+ // -90 is to their left
+ int16_t elevation; // 0 is the horizontal plane
+ // +90 is above the listener, -90 is below
+ };
+ /**
+ * Retrieves virtual speaker angles for the given channel mask on the
+ * specified device.
+ */
+ getVirtualSpeakerAngles(bitfield<AudioChannelMask> mask, AudioDevice device)
+ generates (Result retval, vec<SpeakerAngle> speakerAngles);
+
+ /**
+ * Forces the virtualizer effect for the given output device.
+ */
+ forceVirtualizationMode(AudioDevice device) generates (Result retval);
+
+ /**
+ * Returns audio device reflecting the current virtualization mode,
+ * AUDIO_DEVICE_NONE when not virtualizing.
+ */
+ getVirtualizationMode() generates (Result retval, AudioDevice device);
+};
diff --git a/audio/effect/6.0/IVisualizerEffect.hal b/audio/effect/6.0/IVisualizerEffect.hal
new file mode 100644
index 0000000..3df3e6f
--- /dev/null
+++ b/audio/effect/6.0/IVisualizerEffect.hal
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+import IEffect;
+
+interface IVisualizerEffect extends IEffect {
+ enum CaptureSizeRange : int32_t {
+ MAX = 1024, // maximum capture size in samples
+ MIN = 128 // minimum capture size in samples
+ };
+
+ /**
+ * Sets the number of PCM samples in the capture.
+ */
+ setCaptureSize(uint16_t captureSize) generates (Result retval);
+
+ /**
+ * Gets the number of PCM samples in the capture.
+ */
+ getCaptureSize() generates (Result retval, uint16_t captureSize);
+
+ enum ScalingMode : int32_t {
+ // Keep in sync with SCALING_MODE_... in
+ // frameworks/base/media/java/android/media/audiofx/Visualizer.java
+ NORMALIZED = 0,
+ AS_PLAYED = 1
+ };
+
+ /**
+ * Specifies the way the captured data is scaled.
+ */
+ setScalingMode(ScalingMode scalingMode) generates (Result retval);
+
+ /**
+ * Retrieves the way the captured data is scaled.
+ */
+ getScalingMode() generates (Result retval, ScalingMode scalingMode);
+
+ /**
+ * Informs the visualizer about the downstream latency.
+ */
+ setLatency(uint32_t latencyMs) generates (Result retval);
+
+ /**
+ * Gets the downstream latency.
+ */
+ getLatency() generates (Result retval, uint32_t latencyMs);
+
+ enum MeasurementMode : int32_t {
+ // Keep in sync with MEASUREMENT_MODE_... in
+ // frameworks/base/media/java/android/media/audiofx/Visualizer.java
+ NONE = 0x0,
+ PEAK_RMS = 0x1
+ };
+
+ /**
+ * Specifies which measurements are to be made.
+ */
+ setMeasurementMode(MeasurementMode measurementMode)
+ generates (Result retval);
+
+ /**
+ * Retrieves which measurements are to be made.
+ */
+ getMeasurementMode() generates (
+ Result retval, MeasurementMode measurementMode);
+
+ /**
+ * Retrieves the latest PCM snapshot captured by the visualizer engine. The
+ * number of samples to capture is specified by the 'setCaptureSize' parameter.
+ *
+ * @return retval operation completion status.
+ * @return samples samples in 8 bit unsigned format (0 = 0x80)
+ */
+ capture() generates (Result retval, vec<uint8_t> samples);
+
+ struct Measurement {
+ MeasurementMode mode; // discriminator
+ union Values {
+ struct PeakAndRms {
+ int32_t peakMb; // millibels
+ int32_t rmsMb; // millibels
+ } peakAndRms;
+ } value;
+ };
+ /**
+ * Retrieves the latest measurements. The measurements to be made
+ * are specified by the 'setMeasurementMode' parameter.
+ *
+ * @return retval operation completion status.
+ * @return result measurement.
+ */
+ measure() generates (Result retval, Measurement result);
+};
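For illustration only (not part of this change): a sketch of the capture/measure flow, assuming the hidl-gen C++ bindings.

    #include <android/hardware/audio/effect/6.0/IVisualizerEffect.h>

    using ::android::sp;
    using ::android::hardware::hidl_vec;
    using namespace ::android::hardware::audio::effect::V6_0;

    void pollVisualizer(const sp<IVisualizerEffect>& viz) {
        viz->setCaptureSize(256);  // must lie within CaptureSizeRange [128, 1024]
        viz->capture([](Result r, const hidl_vec<uint8_t>& samples) {
            // samples are unsigned 8-bit PCM centered at 0x80
        });
        viz->measure([](Result r, const IVisualizerEffect::Measurement& m) {
            // m.value.peakAndRms is meaningful when m.mode == PEAK_RMS
        });
    }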
diff --git a/audio/effect/6.0/types.hal b/audio/effect/6.0/types.hal
new file mode 100644
index 0000000..dd5a4ad
--- /dev/null
+++ b/audio/effect/6.0/types.hal
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio.effect@6.0;
+
+import android.hardware.audio.common@6.0;
+
+enum Result : int32_t {
+ OK,
+ NOT_INITIALIZED,
+ INVALID_ARGUMENTS,
+ INVALID_STATE,
+ NOT_SUPPORTED,
+ RESULT_TOO_BIG
+};
+
+/**
+ * Effect engine capabilities/requirements flags.
+ *
+ * Definitions for flags field of effect descriptor.
+ *
+ * +----------------+--------+--------------------------------------------------
+ * | description | bits | values
+ * +----------------+--------+--------------------------------------------------
+ * | connection | 0..2 | 0 insert: after track process
+ * | mode | | 1 auxiliary: connect to track auxiliary
+ * | | | output and use send level
+ * | | | 2 replace: replaces track process function;
+ * | | | must implement SRC, volume and mono to stereo.
+ * | | | 3 pre processing: applied below audio HAL on in
+ * | | | 4 post processing: applied below audio HAL on out
+ * | | | 5 - 7 reserved
+ * +----------------+--------+--------------------------------------------------
+ * | insertion | 3..5 | 0 none
+ * | preference | | 1 first of the chain
+ * | | | 2 last of the chain
+ * | | | 3 exclusive (only effect in the insert chain)
+ * | | | 4..7 reserved
+ * +----------------+--------+--------------------------------------------------
+ * | Volume | 6..8 | 0 none
+ * | management | | 1 implements volume control
+ * | | | 2 requires volume indication
+ * | | | 3 monitors requested volume
+ * | | | 4 reserved
+ * +----------------+--------+--------------------------------------------------
+ * | Device | 9..11 | 0 none
+ * | indication | | 1 requires device updates
+ * | | | 2, 4 reserved
+ * +----------------+--------+--------------------------------------------------
+ * | Sample input | 12..13 | 1 direct: process() function or
+ * | mode | | EFFECT_CMD_SET_CONFIG command must specify
+ * | | | a buffer descriptor
+ * | | | 2 provider: process() function uses the
+ * | | | bufferProvider indicated by the
+ * | | | EFFECT_CMD_SET_CONFIG command to request input
+ * | | | buffers.
+ * | | | 3 both: both input modes are supported
+ * +----------------+--------+--------------------------------------------------
+ * | Sample output | 14..15 | 1 direct: process() function or
+ * | mode | | EFFECT_CMD_SET_CONFIG command must specify
+ * | | | a buffer descriptor
+ * | | | 2 provider: process() function uses the
+ * | | | bufferProvider indicated by the
+ * | | | EFFECT_CMD_SET_CONFIG command to request output
+ * | | | buffers.
+ * | | | 3 both: both output modes are supported
+ * +----------------+--------+--------------------------------------------------
+ * | Hardware | 16..17 | 0 No hardware acceleration
+ * | acceleration | | 1 non tunneled hw acceleration: the process()
+ * | | | function reads the samples, send them to HW
+ * | | | accelerated effect processor, reads back
+ * | | | the processed samples and returns them
+ * | | | to the output buffer.
+ * | | | 2 tunneled hw acceleration: the process()
+ * | | | function is transparent. The effect interface
+ * | | | is only used to control the effect engine.
+ * | | | This mode is relevant for global effects
+ * | | | actually applied by the audio hardware on
+ * | | | the output stream.
+ * +----------------+--------+--------------------------------------------------
+ * | Audio Mode | 18..19 | 0 none
+ * | indication | | 1 requires audio mode updates
+ * | | | 2..3 reserved
+ * +----------------+--------+--------------------------------------------------
+ * | Audio source | 20..21 | 0 none
+ * | indication | | 1 requires audio source updates
+ * | | | 2..3 reserved
+ * +----------------+--------+--------------------------------------------------
+ * | Effect offload | 22 | 0 The effect cannot be offloaded to an audio DSP
+ * | supported | | 1 The effect can be offloaded to an audio DSP
+ * +----------------+--------+--------------------------------------------------
+ * | Process | 23 | 0 The effect implements a process function.
+ * | function | | 1 The effect does not implement a process
+ * | not | | function: enabling the effect has no impact
+ * | implemented | | on latency or CPU load.
+ * | | | Effect implementations setting this flag do not
+ * | | | have to implement a process function.
+ * +----------------+--------+--------------------------------------------------
+ */
+@export(name="", value_prefix="EFFECT_FLAG_")
+enum EffectFlags : int32_t {
+ // Insert mode
+ TYPE_SHIFT = 0,
+ TYPE_SIZE = 3,
+ TYPE_MASK = ((1 << TYPE_SIZE) -1) << TYPE_SHIFT,
+ TYPE_INSERT = 0 << TYPE_SHIFT,
+ TYPE_AUXILIARY = 1 << TYPE_SHIFT,
+ TYPE_REPLACE = 2 << TYPE_SHIFT,
+ TYPE_PRE_PROC = 3 << TYPE_SHIFT,
+ TYPE_POST_PROC = 4 << TYPE_SHIFT,
+
+ // Insert preference
+ INSERT_SHIFT = TYPE_SHIFT + TYPE_SIZE,
+ INSERT_SIZE = 3,
+ INSERT_MASK = ((1 << INSERT_SIZE) -1) << INSERT_SHIFT,
+ INSERT_ANY = 0 << INSERT_SHIFT,
+ INSERT_FIRST = 1 << INSERT_SHIFT,
+ INSERT_LAST = 2 << INSERT_SHIFT,
+ INSERT_EXCLUSIVE = 3 << INSERT_SHIFT,
+
+ // Volume control
+ VOLUME_SHIFT = INSERT_SHIFT + INSERT_SIZE,
+ VOLUME_SIZE = 3,
+ VOLUME_MASK = ((1 << VOLUME_SIZE) -1) << VOLUME_SHIFT,
+ VOLUME_CTRL = 1 << VOLUME_SHIFT,
+ VOLUME_IND = 2 << VOLUME_SHIFT,
+ VOLUME_MONITOR = 3 << VOLUME_SHIFT,
+ VOLUME_NONE = 0 << VOLUME_SHIFT,
+
+ // Device indication
+ DEVICE_SHIFT = VOLUME_SHIFT + VOLUME_SIZE,
+ DEVICE_SIZE = 3,
+ DEVICE_MASK = ((1 << DEVICE_SIZE) -1) << DEVICE_SHIFT,
+ DEVICE_IND = 1 << DEVICE_SHIFT,
+ DEVICE_NONE = 0 << DEVICE_SHIFT,
+
+ // Sample input modes
+ INPUT_SHIFT = DEVICE_SHIFT + DEVICE_SIZE,
+ INPUT_SIZE = 2,
+ INPUT_MASK = ((1 << INPUT_SIZE) -1) << INPUT_SHIFT,
+ INPUT_DIRECT = 1 << INPUT_SHIFT,
+ INPUT_PROVIDER = 2 << INPUT_SHIFT,
+ INPUT_BOTH = 3 << INPUT_SHIFT,
+
+ // Sample output modes
+ OUTPUT_SHIFT = INPUT_SHIFT + INPUT_SIZE,
+ OUTPUT_SIZE = 2,
+ OUTPUT_MASK = ((1 << OUTPUT_SIZE) -1) << OUTPUT_SHIFT,
+ OUTPUT_DIRECT = 1 << OUTPUT_SHIFT,
+ OUTPUT_PROVIDER = 2 << OUTPUT_SHIFT,
+ OUTPUT_BOTH = 3 << OUTPUT_SHIFT,
+
+ // Hardware acceleration mode
+ HW_ACC_SHIFT = OUTPUT_SHIFT + OUTPUT_SIZE,
+ HW_ACC_SIZE = 2,
+ HW_ACC_MASK = ((1 << HW_ACC_SIZE) -1) << HW_ACC_SHIFT,
+ HW_ACC_SIMPLE = 1 << HW_ACC_SHIFT,
+ HW_ACC_TUNNEL = 2 << HW_ACC_SHIFT,
+
+ // Audio mode indication
+ AUDIO_MODE_SHIFT = HW_ACC_SHIFT + HW_ACC_SIZE,
+ AUDIO_MODE_SIZE = 2,
+ AUDIO_MODE_MASK = ((1 << AUDIO_MODE_SIZE) -1) << AUDIO_MODE_SHIFT,
+ AUDIO_MODE_IND = 1 << AUDIO_MODE_SHIFT,
+ AUDIO_MODE_NONE = 0 << AUDIO_MODE_SHIFT,
+
+ // Audio source indication
+ AUDIO_SOURCE_SHIFT = AUDIO_MODE_SHIFT + AUDIO_MODE_SIZE,
+ AUDIO_SOURCE_SIZE = 2,
+ AUDIO_SOURCE_MASK = ((1 << AUDIO_SOURCE_SIZE) -1) << AUDIO_SOURCE_SHIFT,
+ AUDIO_SOURCE_IND = 1 << AUDIO_SOURCE_SHIFT,
+ AUDIO_SOURCE_NONE = 0 << AUDIO_SOURCE_SHIFT,
+
+ // Effect offload indication
+ OFFLOAD_SHIFT = AUDIO_SOURCE_SHIFT + AUDIO_SOURCE_SIZE,
+ OFFLOAD_SIZE = 1,
+ OFFLOAD_MASK = ((1 << OFFLOAD_SIZE) -1) << OFFLOAD_SHIFT,
+ OFFLOAD_SUPPORTED = 1 << OFFLOAD_SHIFT,
+
+ // Effect has no process indication
+ NO_PROCESS_SHIFT = OFFLOAD_SHIFT + OFFLOAD_SIZE,
+ NO_PROCESS_SIZE = 1,
+ NO_PROCESS_MASK = ((1 << NO_PROCESS_SIZE) -1) << NO_PROCESS_SHIFT,
+ NO_PROCESS = 1 << NO_PROCESS_SHIFT
+};
+
+/**
+ * The effect descriptor contains necessary information to facilitate the
+ * enumeration of the effect engines present in a library.
+ */
+struct EffectDescriptor {
+ Uuid type; // UUID of the OpenSL ES interface implemented
+ // by this effect
+ Uuid uuid; // UUID for this particular implementation
+ bitfield<EffectFlags> flags; // effect engine capabilities/requirements flags
+ uint16_t cpuLoad; // CPU load indication expressed in 0.1 MIPS units
+ // as estimated on an ARM9E core (ARMv5TE) with 0 WS
+ uint16_t memoryUsage; // data memory usage expressed in KB and includes
+ // only dynamically allocated memory
+ uint8_t[64] name; // human readable effect name
+ uint8_t[64] implementor; // human readable effect implementor name
+};
+
+/**
+ * A buffer is a chunk of audio data for processing. Multi-channel audio is
+ * always interleaved. The channel order is from LSB to MSB with regard to the
+ * channel mask definition in audio.h, audio_channel_mask_t, e.g.:
+ * Stereo: L, R; 5.1: FL, FR, FC, LFE, BL, BR.
+ *
+ * The buffer size is expressed in frame count, a frame being composed of
+ * samples for all channels at a given time. Frame size for unspecified format
+ * (AUDIO_FORMAT_OTHER) is 8 bit by definition.
+ */
+struct AudioBuffer {
+ uint64_t id;
+ uint32_t frameCount;
+ memory data;
+};
+
+@export(name="effect_buffer_access_e", value_prefix="EFFECT_BUFFER_")
+enum EffectBufferAccess : int32_t {
+ ACCESS_WRITE,
+ ACCESS_READ,
+ ACCESS_ACCUMULATE
+};
+
+/**
+ * Determines what fields of EffectBufferConfig need to be considered.
+ */
+@export(name="", value_prefix="EFFECT_CONFIG_")
+enum EffectConfigParameters : int32_t {
+ BUFFER = 0x0001, // buffer field
+ SMP_RATE = 0x0002, // samplingRate
+ CHANNELS = 0x0004, // channels
+ FORMAT = 0x0008, // format
+ ACC_MODE = 0x0010, // accessMode
+ // Note that the ALL value from version 2.0 has been moved to a helper function
+};
+
+/**
+ * The buffer config structure specifies the input or output audio format
+ * to be used by the effect engine.
+ */
+struct EffectBufferConfig {
+ AudioBuffer buffer;
+ uint32_t samplingRateHz;
+ bitfield<AudioChannelMask> channels;
+ AudioFormat format;
+ EffectBufferAccess accessMode;
+ bitfield<EffectConfigParameters> mask;
+};
+
+struct EffectConfig {
+ EffectBufferConfig inputCfg;
+ EffectBufferConfig outputCfg;
+};
+
+@export(name="effect_feature_e", value_prefix="EFFECT_FEATURE_")
+enum EffectFeature : int32_t {
+ AUX_CHANNELS, // supports auxiliary channels
+ // (e.g. dual mic noise suppressor)
+ CNT
+};
+
+struct EffectAuxChannelsConfig {
+ bitfield<AudioChannelMask> mainChannels; // channel mask for main channels
+ bitfield<AudioChannelMask> auxChannels; // channel mask for auxiliary channels
+};
+
+struct EffectOffloadParameter {
+ bool isOffload; // true if the playback thread the effect
+ // is attached to is offloaded
+ AudioIoHandle ioHandle; // io handle of the playback thread
+ // the effect is attached to
+};
+
+/**
+ * The message queue flags used to synchronize reads and writes from
+ * the status message queue used by effects.
+ */
+enum MessageQueueFlagBits : uint32_t {
+ DONE_PROCESSING = 1 << 0,
+ REQUEST_PROCESS = 1 << 1,
+ REQUEST_PROCESS_REVERSE = 1 << 2,
+ REQUEST_QUIT = 1 << 3,
+ REQUEST_PROCESS_ALL =
+ REQUEST_PROCESS | REQUEST_PROCESS_REVERSE | REQUEST_QUIT
+};
diff --git a/audio/effect/6.0/xml/Android.bp b/audio/effect/6.0/xml/Android.bp
new file mode 100644
index 0000000..353686b
--- /dev/null
+++ b/audio/effect/6.0/xml/Android.bp
@@ -0,0 +1,6 @@
+
+xsd_config {
+ name: "audio_effects_conf_V6_0",
+ srcs: ["audio_effects_conf.xsd"],
+ package_name: "audio.effects.V6_0",
+}
diff --git a/audio/effect/6.0/xml/api/current.txt b/audio/effect/6.0/xml/api/current.txt
new file mode 100644
index 0000000..2021639
--- /dev/null
+++ b/audio/effect/6.0/xml/api/current.txt
@@ -0,0 +1,132 @@
+// Signature format: 2.0
+package audio.effects.V6_0 {
+
+ public class AudioEffectsConf {
+ ctor public AudioEffectsConf();
+ method public audio.effects.V6_0.EffectsType getEffects();
+ method public audio.effects.V6_0.LibrariesType getLibraries();
+ method public audio.effects.V6_0.AudioEffectsConf.Postprocess getPostprocess();
+ method public audio.effects.V6_0.AudioEffectsConf.Preprocess getPreprocess();
+ method public audio.effects.V6_0.VersionType getVersion();
+ method public void setEffects(audio.effects.V6_0.EffectsType);
+ method public void setLibraries(audio.effects.V6_0.LibrariesType);
+ method public void setPostprocess(audio.effects.V6_0.AudioEffectsConf.Postprocess);
+ method public void setPreprocess(audio.effects.V6_0.AudioEffectsConf.Preprocess);
+ method public void setVersion(audio.effects.V6_0.VersionType);
+ }
+
+ public static class AudioEffectsConf.Postprocess {
+ ctor public AudioEffectsConf.Postprocess();
+ method public java.util.List<audio.effects.V6_0.StreamPostprocessType> getStream();
+ }
+
+ public static class AudioEffectsConf.Preprocess {
+ ctor public AudioEffectsConf.Preprocess();
+ method public java.util.List<audio.effects.V6_0.StreamPreprocessType> getStream();
+ }
+
+ public class EffectImplType {
+ ctor public EffectImplType();
+ method public String getLibrary();
+ method public String getUuid();
+ method public void setLibrary(String);
+ method public void setUuid(String);
+ }
+
+ public class EffectProxyType extends audio.effects.V6_0.EffectType {
+ ctor public EffectProxyType();
+ method public audio.effects.V6_0.EffectImplType getLibhw();
+ method public audio.effects.V6_0.EffectImplType getLibsw();
+ method public void setLibhw(audio.effects.V6_0.EffectImplType);
+ method public void setLibsw(audio.effects.V6_0.EffectImplType);
+ }
+
+ public class EffectType extends audio.effects.V6_0.EffectImplType {
+ ctor public EffectType();
+ method public String getName();
+ method public void setName(String);
+ }
+
+ public class EffectsType {
+ ctor public EffectsType();
+ method public java.util.List<audio.effects.V6_0.EffectProxyType> getEffectProxy_optional();
+ method public java.util.List<audio.effects.V6_0.EffectType> getEffect_optional();
+ }
+
+ public class LibrariesType {
+ ctor public LibrariesType();
+ method public java.util.List<audio.effects.V6_0.LibrariesType.Library> getLibrary();
+ }
+
+ public static class LibrariesType.Library {
+ ctor public LibrariesType.Library();
+ method public String getName();
+ method public String getPath();
+ method public void setName(String);
+ method public void setPath(String);
+ }
+
+ public enum StreamInputType {
+ method public String getRawName();
+ enum_constant public static final audio.effects.V6_0.StreamInputType camcorder;
+ enum_constant public static final audio.effects.V6_0.StreamInputType mic;
+ enum_constant public static final audio.effects.V6_0.StreamInputType unprocessed;
+ enum_constant public static final audio.effects.V6_0.StreamInputType voice_call;
+ enum_constant public static final audio.effects.V6_0.StreamInputType voice_communication;
+ enum_constant public static final audio.effects.V6_0.StreamInputType voice_downlink;
+ enum_constant public static final audio.effects.V6_0.StreamInputType voice_performance;
+ enum_constant public static final audio.effects.V6_0.StreamInputType voice_recognition;
+ enum_constant public static final audio.effects.V6_0.StreamInputType voice_uplink;
+ }
+
+ public enum StreamOutputType {
+ method public String getRawName();
+ enum_constant public static final audio.effects.V6_0.StreamOutputType alarm;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType bluetooth_sco;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType dtmf;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType enforced_audible;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType music;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType notification;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType ring;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType system;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType tts;
+ enum_constant public static final audio.effects.V6_0.StreamOutputType voice_call;
+ }
+
+ public class StreamPostprocessType extends audio.effects.V6_0.StreamProcessingType {
+ ctor public StreamPostprocessType();
+ method public audio.effects.V6_0.StreamOutputType getType();
+ method public void setType(audio.effects.V6_0.StreamOutputType);
+ }
+
+ public class StreamPreprocessType extends audio.effects.V6_0.StreamProcessingType {
+ ctor public StreamPreprocessType();
+ method public audio.effects.V6_0.StreamInputType getType();
+ method public void setType(audio.effects.V6_0.StreamInputType);
+ }
+
+ public class StreamProcessingType {
+ ctor public StreamProcessingType();
+ method public java.util.List<audio.effects.V6_0.StreamProcessingType.Apply> getApply();
+ }
+
+ public static class StreamProcessingType.Apply {
+ ctor public StreamProcessingType.Apply();
+ method public String getEffect();
+ method public void setEffect(String);
+ }
+
+ public enum VersionType {
+ method public String getRawName();
+ enum_constant public static final audio.effects.V6_0.VersionType _2_0;
+ }
+
+ public class XmlParser {
+ ctor public XmlParser();
+ method public static audio.effects.V6_0.AudioEffectsConf read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ }
+
+}
+
diff --git a/audio/effect/6.0/xml/api/last_current.txt b/audio/effect/6.0/xml/api/last_current.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/audio/effect/6.0/xml/api/last_current.txt
diff --git a/audio/effect/6.0/xml/api/last_removed.txt b/audio/effect/6.0/xml/api/last_removed.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/audio/effect/6.0/xml/api/last_removed.txt
diff --git a/audio/effect/6.0/xml/api/removed.txt b/audio/effect/6.0/xml/api/removed.txt
new file mode 100644
index 0000000..d802177
--- /dev/null
+++ b/audio/effect/6.0/xml/api/removed.txt
@@ -0,0 +1 @@
+// Signature format: 2.0
diff --git a/audio/effect/6.0/xml/audio_effects_conf.xsd b/audio/effect/6.0/xml/audio_effects_conf.xsd
new file mode 120000
index 0000000..9d85fa7
--- /dev/null
+++ b/audio/effect/6.0/xml/audio_effects_conf.xsd
@@ -0,0 +1 @@
+../../2.0/xml/audio_effects_conf.xsd
\ No newline at end of file
diff --git a/audio/effect/all-versions/default/Android.bp b/audio/effect/all-versions/default/Android.bp
index 653b30c..d9bb78b 100644
--- a/audio/effect/all-versions/default/Android.bp
+++ b/audio/effect/all-versions/default/Android.bp
@@ -52,7 +52,6 @@
"android.hardware.audio.common@2.0-util",
"android.hardware.audio.effect@2.0",
],
-
cflags: [
"-DMAJOR_VERSION=2",
"-DMINOR_VERSION=0",
@@ -68,7 +67,6 @@
"android.hardware.audio.common@4.0-util",
"android.hardware.audio.effect@4.0",
],
-
cflags: [
"-DMAJOR_VERSION=4",
"-DMINOR_VERSION=0",
@@ -84,10 +82,24 @@
"android.hardware.audio.common@5.0-util",
"android.hardware.audio.effect@5.0",
],
-
cflags: [
"-DMAJOR_VERSION=5",
"-DMINOR_VERSION=0",
"-include common/all-versions/VersionMacro.h",
]
}
+
+cc_library_shared {
+ name: "android.hardware.audio.effect@6.0-impl",
+ defaults: ["android.hardware.audio.effect-impl_default"],
+ shared_libs: [
+ "android.hardware.audio.common@6.0",
+ "android.hardware.audio.common@6.0-util",
+ "android.hardware.audio.effect@6.0",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=6",
+ "-DMINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
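All four versioned libraries share the same sources through the "android.hardware.audio.effect-impl_default" defaults; only the -DMAJOR_VERSION / -DMINOR_VERSION flags and the force-included common/all-versions/VersionMacro.h differ per module. The sketch below illustrates the general token-pasting technique such a header can use so that version-independent code resolves to the right generated namespace; the macro names here are hypothetical and do not claim to match the actual contents of VersionMacro.h.

    // Build with the same flags as the 6.0 module above:
    //   -DMAJOR_VERSION=6 -DMINOR_VERSION=0
    // (In practice the include path would be versioned the same way; 6.0 is
    // hard-coded here only to keep the sketch self-contained.)
    #include <android/hardware/audio/effect/6.0/types.h>

    // Two-level expansion so MAJOR_VERSION / MINOR_VERSION are substituted
    // before the tokens are pasted together.
    #define VERSION_CONCAT_(maj, min) V##maj##_##min
    #define VERSION_CONCAT(maj, min) VERSION_CONCAT_(maj, min)
    #define EFFECT_HAL_VERSION VERSION_CONCAT(MAJOR_VERSION, MINOR_VERSION)  // -> V6_0

    namespace effect_ns = ::android::hardware::audio::effect::EFFECT_HAL_VERSION;

    // Shared implementation files can refer to effect_ns::EffectConfig,
    // effect_ns::EffectDescriptor, etc., and the identical sources are rebuilt
    // as the 2.0, 4.0, 5.0, or 6.0 library purely by switching the cflags.
    effect_ns::EffectConfig gDefaultEffectConfig;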
diff --git a/audio/effect/all-versions/vts/functional/Android.bp b/audio/effect/all-versions/vts/functional/Android.bp
index cccb5c8..edc9076 100644
--- a/audio/effect/all-versions/vts/functional/Android.bp
+++ b/audio/effect/all-versions/vts/functional/Android.bp
@@ -76,3 +76,16 @@
]
}
+cc_test {
+ name: "VtsHalAudioEffectV6_0TargetTest",
+ defaults: ["VtsHalAudioEffectTargetTest_default"],
+ static_libs: [
+ "android.hardware.audio.common@6.0",
+ "android.hardware.audio.effect@6.0",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=6",
+ "-DMINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 9d68264..ea07411 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -10,6 +10,7 @@
<hal format="hidl" optional="false">
<name>android.hardware.audio</name>
<version>5.0</version>
+ <version>6.0</version>
<interface>
<name>IDevicesFactory</name>
<instance>default</instance>
@@ -18,6 +19,7 @@
<hal format="hidl" optional="false">
<name>android.hardware.audio.effect</name>
<version>5.0</version>
+ <version>6.0</version>
<interface>
<name>IEffectsFactory</name>
<instance>default</instance>