Merge changes from topic "hidl-clear-buf"
* changes:
HIDL HALs mark sensitive data
HIDL: IFoo mark @SensitiveData
diff --git a/audio/7.0/IDevice.hal b/audio/7.0/IDevice.hal
index 7082d6b..e30e545 100644
--- a/audio/7.0/IDevice.hal
+++ b/audio/7.0/IDevice.hal
@@ -117,7 +117,7 @@
AudioIoHandle ioHandle,
DeviceAddress device,
AudioConfig config,
- bitfield<AudioOutputFlag> flags,
+ vec<AudioInOutFlag> flags,
SourceMetadata sourceMetadata) generates (
Result retval,
IStreamOut outStream,
@@ -142,7 +142,7 @@
AudioIoHandle ioHandle,
DeviceAddress device,
AudioConfig config,
- bitfield<AudioInputFlag> flags,
+ vec<AudioInOutFlag> flags,
SinkMetadata sinkMetadata) generates (
Result retval,
IStreamIn inStream,
@@ -315,7 +315,6 @@
* INVALID_STATE if the device was already closed
* or there are streams currently opened.
*/
- @exit
close() generates (Result retval);
/**
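As a side note on the new flags arguments above (this sketch and its names are illustrative assumptions, not part of the change): since AudioInOutFlag is a string type, a client now passes a vector of flag names taken from the 'audioInOutFlag' values in audio_policy_configuration.xsd instead of an OR-ed bitfield. A minimal C++ sketch:

    #include <hidl/HidlSupport.h>

    using ::android::hardware::hidl_string;
    using ::android::hardware::hidl_vec;

    // Flags that used to be OR-ed bits are now a list of flag names matching
    // the 'audioInOutFlag' enumeration in audio_policy_configuration.xsd.
    hidl_vec<hidl_string> makePrimaryOutputFlags() {
        return hidl_vec<hidl_string>{"AUDIO_OUTPUT_FLAG_PRIMARY",
                                     "AUDIO_OUTPUT_FLAG_FAST"};
    }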
diff --git a/audio/7.0/IStream.hal b/audio/7.0/IStream.hal
index dacd3fd..4fe8218 100644
--- a/audio/7.0/IStream.hal
+++ b/audio/7.0/IStream.hal
@@ -44,111 +44,42 @@
getBufferSize() generates (uint64_t bufferSize);
/**
- * Return the sampling rate in Hz.
+ * Return supported audio profiles for this particular stream. This method
+ * is normally called for streams opened on devices that use dynamic
+ * profiles, e.g. HDMI and USB interfaces. Please note that supported
+ * profiles of the stream may differ from the capabilities of the connected
+ * physical device.
*
- * @return sampleRateHz sample rate in Hz.
- */
- getSampleRate() generates (uint32_t sampleRateHz);
-
- /**
- * Return supported native sampling rates of the stream for a given format.
- * A supported native sample rate is a sample rate that can be efficiently
- * played by the hardware (typically without sample-rate conversions).
- *
- * This function is only called for dynamic profile. If called for
- * non-dynamic profile is should return NOT_SUPPORTED or the same list
- * as in audio_policy_configuration.xml.
- *
- * Calling this method is equivalent to getting
- * AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES on the legacy HAL.
- *
- *
- * @param format audio format for which the sample rates are supported.
- * @return retval operation completion status.
- * Must be OK if the format is supported.
- * @return sampleRateHz supported sample rates.
- */
- getSupportedSampleRates(AudioFormat format)
- generates (Result retval, vec<uint32_t> sampleRates);
-
- /**
- * Sets the sampling rate of the stream. Calling this method is equivalent
- * to setting AUDIO_PARAMETER_STREAM_SAMPLING_RATE on the legacy HAL.
- * Optional method. If implemented, only called on a stopped stream.
- *
- * @param sampleRateHz sample rate in Hz.
- * @return retval operation completion status.
- */
- setSampleRate(uint32_t sampleRateHz) generates (Result retval);
-
- /**
- * Return the channel mask of the stream.
- *
- * @return mask channel mask.
- */
- getChannelMask() generates (bitfield<AudioChannelMask> mask);
-
- /**
- * Return supported channel masks of the stream. Calling this method is
- * equivalent to getting AUDIO_PARAMETER_STREAM_SUP_CHANNELS on the legacy
- * HAL.
- *
- * @param format audio format for which the channel masks are supported.
- * @return retval operation completion status.
- * Must be OK if the format is supported.
- * @return masks supported audio masks.
- */
- getSupportedChannelMasks(AudioFormat format)
- generates (Result retval, vec<bitfield<AudioChannelMask>> masks);
-
- /**
- * Sets the channel mask of the stream. Calling this method is equivalent to
- * setting AUDIO_PARAMETER_STREAM_CHANNELS on the legacy HAL.
- * Optional method
- *
- * @param format audio format.
- * @return retval operation completion status.
- */
- setChannelMask(bitfield<AudioChannelMask> mask) generates (Result retval);
-
- /**
- * Return the audio format of the stream.
- *
- * @return format audio format.
- */
- getFormat() generates (AudioFormat format);
-
- /**
- * Return supported audio formats of the stream. Calling this method is
- * equivalent to getting AUDIO_PARAMETER_STREAM_SUP_FORMATS on the legacy
- * HAL.
+ * For devices with fixed configurations, e.g. built-in audio devices, all
+ * the profiles are specified in the audio_policy_configuration.xml
+ * file. For such devices, this method must return the configuration from
+     * the config file, or the NOT_SUPPORTED retval.
*
* @return retval operation completion status.
- * @return formats supported audio formats.
+     * @return profiles supported audio profiles.
* Must be non empty if retval is OK.
*/
- getSupportedFormats() generates (Result retval, vec<AudioFormat> formats);
+ getSupportedProfiles()
+ generates (Result retval, vec<AudioProfile> profiles);
/**
- * Sets the audio format of the stream. Calling this method is equivalent to
- * setting AUDIO_PARAMETER_STREAM_FORMAT on the legacy HAL.
- * Optional method
+ * Retrieves basic stream configuration: sample rate, audio format,
+ * channel mask.
*
- * @param format audio format.
+ * @return config basic stream configuration.
+ */
+ getAudioProperties() generates (AudioConfigBase config);
+
+ /**
+ * Sets stream parameters. Only sets parameters that are specified.
+ * See the description of AudioConfigBase for the details.
+ *
+ * Optional method. If implemented, only called on a stopped stream.
+ *
+ * @param config basic stream configuration.
* @return retval operation completion status.
*/
- setFormat(AudioFormat format) generates (Result retval);
-
- /**
- * Convenience method for retrieving several stream parameters in
- * one transaction.
- *
- * @return sampleRateHz sample rate in Hz.
- * @return mask channel mask.
- * @return format audio format.
- */
- getAudioProperties() generates (
- uint32_t sampleRateHz, bitfield<AudioChannelMask> mask, AudioFormat format);
+ setAudioProperties(AudioConfigBase config) generates (Result retval);
/**
* Applies audio effect to the stream.
@@ -312,6 +243,5 @@
* output stream interface.
* INVALID_STATE if the stream was already closed.
*/
- @exit
close() generates (Result retval);
};
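For orientation, a rough client-side sketch of the reworked IStream methods (the header path and namespace aliases follow standard HIDL code generation but are assumptions here; error handling is omitted). HIDL methods that generate non-trivial results deliver them through a callback:

    #include <android/hardware/audio/7.0/IStream.h>
    #include <utils/StrongPointer.h>

    using ::android::sp;
    using ::android::hardware::hidl_vec;
    using namespace ::android::hardware::audio::V7_0;
    using namespace ::android::hardware::audio::common::V7_0;

    void dumpStreamConfig(const sp<IStream>& stream) {
        // Profiles of a stream opened on a dynamic-profile device (HDMI, USB).
        stream->getSupportedProfiles(
                [](Result retval, const hidl_vec<AudioProfile>& profiles) {
                    if (retval == Result::OK) {
                        // Inspect the supported profiles here.
                        (void)profiles;
                    }
                });
        // Basic configuration: sample rate, channel mask, format.
        stream->getAudioProperties([](const AudioConfigBase& config) {
            (void)config;
        });
    }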
diff --git a/audio/7.0/IStreamIn.hal b/audio/7.0/IStreamIn.hal
index 15e4363..0a3f24b 100644
--- a/audio/7.0/IStreamIn.hal
+++ b/audio/7.0/IStreamIn.hal
@@ -100,7 +100,7 @@
*
* The driver operates on a dedicated thread. The client must ensure that
* the thread is given an appropriate priority and assigned to correct
- * scheduler and cgroup. For this purpose, the method returns identifiers
+ * scheduler and cgroup. For this purpose, the method returns the identifier
* of the driver thread.
*
* @param frameSize the size of a single frame, in bytes.
@@ -115,7 +115,9 @@
* specified at the stream opening.
* @return statusMQ a message queue used for passing status from the driver
* using ReadStatus structures.
- * @return threadInfo identifiers of the driver's dedicated thread.
+ * @return threadId identifier of the driver's dedicated thread; the caller
+ * may adjust the thread priority to match the priority
+ * of the thread that provides audio data.
*/
prepareForReading(uint32_t frameSize, uint32_t framesCount)
generates (
@@ -123,7 +125,7 @@
fmq_sync<ReadParameters> commandMQ,
fmq_sync<uint8_t> dataMQ,
fmq_sync<ReadStatus> statusMQ,
- ThreadInfo threadInfo);
+ int32_t threadId);
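To illustrate the note about threadId above (the same pattern applies to IStreamOut::prepareForWriting further down): one way a caller could align priorities is the plain POSIX setpriority() call, which on Linux accepts a thread id with PRIO_PROCESS. This is only a sketch of a possible client, not something mandated by the HAL:

    #include <cstdint>
    #include <sys/resource.h>

    // Give the HAL driver thread the same nice value as the client thread
    // that produces or consumes the audio data.
    void matchDriverThreadPriority(int32_t driverThreadId, int niceValue) {
        setpriority(PRIO_PROCESS, static_cast<id_t>(driverThreadId), niceValue);
    }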
/**
* Return the amount of input frames lost in the audio driver since the last
diff --git a/audio/7.0/IStreamOut.hal b/audio/7.0/IStreamOut.hal
index 208beb6..38d750f 100644
--- a/audio/7.0/IStreamOut.hal
+++ b/audio/7.0/IStreamOut.hal
@@ -95,7 +95,7 @@
*
* The driver operates on a dedicated thread. The client must ensure that
* the thread is given an appropriate priority and assigned to correct
- * scheduler and cgroup. For this purpose, the method returns identifiers
+ * scheduler and cgroup. For this purpose, the method returns the identifier
* of the driver thread.
*
* @param frameSize the size of a single frame, in bytes.
@@ -109,7 +109,9 @@
* specified at the stream opening.
* @return statusMQ a message queue used for passing status from the driver
* using WriteStatus structures.
- * @return threadInfo identifiers of the driver's dedicated thread.
+ * @return threadId identifier of the driver's dedicated thread; the caller
+ * may adjust the thread priority to match the priority
+ * of the thread that provides audio data.
*/
prepareForWriting(uint32_t frameSize, uint32_t framesCount)
generates (
@@ -117,7 +119,7 @@
fmq_sync<WriteCommand> commandMQ,
fmq_sync<uint8_t> dataMQ,
fmq_sync<WriteStatus> statusMQ,
- ThreadInfo threadInfo);
+ int32_t threadId);
/**
* Return the number of audio frames written by the audio DSP to DAC since
diff --git a/audio/7.0/config/api/current.txt b/audio/7.0/config/api/current.txt
index 98c5eac..ac8dc8a 100644
--- a/audio/7.0/config/api/current.txt
+++ b/audio/7.0/config/api/current.txt
@@ -6,6 +6,81 @@
method public java.util.List<java.lang.String> getItem();
}
+ public enum AudioChannelMask {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_10;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_11;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_12;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_13;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_14;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_15;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_16;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_17;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_18;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_19;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_20;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_21;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_22;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_23;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_24;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_3;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_4;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_5;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_6;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_7;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_8;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_INDEX_MASK_9;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_2POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_2POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_3POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_3POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_5POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_6;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_FRONT_BACK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_STEREO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_VOICE_CALL_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_2POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_2POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_2POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_3POINT0POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_3POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1POINT4;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1_BACK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_6POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_7POINT1;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_7POINT1POINT2;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_7POINT1POINT4;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_HAPTIC_AB;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_MONO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_MONO_HAPTIC_A;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_PENTA;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_QUAD;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_QUAD_BACK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_QUAD_SIDE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_STEREO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioChannelMask AUDIO_CHANNEL_OUT_SURROUND;
+ }
+
+ public enum AudioContentType {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_MOVIE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_MUSIC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_SONIFICATION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_SPEECH;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioContentType AUDIO_CONTENT_TYPE_UNKNOWN;
+ }
+
public enum AudioDevice {
method public String getRawName();
enum_constant public static final audio.policy.configuration.V7_0.AudioDevice AUDIO_DEVICE_IN_AMBIENT;
@@ -116,6 +191,7 @@
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX_HD;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_APTX_TWSP;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_CELT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DEFAULT;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DOLBY_TRUEHD;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DSD;
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_DTS;
@@ -152,6 +228,33 @@
enum_constant public static final audio.policy.configuration.V7_0.AudioFormat AUDIO_FORMAT_WMA_PRO;
}
+ public enum AudioInOutFlag {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_DIRECT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_FAST;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_HW_HOTWORD;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_MMAP_NOIRQ;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_RAW;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_SYNC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_INPUT_FLAG_VOIP_TX;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_DIRECT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_DIRECT_PCM;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_FAST;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_NON_BLOCKING;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_PRIMARY;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_RAW;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_SYNC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_TTS;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioInOutFlag AUDIO_OUTPUT_FLAG_VOIP_RX;
+ }
+
public class AudioPolicyConfiguration {
ctor public AudioPolicyConfiguration();
method public audio.policy.configuration.V7_0.GlobalConfiguration getGlobalConfiguration();
@@ -164,18 +267,59 @@
method public void setVersion(audio.policy.configuration.V7_0.Version);
}
+ public enum AudioSource {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_CAMCORDER;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_DEFAULT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_ECHO_REFERENCE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_FM_TUNER;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_HOTWORD;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_MIC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_REMOTE_SUBMIX;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_UNPROCESSED;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_CALL;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_COMMUNICATION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_DOWNLINK;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_PERFORMANCE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_RECOGNITION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioSource AUDIO_SOURCE_VOICE_UPLINK;
+ }
+
+ public enum AudioStreamType {
+ method public String getRawName();
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ACCESSIBILITY;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ALARM;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ASSISTANT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_BLUETOOTH_SCO;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_DTMF;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_ENFORCED_AUDIBLE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_MUSIC;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_NOTIFICATION;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_PATCH;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_REROUTING;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_RING;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_SYSTEM;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_TTS;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioStreamType AUDIO_STREAM_VOICE_CALL;
+ }
+
public enum AudioUsage {
method public String getRawName();
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ALARM;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ANNOUNCEMENT;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANCE_SONIFICATION;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_ASSISTANT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_CALL_ASSISTANT;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_EMERGENCY;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_GAME;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_MEDIA;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_NOTIFICATION;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_SAFETY;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_UNKNOWN;
+ enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VEHICLE_STATUS;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VIRTUAL_SOURCE;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION;
enum_constant public static final audio.policy.configuration.V7_0.AudioUsage AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
@@ -234,7 +378,7 @@
public static class Gains.Gain {
ctor public Gains.Gain();
- method public String getChannel_mask();
+ method public audio.policy.configuration.V7_0.AudioChannelMask getChannel_mask();
method public int getDefaultValueMB();
method public int getMaxRampMs();
method public int getMaxValueMB();
@@ -244,7 +388,7 @@
method public String getName();
method public int getStepValueMB();
method public boolean getUseForVolume();
- method public void setChannel_mask(String);
+ method public void setChannel_mask(audio.policy.configuration.V7_0.AudioChannelMask);
method public void setDefaultValueMB(int);
method public void setMaxRampMs(int);
method public void setMaxValueMB(int);
@@ -279,7 +423,7 @@
public static class MixPorts.MixPort {
ctor public MixPorts.MixPort();
- method public String getFlags();
+ method public java.util.List<audio.policy.configuration.V7_0.AudioInOutFlag> getFlags();
method public audio.policy.configuration.V7_0.Gains getGains();
method public long getMaxActiveCount();
method public long getMaxOpenCount();
@@ -287,7 +431,7 @@
method public java.util.List<audio.policy.configuration.V7_0.AudioUsage> getPreferredUsage();
method public java.util.List<audio.policy.configuration.V7_0.Profile> getProfile();
method public audio.policy.configuration.V7_0.Role getRole();
- method public void setFlags(String);
+ method public void setFlags(java.util.List<audio.policy.configuration.V7_0.AudioInOutFlag>);
method public void setGains(audio.policy.configuration.V7_0.Gains);
method public void setMaxActiveCount(long);
method public void setMaxOpenCount(long);
@@ -327,14 +471,14 @@
public class Profile {
ctor public Profile();
- method public String getChannelMasks();
+ method public java.util.List<audio.policy.configuration.V7_0.AudioChannelMask> getChannelMasks();
method public String getFormat();
method public String getName();
- method public String getSamplingRates();
- method public void setChannelMasks(String);
+ method public java.util.List<java.math.BigInteger> getSamplingRates();
+ method public void setChannelMasks(java.util.List<audio.policy.configuration.V7_0.AudioChannelMask>);
method public void setFormat(String);
method public void setName(String);
- method public void setSamplingRates(String);
+ method public void setSamplingRates(java.util.List<java.math.BigInteger>);
}
public class Reference {
@@ -365,24 +509,6 @@
method public void setType(audio.policy.configuration.V7_0.MixType);
}
- public enum Stream {
- method public String getRawName();
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ACCESSIBILITY;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ALARM;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ASSISTANT;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_BLUETOOTH_SCO;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_DTMF;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_ENFORCED_AUDIBLE;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_MUSIC;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_NOTIFICATION;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_PATCH;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_REROUTING;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_RING;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_SYSTEM;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_TTS;
- enum_constant public static final audio.policy.configuration.V7_0.Stream AUDIO_STREAM_VOICE_CALL;
- }
-
public class SurroundFormats {
ctor public SurroundFormats();
method public java.util.List<audio.policy.configuration.V7_0.SurroundFormats.Format> getFormat();
@@ -412,10 +538,10 @@
method public audio.policy.configuration.V7_0.DeviceCategory getDeviceCategory();
method public java.util.List<java.lang.String> getPoint();
method public String getRef();
- method public audio.policy.configuration.V7_0.Stream getStream();
+ method public audio.policy.configuration.V7_0.AudioStreamType getStream();
method public void setDeviceCategory(audio.policy.configuration.V7_0.DeviceCategory);
method public void setRef(String);
- method public void setStream(audio.policy.configuration.V7_0.Stream);
+ method public void setStream(audio.policy.configuration.V7_0.AudioStreamType);
}
public class Volumes {
diff --git a/audio/7.0/config/audio_policy_configuration.xsd b/audio/7.0/config/audio_policy_configuration.xsd
index 19c6f70..20fe020 100644
--- a/audio/7.0/config/audio_policy_configuration.xsd
+++ b/audio/7.0/config/audio_policy_configuration.xsd
@@ -13,7 +13,6 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-<!-- TODO: define a targetNamespace. Note that it will break retrocompatibility -->
<xs:schema version="2.0"
elementFormDefault="qualified"
attributeFormDefault="unqualified"
@@ -27,7 +26,9 @@
<xs:simpleType name="halVersion">
<xs:annotation>
<xs:documentation xml:lang="en">
- Version of the interface the hal implements.
+ Version of the interface the hal implements. Note that this
+ relates to legacy HAL API versions since HIDL APIs are versioned
+ using other mechanisms.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:decimal">
@@ -154,17 +155,41 @@
<xs:element name="item" type="xs:token" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
- <!-- TODO: separate values by space for better xsd validations. -->
- <xs:simpleType name="audioInOutFlags">
+ <xs:simpleType name="audioInOutFlag">
<xs:annotation>
<xs:documentation xml:lang="en">
- "|" separated list of audio_output_flags_t or audio_input_flags_t.
+ The flags indicate suggested stream attributes supported by the profile.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="|[_A-Z]+(\|[_A-Z]+)*"/>
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_DIRECT" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_PRIMARY" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_FAST" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_DEEP_BUFFER" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_NON_BLOCKING" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_HW_AV_SYNC" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_TTS" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_RAW" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_SYNC" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_DIRECT_PCM" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_MMAP_NOIRQ" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_VOIP_RX" />
+ <xs:enumeration value="AUDIO_OUTPUT_FLAG_INCALL_MUSIC" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_FAST" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_HW_HOTWORD" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_RAW" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_SYNC" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_MMAP_NOIRQ" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_VOIP_TX" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_HW_AV_SYNC" />
+ <xs:enumeration value="AUDIO_INPUT_FLAG_DIRECT" />
</xs:restriction>
</xs:simpleType>
+ <xs:simpleType name="audioInOutFlags">
+ <xs:list itemType="audioInOutFlag" />
+ </xs:simpleType>
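Because an xs:list value is just a whitespace-separated sequence of tokens, a consumer reading the raw attribute can split it generically, for example (a minimal sketch, not the xsdc-generated parser):

    #include <iterator>
    #include <sstream>
    #include <string>
    #include <vector>

    // "AUDIO_OUTPUT_FLAG_PRIMARY AUDIO_OUTPUT_FLAG_FAST" -> two tokens.
    std::vector<std::string> splitXsList(const std::string& attributeValue) {
        std::istringstream stream(attributeValue);
        return {std::istream_iterator<std::string>(stream),
                std::istream_iterator<std::string>()};
    }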
<xs:simpleType name="role">
<xs:restriction base="xs:string">
<xs:enumeration value="sink"/>
@@ -212,9 +237,6 @@
</xs:element>
</xs:sequence>
</xs:complexType>
- <!-- Enum values of audio_device_t in audio.h
- TODO: generate from hidl to avoid manual sync.
- TODO: separate source and sink in the xml for better xsd validations. -->
<xs:simpleType name="audioDevice">
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_DEVICE_NONE"/>
@@ -252,7 +274,6 @@
<xs:enumeration value="AUDIO_DEVICE_OUT_DEFAULT"/>
<xs:enumeration value="AUDIO_DEVICE_OUT_STUB"/>
- <!-- Due to the xml format, IN types can not be a separated from OUT types -->
<xs:enumeration value="AUDIO_DEVICE_IN_COMMUNICATION"/>
<xs:enumeration value="AUDIO_DEVICE_IN_AMBIENT"/>
<xs:enumeration value="AUDIO_DEVICE_IN_BUILTIN_MIC"/>
@@ -298,10 +319,9 @@
<xs:simpleType name="extendableAudioDevice">
<xs:union memberTypes="audioDevice vendorExtension"/>
</xs:simpleType>
- <!-- Enum values of audio_format_t in audio.h
- TODO: generate from hidl to avoid manual sync. -->
<xs:simpleType name="audioFormat">
<xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_FORMAT_DEFAULT" />
<xs:enumeration value="AUDIO_FORMAT_PCM_16_BIT" />
<xs:enumeration value="AUDIO_FORMAT_PCM_8_BIT"/>
<xs:enumeration value="AUDIO_FORMAT_PCM_32_BIT"/>
@@ -382,9 +402,14 @@
<xs:simpleType name="extendableAudioFormat">
<xs:union memberTypes="audioFormat vendorExtension"/>
</xs:simpleType>
- <!-- Enum values of audio::common::4_0::AudioUsage
- TODO: generate from HIDL to avoid manual sync. -->
<xs:simpleType name="audioUsage">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Audio usage specifies the intended use case for the sound being played.
+ Please consult frameworks/base/media/java/android/media/AudioAttributes.java
+ for the description of each value.
+ </xs:documentation>
+ </xs:annotation>
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_USAGE_UNKNOWN" />
<xs:enumeration value="AUDIO_USAGE_MEDIA" />
@@ -399,34 +424,119 @@
<xs:enumeration value="AUDIO_USAGE_GAME" />
<xs:enumeration value="AUDIO_USAGE_VIRTUAL_SOURCE" />
<xs:enumeration value="AUDIO_USAGE_ASSISTANT" />
+ <xs:enumeration value="AUDIO_USAGE_CALL_ASSISTANT" />
+ <xs:enumeration value="AUDIO_USAGE_EMERGENCY" />
+ <xs:enumeration value="AUDIO_USAGE_SAFETY" />
+ <xs:enumeration value="AUDIO_USAGE_VEHICLE_STATUS" />
+ <xs:enumeration value="AUDIO_USAGE_ANNOUNCEMENT" />
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="audioUsageList">
<xs:list itemType="audioUsage"/>
</xs:simpleType>
- <!-- TODO: Change to a space separated list to xsd enforce correctness. -->
- <xs:simpleType name="samplingRates">
- <xs:restriction base="xs:string">
- <xs:pattern value="[0-9]+(,[0-9]+)*"/>
- </xs:restriction>
- </xs:simpleType>
- <!-- TODO: Change to a space separated list to xsd enforce correctness. -->
- <xs:simpleType name="channelMask">
+ <xs:simpleType name="audioContentType">
<xs:annotation>
<xs:documentation xml:lang="en">
- Comma (",") separated list of channel flags
- from audio_channel_mask_t.
+ Audio content type expresses the general category of the content.
+ Please consult frameworks/base/media/java/android/media/AudioAttributes.java
+ for the description of each value.
</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
- <xs:pattern value="[_A-Z][_A-Z0-9]*(,[_A-Z][_A-Z0-9]*)*"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_UNKNOWN"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_SPEECH"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_MUSIC"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_MOVIE"/>
+ <xs:enumeration value="AUDIO_CONTENT_TYPE_SONIFICATION"/>
</xs:restriction>
</xs:simpleType>
+ <xs:simpleType name="samplingRates">
+ <xs:list itemType="xs:nonNegativeInteger" />
+ </xs:simpleType>
+ <xs:simpleType name="audioChannelMask">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Audio channel mask specifies presence of particular channels.
+ There are two representations:
+                - positional (traditional discrete channel specification,
+ e.g. "left", "right");
+ - indexed (this is similar to "tracks" in audio mixing, channels
+ are represented using numbers).
+ </xs:documentation>
+ </xs:annotation>
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_STEREO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_2POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_2POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_2POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_3POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_3POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_QUAD"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_QUAD_BACK"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_QUAD_SIDE"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_SURROUND"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_PENTA"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1_BACK"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1_SIDE"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_5POINT1POINT4"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_6POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_7POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_7POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_7POINT1POINT4"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_MONO_HAPTIC_A"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_HAPTIC_AB"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB"/>
+ <xs:enumeration value="AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_STEREO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_6"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_2POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_2POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_3POINT0POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_3POINT1POINT2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_5POINT1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_IN_VOICE_CALL_MONO"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_1"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_2"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_3"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_4"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_5"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_6"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_7"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_8"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_9"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_10"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_11"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_12"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_13"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_14"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_15"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_16"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_17"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_18"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_19"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_20"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_21"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_22"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_23"/>
+ <xs:enumeration value="AUDIO_CHANNEL_INDEX_MASK_24"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:simpleType name="channelMasks">
+ <xs:list itemType="audioChannelMask" />
+ </xs:simpleType>
<xs:complexType name="profile">
<xs:attribute name="name" type="xs:token" use="optional"/>
<xs:attribute name="format" type="extendableAudioFormat" use="optional"/>
<xs:attribute name="samplingRates" type="samplingRates" use="optional"/>
- <xs:attribute name="channelMasks" type="channelMask" use="optional"/>
+ <xs:attribute name="channelMasks" type="channelMasks" use="optional"/>
</xs:complexType>
<xs:simpleType name="gainMode">
<xs:restriction base="xs:string">
@@ -441,7 +551,7 @@
<xs:complexType>
<xs:attribute name="name" type="xs:token" use="required"/>
<xs:attribute name="mode" type="gainMode" use="required"/>
- <xs:attribute name="channel_mask" type="channelMask" use="optional"/>
+ <xs:attribute name="channel_mask" type="audioChannelMask" use="optional"/>
<xs:attribute name="minValueMB" type="xs:int" use="optional"/>
<xs:attribute name="maxValueMB" type="xs:int" use="optional"/>
<xs:attribute name="defaultValueMB" type="xs:int" use="optional"/>
@@ -537,9 +647,14 @@
<xs:pattern value="([0-9]{1,2}|100),-?[0-9]+"/>
</xs:restriction>
</xs:simpleType>
- <!-- Enum values of audio_stream_type_t in audio-base.h
- TODO: generate from hidl to avoid manual sync. -->
- <xs:simpleType name="stream">
+ <xs:simpleType name="audioStreamType">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ Audio stream type describing the intended use case of a stream.
+ Please consult frameworks/base/media/java/android/media/AudioSystem.java
+ for the description of each value.
+ </xs:documentation>
+ </xs:annotation>
<xs:restriction base="xs:string">
<xs:enumeration value="AUDIO_STREAM_VOICE_CALL"/>
<xs:enumeration value="AUDIO_STREAM_SYSTEM"/>
@@ -557,8 +672,32 @@
<xs:enumeration value="AUDIO_STREAM_PATCH"/>
</xs:restriction>
</xs:simpleType>
- <!-- Enum values of device_category from Volume.h.
- TODO: generate from hidl to avoid manual sync. -->
+ <xs:simpleType name="audioSource">
+ <xs:annotation>
+ <xs:documentation xml:lang="en">
+ An audio source defines the intended use case for the sound being recorded.
+ Please consult frameworks/base/media/java/android/media/MediaRecorder.java
+ for the description of each value.
+ </xs:documentation>
+ </xs:annotation>
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="AUDIO_SOURCE_DEFAULT"/>
+ <xs:enumeration value="AUDIO_SOURCE_MIC"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_UPLINK"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_DOWNLINK"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_CALL"/>
+ <xs:enumeration value="AUDIO_SOURCE_CAMCORDER"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_RECOGNITION"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_COMMUNICATION"/>
+ <xs:enumeration value="AUDIO_SOURCE_REMOTE_SUBMIX"/>
+ <xs:enumeration value="AUDIO_SOURCE_UNPROCESSED"/>
+ <xs:enumeration value="AUDIO_SOURCE_VOICE_PERFORMANCE"/>
+ <xs:enumeration value="AUDIO_SOURCE_ECHO_REFERENCE"/>
+ <xs:enumeration value="AUDIO_SOURCE_FM_TUNER"/>
+ <xs:enumeration value="AUDIO_SOURCE_HOTWORD"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <!-- Enum values of device_category from Volume.h. -->
<xs:simpleType name="deviceCategory">
<xs:restriction base="xs:string">
<xs:enumeration value="DEVICE_CATEGORY_HEADSET"/>
@@ -591,7 +730,7 @@
<xs:sequence>
<xs:element name="point" type="volumePoint" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
- <xs:attribute name="stream" type="stream"/>
+ <xs:attribute name="stream" type="audioStreamType"/>
<xs:attribute name="deviceCategory" type="deviceCategory"/>
<xs:attribute name="ref" type="xs:token" use="optional"/>
</xs:complexType>
diff --git a/audio/7.0/config/update_audio_policy_config.sh b/audio/7.0/config/update_audio_policy_config.sh
new file mode 100755
index 0000000..051a0df
--- /dev/null
+++ b/audio/7.0/config/update_audio_policy_config.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script is used to update audio policy configuration files
+# to comply with the updated audio_policy_configuration.xsd from V7.0.
+#
+# The main difference is the separator used in lists for attributes.
+# Since the XML Schema Definition standard only allows space to be
+# used as a separator (see https://www.w3.org/TR/xmlschema11-2/#list-datatypes)
+# the previous versions used a regular expression to validate lists
+# in attribute values. E.g. the channel masks were validated using
+# the following regexp: [_A-Z][_A-Z0-9]*(,[_A-Z][_A-Z0-9]*)*
+# This has an obvious drawback of missing typos in the config file.
+#
+# The V7.0 has shifted to defining most of the frequently changed
+# types in the XSD schema only. This allows for verifying all the values
+# in lists, but in order to comply with XML Schema requirements
+# list elements must be separated by space.
+#
+# Since the APM config files typically use include directives,
+# the script must be pointed to the main APM config file and will
+# take care of all the included files automatically.
+# If the included file is a shared version from 'frameworks/av',
+# instead of updating it the script checks if there is a newer
+# version with the corresponding name suffix (e.g.
+# 'a2dp_audio_policy_configuration_7_0.xml') and updates the include
+# path instead.
+
+set -euo pipefail
+
+if (echo "$@" | grep -qe -h); then
+ echo "This script will update Audio Policy Manager config file"
+ echo "to the format required by V7.0 XSD schema from a previous"
+ echo "version."
+ echo
+ echo "USAGE: $0 [APM_XML_FILE] [OLD_VERSION]"
+ echo " APM_XML_FILE specifies the path to audio_policy_configuration.xml"
+ echo " relative to Android repository root"
+ echo " OLD_VERSION specifies the version of schema currently used"
+ echo
+ echo "Example: $0 device/generic/goldfish/audio/policy/audio_policy_configuration.xml 6.0"
+ exit
+fi
+readonly HAL_DIRECTORY=hardware/interfaces/audio
+readonly SHARED_CONFIGS_DIRECTORY=frameworks/av/services/audiopolicy/config
+readonly OLD_VERSION=${2:-$(ls ${ANDROID_BUILD_TOP}/${HAL_DIRECTORY} | grep -E '[0-9]+\.[0-9]+' |
+ sort -n | tail -n1)}
+readonly NEW_VERSION=7.0
+readonly NEW_VERSION_UNDERSCORE=7_0
+
+readonly SOURCE_CONFIG=${ANDROID_BUILD_TOP}/$1
+
+# First, validate the input using the schema of the current version
+
+echo Validating the source against the $OLD_VERSION schema
+xmllint --noout --xinclude \
+ --nofixup-base-uris --path "$ANDROID_BUILD_TOP/$SHARED_CONFIGS_DIRECTORY" \
+ --schema ${ANDROID_BUILD_TOP}/${HAL_DIRECTORY}/${OLD_VERSION}/config/audio_policy_configuration.xsd \
+ ${SOURCE_CONFIG}
+if [ $? -ne 0 ]; then
+ echo
+ echo "Config file fails validation for the specified version $OLD_VERSION--unsafe to update"
+ exit 1
+fi
+
+# Find all the source files recursively
+
+SOURCE_FILES=${SOURCE_CONFIG}
+SHARED_FILES=
+findIncludes() {
+ local FILES_TO_CHECK=
+ for F in $1; do
+ local FOUND_INCLUDES=$(grep -Po '<xi:include href="\K[^"]+(?="\/>)' ${F})
+ for I in ${FOUND_INCLUDES}; do
+ SOURCE_FULL_PATH=$(dirname ${F})/${I}
+ SHARED_FULL_PATH=${ANDROID_BUILD_TOP}/${SHARED_CONFIGS_DIRECTORY}/${I}
+ if [ -f "$SOURCE_FULL_PATH" ]; then
+ # Device-specific file.
+ SOURCE_FILES+=$'\n'${SOURCE_FULL_PATH}
+ FILES_TO_CHECK+=$'\n'${SOURCE_FULL_PATH}
+ elif [ -f "$SHARED_FULL_PATH" ]; then
+ # Shared file from the frameworks repo.
+ SHARED_FILES+=$'\n'${I}
+ FILES_TO_CHECK+=$'\n'${SHARED_FULL_PATH}
+ else
+ echo
+ echo "Include file not found: $I"
+ exit 1
+ fi
+ done
+ done
+ if [ "$FILES_TO_CHECK" ]; then
+ findIncludes "$FILES_TO_CHECK"
+ fi
+}
+findIncludes ${SOURCE_FILES}
+
+echo "Will update $1 and included device-specific files in place."
+echo "Will update paths to shared included files."
+echo "Press Ctrl-C to cancel, Enter to continue"
+read
+
+updateFile() {
+ FILE=$1
+ ATTR=$2
+ SEPARATOR=$3
+ SRC_LINES=$(grep -nPo "$ATTR=\"[^\"]+\"" ${FILE} || true)
+ for S in $SRC_LINES; do
+ # Prepare instruction for 'sed' for in-place editing of specified line
+ R=$(echo ${S} | sed -e 's/^[0-9]\+:/\//' | sed -e "s/$SEPARATOR/ /g")
+ S=$(echo ${S} | sed -e 's/:/s\//')${R}/
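+        # At this point S holds a complete single-line sed command, e.g. 42s/flags="A|B"/flags="A B"/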
+ echo ${S} | sed -i -f - ${FILE}
+ done
+}
+for F in $SOURCE_FILES; do
+ updateFile ${F} "channelMasks" ","
+ updateFile ${F} "samplingRates" ","
+ updateFile ${F} "flags" "|"
+done;
+
+updateIncludes() {
+ FILE=$1
+ for I in $SHARED_FILES; do
+ NEW_VERSION_I=${I%.*}_${NEW_VERSION_UNDERSCORE}.${I##*.}
+ if [ -e "$ANDROID_BUILD_TOP/$SHARED_CONFIGS_DIRECTORY/$NEW_VERSION_I" ]; then
+ echo "s/$I/$NEW_VERSION_I/g" | sed -i -f - ${FILE}
+ fi
+ done
+}
+for F in $SOURCE_FILES; do
+ updateIncludes ${F}
+done
+
+# Validate the results against the new schema
+
+echo Validating the result against the $NEW_VERSION schema
+xmllint --noout --xinclude \
+ --nofixup-base-uris --path "$ANDROID_BUILD_TOP/$SHARED_CONFIGS_DIRECTORY" \
+ --schema ${ANDROID_BUILD_TOP}/${HAL_DIRECTORY}/${NEW_VERSION}/config/audio_policy_configuration.xsd \
+ ${SOURCE_CONFIG}
+if [ $? -ne 0 ]; then
+ echo
+ echo "Config file fails validation for the specified version $NEW_VERSION--please check the changes"
+ exit 1
+fi
+echo
+echo "Please check the diff and update path to APM shared files in the device makefile!"
diff --git a/audio/7.0/types.hal b/audio/7.0/types.hal
index b0b0843..4a9e289 100644
--- a/audio/7.0/types.hal
+++ b/audio/7.0/types.hal
@@ -355,3 +355,17 @@
*/
TimestretchFallbackMode fallbackMode;
};
+
+/**
+ * The audio flags serve two purposes:
+ *
+ * - when a stream is created they indicate its attributes;
+ *
+ * - when present in a profile descriptor listed for a particular audio
+ * hardware module, they indicate that a stream can be opened that
+ * supports the attributes indicated by the flags.
+ *
+ * See 'audioInOutFlag' in audio_policy_configuration.xsd for the
+ * list of allowed values.
+ */
+typedef string AudioInOutFlag;
diff --git a/audio/README b/audio/README
deleted file mode 100644
index afafbe3..0000000
--- a/audio/README
+++ /dev/null
@@ -1,36 +0,0 @@
-Directory structure of the audio HIDL related code.
-
-Run `common/all-versions/copyHAL.sh` to create a new version of the audio HAL
-based on an existing one.
-
-audio
-|-- 2.0 <== core 2.0 HIDL API. .hal can not be moved into the core directory
-| because that would change its namespace and include path
-|-- 4.0 <== Version 4.0 of the core API
-|
-|-- ...
-|
-|-- common <== code common to audio core and effect API
-| |-- 2.0 <== HIDL API of V2
-| |-- 4.0
-| |-- ...
-| `-- all-versions <== code common to all version of both core and effect API
-| |-- default <== implementation shared code between core and effect impl
-| |-- test <== utilities used by tests
-| `-- util <== utilities used by both implementation and tests
-|
-|-- core <== VTS and default implementation of the core API (not HIDL, see /audio/2.0))
-| `-- all-versions <== Code is version independent through #if and separate files
-| |-- default <== code that wraps the legacy API
-| `-- vts <== vts of core API
-| |-- 2.0 <== 2.0 specific tests and helpers
-| |-- 4.0
-| |-- ...
-|
-`-- effect <== idem for the effect API
- |-- 2.0
- |-- 4.0
- |-- ...
- `-- all-versions
- |-- default
- `-- vts
diff --git a/audio/README.md b/audio/README.md
new file mode 100644
index 0000000..b77b9ba
--- /dev/null
+++ b/audio/README.md
@@ -0,0 +1,53 @@
+# Audio HAL
+
+Directory structure of the audio HAL related code.
+
+Run `common/all-versions/copyHAL.sh` to create a new version of the audio HAL
+based on an existing one.
+
+## Directory Structure
+
+* `2.0` -- version 2.0 of the core HIDL API. Note that `.hal` files
+ can not be moved into the `core` directory because that would change
+ its namespace and include path.
+ - `config` -- the XSD schema for the Audio Policy Manager
+ configuration file.
+* `4.0` -- version 4.0 of the core HIDL API.
+* ...
+* `common` -- common types for audio core and effect HIDL API.
+ - `2.0` -- version 2.0 of the common types HIDL API.
+ - `4.0` -- version 4.0.
+ - ...
+ - `7.0` -- version 7.0.
+ - `example` -- example implementation of the core and effect
+ V7.0 API. It represents a "fake" audio HAL that doesn't
+ actually communicate with hardware.
+  - `all-versions` -- code common to all versions of both the core and effect APIs.
+ - `default` -- shared code of the default implementation.
+ - `service` -- vendor HAL service for hosting the default
+ implementation.
+ - `test` -- utilities used by tests.
+ - `util` -- utilities used by both implementation and tests.
+* `core` -- VTS tests and the default implementation of the core API
+  (not the HIDL API itself, which lives in `audio/N.M`).
+  - `7.0` -- code specific to version 7.0 of the core HIDL API.
+  - `all-versions` -- the code is common to all versions;
+    version-specific parts are guarded by preprocessor conditionals
+    or reside in dedicated files.
+ - `default` -- code that wraps the legacy API (from
+ `hardware/libhardware`).
+    - `vts` -- VTS tests for the core HIDL API.
+* `effect` -- same for the effect HIDL API.
+ - `2.0`
+ - `config` -- the XSD schema for the Audio Effects configuration
+ file.
+ - `4.0`
+ - ...
+ - `all-versions`
+ - `default`
+ - `vts`
+* `policy` -- Configurable Audio Policy schemes.
+ - `1.0` -- note that versions of CAP are not linked to the versions
+ of audio HAL.
+ - `vts` -- VTS tests for validating actual configuration files.
+ - `xml` -- XSD schemas for CAP configuration files.
diff --git a/audio/common/7.0/Android.bp b/audio/common/7.0/Android.bp
index e24871c..1c016b4 100644
--- a/audio/common/7.0/Android.bp
+++ b/audio/common/7.0/Android.bp
@@ -16,9 +16,12 @@
cc_library {
name: "android.hardware.audio.common@7.0-enums",
vendor_available: true,
- generated_sources: ["audio_policy_configuration_V7_0"],
generated_headers: ["audio_policy_configuration_V7_0"],
+ generated_sources: ["audio_policy_configuration_V7_0"],
header_libs: ["libxsdc-utils"],
+ export_generated_headers: ["audio_policy_configuration_V7_0"],
+ export_header_lib_headers: ["libxsdc-utils"],
+ export_include_dirs: ["enums/include"],
shared_libs: [
"libbase",
"liblog",
diff --git a/audio/common/7.0/enums/include/audio_policy_configuration_V7_0-enums.h b/audio/common/7.0/enums/include/audio_policy_configuration_V7_0-enums.h
new file mode 100644
index 0000000..d5fedce
--- /dev/null
+++ b/audio/common/7.0/enums/include/audio_policy_configuration_V7_0-enums.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUDIO_POLICY_CONFIGURATION_V7_0_ENUMS_H
+#define AUDIO_POLICY_CONFIGURATION_V7_0_ENUMS_H
+
+#include <sys/types.h>
+
+#include <audio_policy_configuration_V7_0.h>
+
+namespace audio::policy::configuration::V7_0 {
+
+static inline size_t getChannelCount(AudioChannelMask mask) {
+ switch (mask) {
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_MONO:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_MONO:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_1:
+ return 1;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_STEREO:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_MONO_HAPTIC_A:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_HAPTIC_AB:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_STEREO:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_FRONT_BACK:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_VOICE_CALL_MONO:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_2:
+ return 2;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_2POINT1:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_3:
+ return 3;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_2POINT0POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_QUAD:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_QUAD_BACK:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_QUAD_SIDE:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_SURROUND:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_2POINT0POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_4:
+ return 4;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_2POINT1POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_3POINT0POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_PENTA:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_2POINT1POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_3POINT0POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_5:
+ return 5;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_3POINT1POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_5POINT1:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_5POINT1_BACK:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_5POINT1_SIDE:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_6:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_3POINT1POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_IN_5POINT1:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_6:
+ return 6;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_6POINT1:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_7:
+ return 7;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_5POINT1POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_7POINT1:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_8:
+ return 8;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_9:
+ return 9;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_5POINT1POINT4:
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_7POINT1POINT2:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_10:
+ return 10;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_11:
+ return 11;
+ case AudioChannelMask::AUDIO_CHANNEL_OUT_7POINT1POINT4:
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_12:
+ return 12;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_13:
+ return 13;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_14:
+ return 14;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_15:
+ return 15;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_16:
+ return 16;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_17:
+ return 17;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_18:
+ return 18;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_19:
+ return 19;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_20:
+ return 20;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_21:
+ return 21;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_22:
+ return 22;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_23:
+ return 23;
+ case AudioChannelMask::AUDIO_CHANNEL_INDEX_MASK_24:
+ return 24;
+ case AudioChannelMask::UNKNOWN:
+ return 0;
+ // No default to make sure all cases are covered.
+ }
+ // This is to avoid undefined behavior if 'mask' isn't a valid enum value.
+ return 0;
+}
+
+static inline ssize_t getChannelCount(const std::string& mask) {
+ return getChannelCount(stringToAudioChannelMask(mask));
+}
+
+static inline bool isOutputDevice(AudioDevice device) {
+ switch (device) {
+ case AudioDevice::UNKNOWN:
+ case AudioDevice::AUDIO_DEVICE_NONE:
+ return false;
+ case AudioDevice::AUDIO_DEVICE_OUT_EARPIECE:
+ case AudioDevice::AUDIO_DEVICE_OUT_SPEAKER:
+ case AudioDevice::AUDIO_DEVICE_OUT_WIRED_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+ case AudioDevice::AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
+ case AudioDevice::AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
+ case AudioDevice::AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
+ case AudioDevice::AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+ case AudioDevice::AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
+ case AudioDevice::AUDIO_DEVICE_OUT_AUX_DIGITAL:
+ case AudioDevice::AUDIO_DEVICE_OUT_HDMI:
+ case AudioDevice::AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_OUT_USB_ACCESSORY:
+ case AudioDevice::AUDIO_DEVICE_OUT_USB_DEVICE:
+ case AudioDevice::AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
+ case AudioDevice::AUDIO_DEVICE_OUT_TELEPHONY_TX:
+ case AudioDevice::AUDIO_DEVICE_OUT_LINE:
+ case AudioDevice::AUDIO_DEVICE_OUT_HDMI_ARC:
+ case AudioDevice::AUDIO_DEVICE_OUT_SPDIF:
+ case AudioDevice::AUDIO_DEVICE_OUT_FM:
+ case AudioDevice::AUDIO_DEVICE_OUT_AUX_LINE:
+ case AudioDevice::AUDIO_DEVICE_OUT_SPEAKER_SAFE:
+ case AudioDevice::AUDIO_DEVICE_OUT_IP:
+ case AudioDevice::AUDIO_DEVICE_OUT_BUS:
+ case AudioDevice::AUDIO_DEVICE_OUT_PROXY:
+ case AudioDevice::AUDIO_DEVICE_OUT_USB_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_OUT_HEARING_AID:
+ case AudioDevice::AUDIO_DEVICE_OUT_ECHO_CANCELLER:
+ case AudioDevice::AUDIO_DEVICE_OUT_DEFAULT:
+ case AudioDevice::AUDIO_DEVICE_OUT_STUB:
+ return true;
+ case AudioDevice::AUDIO_DEVICE_IN_COMMUNICATION:
+ case AudioDevice::AUDIO_DEVICE_IN_AMBIENT:
+ case AudioDevice::AUDIO_DEVICE_IN_BUILTIN_MIC:
+ case AudioDevice::AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_IN_WIRED_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_IN_AUX_DIGITAL:
+ case AudioDevice::AUDIO_DEVICE_IN_HDMI:
+ case AudioDevice::AUDIO_DEVICE_IN_VOICE_CALL:
+ case AudioDevice::AUDIO_DEVICE_IN_TELEPHONY_RX:
+ case AudioDevice::AUDIO_DEVICE_IN_BACK_MIC:
+ case AudioDevice::AUDIO_DEVICE_IN_REMOTE_SUBMIX:
+ case AudioDevice::AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_IN_USB_ACCESSORY:
+ case AudioDevice::AUDIO_DEVICE_IN_USB_DEVICE:
+ case AudioDevice::AUDIO_DEVICE_IN_FM_TUNER:
+ case AudioDevice::AUDIO_DEVICE_IN_TV_TUNER:
+ case AudioDevice::AUDIO_DEVICE_IN_LINE:
+ case AudioDevice::AUDIO_DEVICE_IN_SPDIF:
+ case AudioDevice::AUDIO_DEVICE_IN_BLUETOOTH_A2DP:
+ case AudioDevice::AUDIO_DEVICE_IN_LOOPBACK:
+ case AudioDevice::AUDIO_DEVICE_IN_IP:
+ case AudioDevice::AUDIO_DEVICE_IN_BUS:
+ case AudioDevice::AUDIO_DEVICE_IN_PROXY:
+ case AudioDevice::AUDIO_DEVICE_IN_USB_HEADSET:
+ case AudioDevice::AUDIO_DEVICE_IN_BLUETOOTH_BLE:
+ case AudioDevice::AUDIO_DEVICE_IN_HDMI_ARC:
+ case AudioDevice::AUDIO_DEVICE_IN_ECHO_REFERENCE:
+ case AudioDevice::AUDIO_DEVICE_IN_DEFAULT:
+ case AudioDevice::AUDIO_DEVICE_IN_STUB:
+ return false;
+ // No default to make sure all cases are covered.
+ }
+ // This is to avoid undefined behavior if 'device' isn't a valid enum value.
+ return false;
+}
+
+static inline bool isOutputDevice(const std::string& device) {
+ return isOutputDevice(stringToAudioDevice(device));
+}
+
+} // namespace audio::policy::configuration::V7_0
+
+#endif // AUDIO_POLICY_CONFIGURATION_V7_0_ENUMS_H
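The helpers above accept either the XSD-generated enum or its string form, so callers can feed in values read straight from the configuration file. A minimal usage sketch follows; it assumes the caller depends on `android.hardware.audio.common@7.0-enums`, which now exports this header (see the `Android.bp` change above).

```cpp
// Usage sketch for the helpers above (not part of this patch).
// Assumes android.hardware.audio.common@7.0-enums is a dependency.
#include <string>

#include <audio_policy_configuration_V7_0-enums.h>

namespace xsd = audio::policy::configuration::V7_0;

// Returns true for a two-channel mask routed to an output device.
// Unknown strings map to UNKNOWN, which yields 0 channels / 'false'.
bool isStereoOutput(const std::string& channelMask, const std::string& device) {
    return xsd::getChannelCount(channelMask) == 2 && xsd::isOutputDevice(device);
}
```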
diff --git a/audio/common/7.0/example/Android.bp b/audio/common/7.0/example/Android.bp
new file mode 100644
index 0000000..03c1cd8
--- /dev/null
+++ b/audio/common/7.0/example/Android.bp
@@ -0,0 +1,45 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_binary {
+ name: "android.hardware.audio@7.0-service.example",
+ vendor: true,
+ relative_install_path: "hw",
+ init_rc: ["android.hardware.audio@7.0-service.example.rc"],
+ vintf_fragments: ["android.hardware.audio@7.0-service.example.xml"],
+ srcs: [
+ "DevicesFactory.cpp",
+ "Effect.cpp",
+ "EffectsFactory.cpp",
+ "EqualizerEffect.cpp",
+ "LoudnessEnhancerEffect.cpp",
+ "service.cpp",
+ ],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ shared_libs: [
+ "libcutils",
+ "libhidlbase",
+ "liblog",
+ "libxml2",
+ "libutils",
+ "android.hardware.audio@7.0",
+ "android.hardware.audio.common@7.0",
+ "android.hardware.audio.common@7.0-enums",
+ "android.hardware.audio.effect@7.0",
+ ],
+}
diff --git a/audio/common/7.0/example/DevicesFactory.cpp b/audio/common/7.0/example/DevicesFactory.cpp
new file mode 100644
index 0000000..ddd5fef
--- /dev/null
+++ b/audio/common/7.0/example/DevicesFactory.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactory7.0"
+#include <log/log.h>
+
+#include "DevicesFactory.h"
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+namespace android::hardware::audio::V7_0::implementation {
+
+Return<void> DevicesFactory::openDevice(const hidl_string& device, openDevice_cb _hidl_cb) {
+ (void)device;
+ _hidl_cb(Result::INVALID_ARGUMENTS, nullptr);
+ return Void();
+}
+
+Return<void> DevicesFactory::openPrimaryDevice(openPrimaryDevice_cb _hidl_cb) {
+ _hidl_cb(Result::INVALID_ARGUMENTS, nullptr);
+ return Void();
+}
+
+} // namespace android::hardware::audio::V7_0::implementation
diff --git a/audio/common/7.0/example/DevicesFactory.h b/audio/common/7.0/example/DevicesFactory.h
new file mode 100644
index 0000000..00f665c
--- /dev/null
+++ b/audio/common/7.0/example/DevicesFactory.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/audio/7.0/IDevicesFactory.h>
+
+namespace android::hardware::audio::V7_0::implementation {
+
+class DevicesFactory : public IDevicesFactory {
+ public:
+ DevicesFactory() = default;
+
+ ::android::hardware::Return<void> openDevice(const ::android::hardware::hidl_string& device,
+ openDevice_cb _hidl_cb) override;
+
+ ::android::hardware::Return<void> openPrimaryDevice(openPrimaryDevice_cb _hidl_cb) override;
+};
+
+} // namespace android::hardware::audio::V7_0::implementation
diff --git a/audio/common/7.0/example/Effect.cpp b/audio/common/7.0/example/Effect.cpp
new file mode 100644
index 0000000..423754d
--- /dev/null
+++ b/audio/common/7.0/example/Effect.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectsFactory7.0"
+#include <log/log.h>
+
+#include <audio_policy_configuration_V7_0.h>
+
+#include "Effect.h"
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using namespace ::android::hardware::audio::common::V7_0;
+// Make an alias for enumerations generated from the APM config XSD.
+namespace xsd {
+using namespace ::audio::policy::configuration::V7_0;
+}
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+Return<Result> Effect::init() {
+ return Result::OK;
+}
+
+Return<Result> Effect::setConfig(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) {
+ (void)config;
+ (void)inputBufferProvider;
+ (void)outputBufferProvider;
+ return Result::OK;
+}
+
+Return<Result> Effect::reset() {
+ return Result::OK;
+}
+
+Return<Result> Effect::enable() {
+ if (!mEnabled) {
+ mEnabled = true;
+ return Result::OK;
+ } else {
+ return Result::NOT_SUPPORTED;
+ }
+}
+
+Return<Result> Effect::disable() {
+ if (mEnabled) {
+ mEnabled = false;
+ return Result::OK;
+ } else {
+ return Result::NOT_SUPPORTED;
+ }
+}
+
+Return<Result> Effect::setDevice(const DeviceAddress& device) {
+ (void)device;
+ return Result::OK;
+}
+
+Return<void> Effect::setAndGetVolume(const hidl_vec<uint32_t>& volumes,
+ setAndGetVolume_cb _hidl_cb) {
+ (void)volumes;
+ _hidl_cb(Result::OK, hidl_vec<uint32_t>{});
+ return Void();
+}
+
+Return<Result> Effect::volumeChangeNotification(const hidl_vec<uint32_t>& volumes) {
+ (void)volumes;
+ return Result::OK;
+}
+
+Return<Result> Effect::setAudioMode(AudioMode mode) {
+ (void)mode;
+ return Result::OK;
+}
+
+Return<Result> Effect::setConfigReverse(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) {
+ (void)config;
+ (void)inputBufferProvider;
+ (void)outputBufferProvider;
+ return Result::OK;
+}
+
+Return<Result> Effect::setInputDevice(const DeviceAddress& device) {
+ (void)device;
+ return Result::OK;
+}
+
+Return<void> Effect::getConfig(getConfig_cb _hidl_cb) {
+ const EffectConfig config = {{} /* inputCfg */,
+ // outputCfg
+ {{} /* buffer */,
+ 48000 /* samplingRateHz */,
+ toString(xsd::AudioChannelMask::AUDIO_CHANNEL_OUT_STEREO),
+ toString(xsd::AudioFormat::AUDIO_FORMAT_PCM_16_BIT),
+ EffectBufferAccess::ACCESS_ACCUMULATE,
+ 0 /* mask */}};
+ _hidl_cb(Result::OK, config);
+ return Void();
+}
+
+Return<void> Effect::getConfigReverse(getConfigReverse_cb _hidl_cb) {
+ _hidl_cb(Result::OK, EffectConfig{});
+ return Void();
+}
+
+Return<void> Effect::getSupportedAuxChannelsConfigs(uint32_t maxConfigs,
+ getSupportedAuxChannelsConfigs_cb _hidl_cb) {
+ (void)maxConfigs;
+ _hidl_cb(Result::OK, hidl_vec<EffectAuxChannelsConfig>{});
+ return Void();
+}
+
+Return<void> Effect::getAuxChannelsConfig(getAuxChannelsConfig_cb _hidl_cb) {
+ _hidl_cb(Result::OK, EffectAuxChannelsConfig{});
+ return Void();
+}
+
+Return<Result> Effect::setAuxChannelsConfig(const EffectAuxChannelsConfig& config) {
+ (void)config;
+ return Result::OK;
+}
+
+Return<Result> Effect::setAudioSource(const hidl_string& source) {
+ (void)source;
+ return Result::OK;
+}
+
+Return<Result> Effect::offload(const EffectOffloadParameter& param) {
+ (void)param;
+ return Result::OK;
+}
+
+Return<void> Effect::getDescriptor(getDescriptor_cb _hidl_cb) {
+ _hidl_cb(Result::OK, mDescriptor);
+ return Void();
+}
+
+Return<void> Effect::prepareForProcessing(prepareForProcessing_cb _hidl_cb) {
+ _hidl_cb(Result::OK, MQDescriptor<Result, kSynchronizedReadWrite>{});
+ return Void();
+}
+
+Return<Result> Effect::setProcessBuffers(const AudioBuffer& inBuffer,
+ const AudioBuffer& outBuffer) {
+ (void)inBuffer;
+ (void)outBuffer;
+ return Result::OK;
+}
+
+Return<void> Effect::command(uint32_t commandId, const hidl_vec<uint8_t>& data,
+ uint32_t resultMaxSize, command_cb _hidl_cb) {
+ (void)commandId;
+ (void)data;
+ (void)resultMaxSize;
+ _hidl_cb(-EINVAL, hidl_vec<uint8_t>{});
+ return Void();
+}
+
+Return<Result> Effect::setParameter(const hidl_vec<uint8_t>& parameter,
+ const hidl_vec<uint8_t>& value) {
+ (void)parameter;
+ (void)value;
+ return Result::OK;
+}
+
+Return<void> Effect::getParameter(const hidl_vec<uint8_t>& parameter, uint32_t valueMaxSize,
+ getParameter_cb _hidl_cb) {
+ (void)parameter;
+ (void)valueMaxSize;
+ _hidl_cb(Result::OK, hidl_vec<uint8_t>{});
+ return Void();
+}
+
+Return<void> Effect::getSupportedConfigsForFeature(uint32_t featureId, uint32_t maxConfigs,
+ uint32_t configSize,
+ getSupportedConfigsForFeature_cb _hidl_cb) {
+ (void)featureId;
+ (void)maxConfigs;
+ (void)configSize;
+ _hidl_cb(Result::OK, 0, hidl_vec<uint8_t>{});
+ return Void();
+}
+
+Return<void> Effect::getCurrentConfigForFeature(uint32_t featureId, uint32_t configSize,
+ getCurrentConfigForFeature_cb _hidl_cb) {
+ (void)featureId;
+ (void)configSize;
+ _hidl_cb(Result::OK, hidl_vec<uint8_t>{});
+ return Void();
+}
+
+Return<Result> Effect::setCurrentConfigForFeature(uint32_t featureId,
+ const hidl_vec<uint8_t>& configData) {
+ (void)featureId;
+ (void)configData;
+ return Result::OK;
+}
+
+Return<Result> Effect::close() {
+ return Result::OK;
+}
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
diff --git a/audio/common/7.0/example/Effect.h b/audio/common/7.0/example/Effect.h
new file mode 100644
index 0000000..fa7f41b
--- /dev/null
+++ b/audio/common/7.0/example/Effect.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/audio/effect/7.0/IEffect.h>
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+class Effect : public IEffect {
+ public:
+ explicit Effect(const EffectDescriptor& descriptor) : mDescriptor(descriptor) {}
+
+ ::android::hardware::Return<Result> init() override;
+ ::android::hardware::Return<Result> setConfig(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) override;
+ ::android::hardware::Return<Result> reset() override;
+ ::android::hardware::Return<Result> enable() override;
+ ::android::hardware::Return<Result> disable() override;
+ ::android::hardware::Return<Result> setDevice(
+ const ::android::hardware::audio::common::V7_0::DeviceAddress& device) override;
+ ::android::hardware::Return<void> setAndGetVolume(
+ const ::android::hardware::hidl_vec<uint32_t>& volumes,
+ setAndGetVolume_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> volumeChangeNotification(
+ const ::android::hardware::hidl_vec<uint32_t>& volumes) override;
+ ::android::hardware::Return<Result> setAudioMode(
+ ::android::hardware::audio::common::V7_0::AudioMode mode) override;
+ ::android::hardware::Return<Result> setConfigReverse(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) override;
+ ::android::hardware::Return<Result> setInputDevice(
+ const ::android::hardware::audio::common::V7_0::DeviceAddress& device) override;
+ ::android::hardware::Return<void> getConfig(getConfig_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getConfigReverse(getConfigReverse_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getSupportedAuxChannelsConfigs(
+ uint32_t maxConfigs, getSupportedAuxChannelsConfigs_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getAuxChannelsConfig(
+ getAuxChannelsConfig_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> setAuxChannelsConfig(
+ const EffectAuxChannelsConfig& config) override;
+ ::android::hardware::Return<Result> setAudioSource(
+ const ::android::hardware::hidl_string& source) override;
+ ::android::hardware::Return<Result> offload(const EffectOffloadParameter& param) override;
+ ::android::hardware::Return<void> getDescriptor(getDescriptor_cb _hidl_cb) override;
+ ::android::hardware::Return<void> prepareForProcessing(
+ prepareForProcessing_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> setProcessBuffers(const AudioBuffer& inBuffer,
+ const AudioBuffer& outBuffer) override;
+ ::android::hardware::Return<void> command(uint32_t commandId,
+ const ::android::hardware::hidl_vec<uint8_t>& data,
+ uint32_t resultMaxSize, command_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> setParameter(
+ const ::android::hardware::hidl_vec<uint8_t>& parameter,
+ const ::android::hardware::hidl_vec<uint8_t>& value) override;
+ ::android::hardware::Return<void> getParameter(
+ const ::android::hardware::hidl_vec<uint8_t>& parameter, uint32_t valueMaxSize,
+ getParameter_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getSupportedConfigsForFeature(
+ uint32_t featureId, uint32_t maxConfigs, uint32_t configSize,
+ getSupportedConfigsForFeature_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getCurrentConfigForFeature(
+ uint32_t featureId, uint32_t configSize,
+ getCurrentConfigForFeature_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> setCurrentConfigForFeature(
+ uint32_t featureId, const ::android::hardware::hidl_vec<uint8_t>& configData) override;
+ ::android::hardware::Return<Result> close() override;
+
+ private:
+ const EffectDescriptor mDescriptor;
+ bool mEnabled = false;
+};
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
diff --git a/audio/common/7.0/example/EffectsFactory.cpp b/audio/common/7.0/example/EffectsFactory.cpp
new file mode 100644
index 0000000..7d333ae
--- /dev/null
+++ b/audio/common/7.0/example/EffectsFactory.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectsFactory7.0"
+#include <log/log.h>
+
+#include "EffectsFactory.h"
+#include "EqualizerEffect.h"
+#include "LoudnessEnhancerEffect.h"
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using namespace ::android::hardware::audio::common::V7_0;
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+Return<void> EffectsFactory::getAllDescriptors(getAllDescriptors_cb _hidl_cb) {
+ hidl_vec<EffectDescriptor> descriptors;
+ descriptors.resize(2);
+ descriptors[0] = EqualizerEffect::getDescriptor();
+ descriptors[1] = LoudnessEnhancerEffect::getDescriptor();
+ _hidl_cb(Result::OK, descriptors);
+ return Void();
+}
+
+Return<void> EffectsFactory::getDescriptor(const Uuid& uuid, getDescriptor_cb _hidl_cb) {
+ if (auto desc = EqualizerEffect::getDescriptor(); uuid == desc.type || uuid == desc.uuid) {
+ _hidl_cb(Result::OK, desc);
+ } else if (auto desc = LoudnessEnhancerEffect::getDescriptor();
+ uuid == desc.type || uuid == desc.uuid) {
+ _hidl_cb(Result::OK, desc);
+ } else {
+ _hidl_cb(Result::INVALID_ARGUMENTS, EffectDescriptor{});
+ }
+ return Void();
+}
+
+Return<void> EffectsFactory::createEffect(const Uuid& uuid, int32_t session, int32_t ioHandle,
+ int32_t device, createEffect_cb _hidl_cb) {
+ (void)session;
+ (void)ioHandle;
+ (void)device;
+ if (auto desc = EqualizerEffect::getDescriptor(); uuid == desc.type || uuid == desc.uuid) {
+ _hidl_cb(Result::OK, new EqualizerEffect(), 0);
+ } else if (auto desc = LoudnessEnhancerEffect::getDescriptor();
+ uuid == desc.type || uuid == desc.uuid) {
+ _hidl_cb(Result::OK, new LoudnessEnhancerEffect(), 0);
+ } else {
+ _hidl_cb(Result::INVALID_ARGUMENTS, nullptr, 0);
+ }
+ return Void();
+}
+
+Return<void> EffectsFactory::debug(const hidl_handle& fd, const hidl_vec<hidl_string>& options) {
+ (void)fd;
+ (void)options;
+ return Void();
+}
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
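For reference, a client would typically enumerate the descriptors and then create an effect by UUID, which the factory above matches against either the `type` or the `uuid` field. The sketch below assumes the client already holds an `sp<IEffectsFactory>` (obtained, for example, via `IEffectsFactory::getService("example")`); error handling is elided.

```cpp
// Client-side sketch (not part of this patch). Assumes an existing
// sp<IEffectsFactory> bound to the example service.
#include <android/hardware/audio/effect/7.0/IEffectsFactory.h>

using ::android::sp;
using ::android::hardware::hidl_vec;
using namespace ::android::hardware::audio::effect::V7_0;

sp<IEffect> createFirstEffect(const sp<IEffectsFactory>& factory) {
    sp<IEffect> effect;
    factory->getAllDescriptors([&](Result result, const hidl_vec<EffectDescriptor>& descs) {
        if (result != Result::OK || descs.size() == 0) return;
        // The factory matches either the 'type' or the 'uuid' field.
        factory->createEffect(descs[0].uuid, 0 /*session*/, 0 /*ioHandle*/, 0 /*device*/,
                              [&](Result r, const sp<IEffect>& e, uint64_t /*effectId*/) {
                                  if (r == Result::OK) effect = e;
                              });
    });
    return effect;
}
```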
diff --git a/audio/common/7.0/example/EffectsFactory.h b/audio/common/7.0/example/EffectsFactory.h
new file mode 100644
index 0000000..8fec70c
--- /dev/null
+++ b/audio/common/7.0/example/EffectsFactory.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/audio/effect/7.0/IEffectsFactory.h>
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+class EffectsFactory : public IEffectsFactory {
+ public:
+ EffectsFactory() = default;
+
+ ::android::hardware::Return<void> getAllDescriptors(getAllDescriptors_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getDescriptor(
+ const ::android::hardware::audio::common::V7_0::Uuid& uuid,
+ getDescriptor_cb _hidl_cb) override;
+ ::android::hardware::Return<void> createEffect(
+ const ::android::hardware::audio::common::V7_0::Uuid& uuid, int32_t session,
+ int32_t ioHandle, int32_t device, createEffect_cb _hidl_cb) override;
+ ::android::hardware::Return<void>
+ debug(const ::android::hardware::hidl_handle& fd,
+ const ::android::hardware::hidl_vec<::android::hardware::hidl_string>& options) override;
+};
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
diff --git a/audio/common/7.0/example/EqualizerEffect.cpp b/audio/common/7.0/example/EqualizerEffect.cpp
new file mode 100644
index 0000000..c93c5a9
--- /dev/null
+++ b/audio/common/7.0/example/EqualizerEffect.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits>
+
+#define LOG_TAG "EffectsFactory7.0"
+#include <log/log.h>
+
+#include "EqualizerEffect.h"
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using namespace ::android::hardware::audio::common::V7_0;
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+const EffectDescriptor& EqualizerEffect::getDescriptor() {
+ // Note: for VTS tests only 'type' and 'uuid' fields are required.
+ // The actual implementation must provide meaningful values
+ // for all fields of the descriptor.
+ static const EffectDescriptor descriptor = {
+ .type =
+ {// Same UUID as AudioEffect.EFFECT_TYPE_EQUALIZER in Java.
+ 0x0bed4300, 0xddd6, 0x11db, 0x8f34,
+ std::array<uint8_t, 6>{{0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}},
+ .uuid = {0, 0, 0, 1, std::array<uint8_t, 6>{{0, 0, 0, 0, 0, 0}}}};
+ return descriptor;
+}
+
+EqualizerEffect::EqualizerEffect() : mEffect(new Effect(getDescriptor())) {
+ mProperties.bandLevels.resize(kNumBands);
+}
+
+Return<void> EqualizerEffect::getNumBands(getNumBands_cb _hidl_cb) {
+ _hidl_cb(Result::OK, kNumBands);
+ return Void();
+}
+
+Return<void> EqualizerEffect::getLevelRange(getLevelRange_cb _hidl_cb) {
+ _hidl_cb(Result::OK, std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max());
+ return Void();
+}
+
+Return<Result> EqualizerEffect::setBandLevel(uint16_t band, int16_t level) {
+ if (band < kNumBands) {
+ mProperties.bandLevels[band] = level;
+ return Result::OK;
+ } else {
+ return Result::INVALID_ARGUMENTS;
+ }
+}
+
+Return<void> EqualizerEffect::getBandLevel(uint16_t band, getBandLevel_cb _hidl_cb) {
+ if (band < kNumBands) {
+ _hidl_cb(Result::OK, mProperties.bandLevels[band]);
+ } else {
+ _hidl_cb(Result::INVALID_ARGUMENTS, 0);
+ }
+ return Void();
+}
+
+Return<void> EqualizerEffect::getBandCenterFrequency(uint16_t band,
+ getBandCenterFrequency_cb _hidl_cb) {
+ (void)band;
+ _hidl_cb(Result::OK, 0);
+ return Void();
+}
+
+Return<void> EqualizerEffect::getBandFrequencyRange(uint16_t band,
+ getBandFrequencyRange_cb _hidl_cb) {
+ (void)band;
+ _hidl_cb(Result::OK, 0, 1);
+ return Void();
+}
+
+Return<void> EqualizerEffect::getBandForFrequency(uint32_t freq, getBandForFrequency_cb _hidl_cb) {
+ (void)freq;
+ _hidl_cb(Result::OK, 0);
+ return Void();
+}
+
+Return<void> EqualizerEffect::getPresetNames(getPresetNames_cb _hidl_cb) {
+ hidl_vec<hidl_string> presetNames;
+ presetNames.resize(kNumPresets);
+ presetNames[0] = "default";
+ _hidl_cb(Result::OK, presetNames);
+ return Void();
+}
+
+Return<Result> EqualizerEffect::setCurrentPreset(uint16_t preset) {
+ if (preset < kNumPresets) {
+ mProperties.curPreset = preset;
+ return Result::OK;
+ } else {
+ return Result::INVALID_ARGUMENTS;
+ }
+}
+
+Return<void> EqualizerEffect::getCurrentPreset(getCurrentPreset_cb _hidl_cb) {
+ _hidl_cb(Result::OK, mProperties.curPreset);
+ return Void();
+}
+
+Return<Result> EqualizerEffect::setAllProperties(
+ const IEqualizerEffect::AllProperties& properties) {
+ mProperties = properties;
+ return Result::OK;
+}
+
+Return<void> EqualizerEffect::getAllProperties(getAllProperties_cb _hidl_cb) {
+ _hidl_cb(Result::OK, mProperties);
+ return Void();
+}
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
diff --git a/audio/common/7.0/example/EqualizerEffect.h b/audio/common/7.0/example/EqualizerEffect.h
new file mode 100644
index 0000000..11853c3
--- /dev/null
+++ b/audio/common/7.0/example/EqualizerEffect.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/audio/effect/7.0/IEqualizerEffect.h>
+
+#include "Effect.h"
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+class EqualizerEffect : public IEqualizerEffect {
+ public:
+ static const EffectDescriptor& getDescriptor();
+
+ EqualizerEffect();
+
+ // Methods from IEffect interface.
+ ::android::hardware::Return<Result> init() override { return mEffect->init(); }
+ ::android::hardware::Return<Result> setConfig(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) override {
+ return mEffect->setConfig(config, inputBufferProvider, outputBufferProvider);
+ }
+ ::android::hardware::Return<Result> reset() override { return mEffect->reset(); }
+ ::android::hardware::Return<Result> enable() override { return mEffect->enable(); }
+ ::android::hardware::Return<Result> disable() override { return mEffect->disable(); }
+ ::android::hardware::Return<Result> setDevice(
+ const ::android::hardware::audio::common::V7_0::DeviceAddress& device) override {
+ return mEffect->setDevice(device);
+ }
+ ::android::hardware::Return<void> setAndGetVolume(
+ const ::android::hardware::hidl_vec<uint32_t>& volumes,
+ setAndGetVolume_cb _hidl_cb) override {
+ return mEffect->setAndGetVolume(volumes, _hidl_cb);
+ }
+ ::android::hardware::Return<Result> volumeChangeNotification(
+ const ::android::hardware::hidl_vec<uint32_t>& volumes) override {
+ return mEffect->volumeChangeNotification(volumes);
+ }
+ ::android::hardware::Return<Result> setAudioMode(
+ ::android::hardware::audio::common::V7_0::AudioMode mode) override {
+ return mEffect->setAudioMode(mode);
+ }
+ ::android::hardware::Return<Result> setConfigReverse(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) override {
+ return mEffect->setConfigReverse(config, inputBufferProvider, outputBufferProvider);
+ }
+ ::android::hardware::Return<Result> setInputDevice(
+ const ::android::hardware::audio::common::V7_0::DeviceAddress& device) override {
+ return mEffect->setInputDevice(device);
+ }
+ ::android::hardware::Return<void> getConfig(getConfig_cb _hidl_cb) override {
+ return mEffect->getConfig(_hidl_cb);
+ }
+ ::android::hardware::Return<void> getConfigReverse(getConfigReverse_cb _hidl_cb) override {
+ return mEffect->getConfigReverse(_hidl_cb);
+ }
+ ::android::hardware::Return<void> getSupportedAuxChannelsConfigs(
+ uint32_t maxConfigs, getSupportedAuxChannelsConfigs_cb _hidl_cb) override {
+ return mEffect->getSupportedAuxChannelsConfigs(maxConfigs, _hidl_cb);
+ }
+ ::android::hardware::Return<void> getAuxChannelsConfig(
+ getAuxChannelsConfig_cb _hidl_cb) override {
+ return mEffect->getAuxChannelsConfig(_hidl_cb);
+ }
+ ::android::hardware::Return<Result> setAuxChannelsConfig(
+ const EffectAuxChannelsConfig& config) override {
+ return mEffect->setAuxChannelsConfig(config);
+ }
+ ::android::hardware::Return<Result> setAudioSource(
+ const ::android::hardware::hidl_string& source) override {
+ return mEffect->setAudioSource(source);
+ }
+ ::android::hardware::Return<Result> offload(const EffectOffloadParameter& param) override {
+ return mEffect->offload(param);
+ }
+ ::android::hardware::Return<void> getDescriptor(getDescriptor_cb _hidl_cb) override {
+ return mEffect->getDescriptor(_hidl_cb);
+ }
+ ::android::hardware::Return<void> prepareForProcessing(
+ prepareForProcessing_cb _hidl_cb) override {
+ return mEffect->prepareForProcessing(_hidl_cb);
+ }
+ ::android::hardware::Return<Result> setProcessBuffers(const AudioBuffer& inBuffer,
+ const AudioBuffer& outBuffer) override {
+ return mEffect->setProcessBuffers(inBuffer, outBuffer);
+ }
+ ::android::hardware::Return<void> command(uint32_t commandId,
+ const ::android::hardware::hidl_vec<uint8_t>& data,
+ uint32_t resultMaxSize,
+ command_cb _hidl_cb) override {
+ return mEffect->command(commandId, data, resultMaxSize, _hidl_cb);
+ }
+ ::android::hardware::Return<Result> setParameter(
+ const ::android::hardware::hidl_vec<uint8_t>& parameter,
+ const ::android::hardware::hidl_vec<uint8_t>& value) override {
+ return mEffect->setParameter(parameter, value);
+ }
+ ::android::hardware::Return<void> getParameter(
+ const ::android::hardware::hidl_vec<uint8_t>& parameter, uint32_t valueMaxSize,
+ getParameter_cb _hidl_cb) override {
+ return mEffect->getParameter(parameter, valueMaxSize, _hidl_cb);
+ }
+ ::android::hardware::Return<void> getSupportedConfigsForFeature(
+ uint32_t featureId, uint32_t maxConfigs, uint32_t configSize,
+ getSupportedConfigsForFeature_cb _hidl_cb) override {
+ return mEffect->getSupportedConfigsForFeature(featureId, maxConfigs, configSize, _hidl_cb);
+ }
+ ::android::hardware::Return<void> getCurrentConfigForFeature(
+ uint32_t featureId, uint32_t configSize,
+ getCurrentConfigForFeature_cb _hidl_cb) override {
+ return mEffect->getCurrentConfigForFeature(featureId, configSize, _hidl_cb);
+ }
+ ::android::hardware::Return<Result> setCurrentConfigForFeature(
+ uint32_t featureId, const ::android::hardware::hidl_vec<uint8_t>& configData) override {
+ return mEffect->setCurrentConfigForFeature(featureId, configData);
+ }
+ ::android::hardware::Return<Result> close() override { return mEffect->close(); }
+
+ // Methods from IEqualizerEffect interface.
+ ::android::hardware::Return<void> getNumBands(getNumBands_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getLevelRange(getLevelRange_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> setBandLevel(uint16_t band, int16_t level) override;
+ ::android::hardware::Return<void> getBandLevel(uint16_t band,
+ getBandLevel_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getBandCenterFrequency(
+ uint16_t band, getBandCenterFrequency_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getBandFrequencyRange(
+ uint16_t band, getBandFrequencyRange_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getBandForFrequency(uint32_t freq,
+ getBandForFrequency_cb _hidl_cb) override;
+ ::android::hardware::Return<void> getPresetNames(getPresetNames_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> setCurrentPreset(uint16_t preset) override;
+ ::android::hardware::Return<void> getCurrentPreset(getCurrentPreset_cb _hidl_cb) override;
+ ::android::hardware::Return<Result> setAllProperties(
+ const IEqualizerEffect::AllProperties& properties) override;
+ ::android::hardware::Return<void> getAllProperties(getAllProperties_cb _hidl_cb) override;
+
+ private:
+ static constexpr size_t kNumBands = 1;
+ static constexpr size_t kNumPresets = 1;
+ sp<Effect> mEffect;
+ IEqualizerEffect::AllProperties mProperties{};
+};
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
diff --git a/audio/common/7.0/example/LoudnessEnhancerEffect.cpp b/audio/common/7.0/example/LoudnessEnhancerEffect.cpp
new file mode 100644
index 0000000..38269b3
--- /dev/null
+++ b/audio/common/7.0/example/LoudnessEnhancerEffect.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectsFactory7.0"
+#include <log/log.h>
+
+#include "LoudnessEnhancerEffect.h"
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using namespace ::android::hardware::audio::common::V7_0;
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+const EffectDescriptor& LoudnessEnhancerEffect::getDescriptor() {
+ // Note: for VTS tests only 'type' and 'uuid' fields are required.
+ // The actual implementation must provide meaningful values
+ // for all fields of the descriptor.
+ static const EffectDescriptor descriptor = {
+ .type =
+ {// Same UUID as AudioEffect.EFFECT_TYPE_LOUDNESS_ENHANCER in Java.
+ 0xfe3199be, 0xaed0, 0x413f, 0x87bb,
+ std::array<uint8_t, 6>{{0x11, 0x26, 0x0e, 0xb6, 0x3c, 0xf1}}},
+ .uuid = {0, 0, 0, 2, std::array<uint8_t, 6>{{0, 0, 0, 0, 0, 0}}}};
+ return descriptor;
+}
+
+LoudnessEnhancerEffect::LoudnessEnhancerEffect() : mEffect(new Effect(getDescriptor())) {}
+
+Return<Result> LoudnessEnhancerEffect::setTargetGain(int32_t targetGainMb) {
+ mTargetGainMb = targetGainMb;
+ return Result::OK;
+}
+
+Return<void> LoudnessEnhancerEffect::getTargetGain(getTargetGain_cb _hidl_cb) {
+ _hidl_cb(Result::OK, mTargetGainMb);
+ return Void();
+}
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
diff --git a/audio/common/7.0/example/LoudnessEnhancerEffect.h b/audio/common/7.0/example/LoudnessEnhancerEffect.h
new file mode 100644
index 0000000..1af0d9f
--- /dev/null
+++ b/audio/common/7.0/example/LoudnessEnhancerEffect.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/audio/effect/7.0/ILoudnessEnhancerEffect.h>
+
+#include "Effect.h"
+
+namespace android::hardware::audio::effect::V7_0::implementation {
+
+class LoudnessEnhancerEffect : public ILoudnessEnhancerEffect {
+ public:
+ static const EffectDescriptor& getDescriptor();
+
+ LoudnessEnhancerEffect();
+
+ // Methods from IEffect interface.
+ ::android::hardware::Return<Result> init() override { return mEffect->init(); }
+ ::android::hardware::Return<Result> setConfig(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) override {
+ return mEffect->setConfig(config, inputBufferProvider, outputBufferProvider);
+ }
+ ::android::hardware::Return<Result> reset() override { return mEffect->reset(); }
+ ::android::hardware::Return<Result> enable() override { return mEffect->enable(); }
+ ::android::hardware::Return<Result> disable() override { return mEffect->disable(); }
+ ::android::hardware::Return<Result> setDevice(
+ const ::android::hardware::audio::common::V7_0::DeviceAddress& device) override {
+ return mEffect->setDevice(device);
+ }
+ ::android::hardware::Return<void> setAndGetVolume(
+ const ::android::hardware::hidl_vec<uint32_t>& volumes,
+ setAndGetVolume_cb _hidl_cb) override {
+ return mEffect->setAndGetVolume(volumes, _hidl_cb);
+ }
+ ::android::hardware::Return<Result> volumeChangeNotification(
+ const ::android::hardware::hidl_vec<uint32_t>& volumes) override {
+ return mEffect->volumeChangeNotification(volumes);
+ }
+ ::android::hardware::Return<Result> setAudioMode(
+ ::android::hardware::audio::common::V7_0::AudioMode mode) override {
+ return mEffect->setAudioMode(mode);
+ }
+ ::android::hardware::Return<Result> setConfigReverse(
+ const EffectConfig& config,
+ const ::android::sp<IEffectBufferProviderCallback>& inputBufferProvider,
+ const ::android::sp<IEffectBufferProviderCallback>& outputBufferProvider) override {
+ return mEffect->setConfigReverse(config, inputBufferProvider, outputBufferProvider);
+ }
+ ::android::hardware::Return<Result> setInputDevice(
+ const ::android::hardware::audio::common::V7_0::DeviceAddress& device) override {
+ return mEffect->setInputDevice(device);
+ }
+ ::android::hardware::Return<void> getConfig(getConfig_cb _hidl_cb) override {
+ return mEffect->getConfig(_hidl_cb);
+ }
+ ::android::hardware::Return<void> getConfigReverse(getConfigReverse_cb _hidl_cb) override {
+ return mEffect->getConfigReverse(_hidl_cb);
+ }
+ ::android::hardware::Return<void> getSupportedAuxChannelsConfigs(
+ uint32_t maxConfigs, getSupportedAuxChannelsConfigs_cb _hidl_cb) override {
+ return mEffect->getSupportedAuxChannelsConfigs(maxConfigs, _hidl_cb);
+ }
+ ::android::hardware::Return<void> getAuxChannelsConfig(
+ getAuxChannelsConfig_cb _hidl_cb) override {
+ return mEffect->getAuxChannelsConfig(_hidl_cb);
+ }
+ ::android::hardware::Return<Result> setAuxChannelsConfig(
+ const EffectAuxChannelsConfig& config) override {
+ return mEffect->setAuxChannelsConfig(config);
+ }
+ ::android::hardware::Return<Result> setAudioSource(
+ const ::android::hardware::hidl_string& source) override {
+ return mEffect->setAudioSource(source);
+ }
+ ::android::hardware::Return<Result> offload(const EffectOffloadParameter& param) override {
+ return mEffect->offload(param);
+ }
+ ::android::hardware::Return<void> getDescriptor(getDescriptor_cb _hidl_cb) override {
+ return mEffect->getDescriptor(_hidl_cb);
+ }
+ ::android::hardware::Return<void> prepareForProcessing(
+ prepareForProcessing_cb _hidl_cb) override {
+ return mEffect->prepareForProcessing(_hidl_cb);
+ }
+ ::android::hardware::Return<Result> setProcessBuffers(const AudioBuffer& inBuffer,
+ const AudioBuffer& outBuffer) override {
+ return mEffect->setProcessBuffers(inBuffer, outBuffer);
+ }
+ ::android::hardware::Return<void> command(uint32_t commandId,
+ const ::android::hardware::hidl_vec<uint8_t>& data,
+ uint32_t resultMaxSize,
+ command_cb _hidl_cb) override {
+ return mEffect->command(commandId, data, resultMaxSize, _hidl_cb);
+ }
+ ::android::hardware::Return<Result> setParameter(
+ const ::android::hardware::hidl_vec<uint8_t>& parameter,
+ const ::android::hardware::hidl_vec<uint8_t>& value) override {
+ return mEffect->setParameter(parameter, value);
+ }
+ ::android::hardware::Return<void> getParameter(
+ const ::android::hardware::hidl_vec<uint8_t>& parameter, uint32_t valueMaxSize,
+ getParameter_cb _hidl_cb) override {
+ return mEffect->getParameter(parameter, valueMaxSize, _hidl_cb);
+ }
+ ::android::hardware::Return<void> getSupportedConfigsForFeature(
+ uint32_t featureId, uint32_t maxConfigs, uint32_t configSize,
+ getSupportedConfigsForFeature_cb _hidl_cb) override {
+ return mEffect->getSupportedConfigsForFeature(featureId, maxConfigs, configSize, _hidl_cb);
+ }
+ ::android::hardware::Return<void> getCurrentConfigForFeature(
+ uint32_t featureId, uint32_t configSize,
+ getCurrentConfigForFeature_cb _hidl_cb) override {
+ return mEffect->getCurrentConfigForFeature(featureId, configSize, _hidl_cb);
+ }
+ ::android::hardware::Return<Result> setCurrentConfigForFeature(
+ uint32_t featureId, const ::android::hardware::hidl_vec<uint8_t>& configData) override {
+ return mEffect->setCurrentConfigForFeature(featureId, configData);
+ }
+ ::android::hardware::Return<Result> close() override { return mEffect->close(); }
+
+ // Methods from ILoudnessEnhancerEffect interface.
+ ::android::hardware::Return<Result> setTargetGain(int32_t targetGainMb) override;
+ ::android::hardware::Return<void> getTargetGain(getTargetGain_cb _hidl_cb) override;
+
+ private:
+ sp<Effect> mEffect;
+ int32_t mTargetGainMb = 0;
+};
+
+} // namespace android::hardware::audio::effect::V7_0::implementation
diff --git a/audio/common/7.0/example/android.hardware.audio@7.0-service.example.rc b/audio/common/7.0/example/android.hardware.audio@7.0-service.example.rc
new file mode 100644
index 0000000..cf8b51f
--- /dev/null
+++ b/audio/common/7.0/example/android.hardware.audio@7.0-service.example.rc
@@ -0,0 +1,7 @@
+service vendor.audio-hal-7-0 /vendor/bin/hw/android.hardware.audio@7.0-service.example
+ class hal
+ user audioserver
+ group audio
+ capabilities BLOCK_SUSPEND
+ ioprio rt 4
+ task_profiles ProcessCapacityHigh HighPerformance
diff --git a/audio/common/7.0/example/android.hardware.audio@7.0-service.example.xml b/audio/common/7.0/example/android.hardware.audio@7.0-service.example.xml
new file mode 100644
index 0000000..b91b061
--- /dev/null
+++ b/audio/common/7.0/example/android.hardware.audio@7.0-service.example.xml
@@ -0,0 +1,20 @@
+<manifest version="1.0" type="device">
+ <hal format="hidl">
+ <name>android.hardware.audio</name>
+ <transport>hwbinder</transport>
+ <version>7.0</version>
+ <interface>
+ <name>IDevicesFactory</name>
+ <instance>example</instance>
+ </interface>
+ </hal>
+ <hal format="hidl">
+ <name>android.hardware.audio.effect</name>
+ <transport>hwbinder</transport>
+ <version>7.0</version>
+ <interface>
+ <name>IEffectsFactory</name>
+ <instance>example</instance>
+ </interface>
+ </hal>
+</manifest>
diff --git a/audio/common/7.0/example/service.cpp b/audio/common/7.0/example/service.cpp
new file mode 100644
index 0000000..641e2c9
--- /dev/null
+++ b/audio/common/7.0/example/service.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.audio@7.0-service.example"
+#include <hidl/HidlTransportSupport.h>
+#include <log/log.h>
+
+#include "DevicesFactory.h"
+#include "EffectsFactory.h"
+
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+using namespace android;
+
+status_t registerDevicesFactoryService() {
+ sp<::android::hardware::audio::V7_0::IDevicesFactory> devicesFactory =
+ new ::android::hardware::audio::V7_0::implementation::DevicesFactory();
+ status_t status = devicesFactory->registerAsService("example");
+ ALOGE_IF(status != OK, "Error registering devices factory as service: %d", status);
+ return status;
+}
+
+status_t registerEffectsFactoryService() {
+ sp<::android::hardware::audio::effect::V7_0::IEffectsFactory> devicesFactory =
+ new ::android::hardware::audio::effect::V7_0::implementation::EffectsFactory();
+ status_t status = devicesFactory->registerAsService("example");
+ ALOGE_IF(status != OK, "Error registering effects factory as service: %d", status);
+ return status;
+}
+
+int main() {
+ configureRpcThreadpool(1, true);
+ status_t status = registerDevicesFactoryService();
+ if (status != OK) {
+ return status;
+ }
+ status = registerEffectsFactoryService();
+ if (status != OK) {
+ return status;
+ }
+ joinRpcThreadpool();
+
+ return 1;
+}
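Once this binary is running, clients resolve the registered instances by name. A minimal sketch, assuming the generated `getService` helpers and the same "example" instance name passed to `registerAsService` above:

```cpp
// Client-side sketch (not part of this patch): look up the instances
// that service.cpp registers under the "example" name.
#include <android/hardware/audio/7.0/IDevicesFactory.h>
#include <android/hardware/audio/effect/7.0/IEffectsFactory.h>

using ::android::sp;

int probeExampleHal() {
    sp<::android::hardware::audio::V7_0::IDevicesFactory> devices =
            ::android::hardware::audio::V7_0::IDevicesFactory::getService("example");
    sp<::android::hardware::audio::effect::V7_0::IEffectsFactory> effects =
            ::android::hardware::audio::effect::V7_0::IEffectsFactory::getService("example");
    // Both factories must be up for the example HAL to be usable.
    return (devices != nullptr && effects != nullptr) ? 0 : 1;
}
```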
diff --git a/audio/common/7.0/types.hal b/audio/common/7.0/types.hal
index 2288eb1..31c7388 100644
--- a/audio/common/7.0/types.hal
+++ b/audio/common/7.0/types.hal
@@ -18,34 +18,18 @@
import android.hidl.safe_union@1.0;
-/*
- *
- * IDs and Handles
- *
- */
-
/**
- * Handle type for identifying audio sources and sinks.
+ * Handle type for identifying audio resources. Handles are allocated by the framework.
*/
typedef int32_t AudioIoHandle;
/**
- * Audio hw module handle functions or structures referencing a module.
- */
-typedef int32_t AudioModuleHandle;
-
-/**
* Each port has a unique ID or handle allocated by policy manager.
*/
typedef int32_t AudioPortHandle;
/**
- * Each patch is identified by a handle at the interface used to create that
- * patch. For instance, when a patch is created by the audio HAL, the HAL
- * allocates and returns a handle. This handle is unique to a given audio HAL
- * hardware module. But the same patch receives another system wide unique
- * handle allocated by the framework. This unique handle is used for all
- * transactions inside the framework.
+ * Each patch is identified by a handle allocated by the HAL.
*/
typedef int32_t AudioPatchHandle;
@@ -55,17 +39,6 @@
typedef uint32_t AudioHwSync;
/**
- * Each port has a unique ID or handle allocated by policy manager.
- */
-@export(name="")
-enum AudioHandleConsts : int32_t {
- AUDIO_IO_HANDLE_NONE = 0,
- AUDIO_MODULE_HANDLE_NONE = 0,
- AUDIO_PORT_HANDLE_NONE = 0,
- AUDIO_PATCH_HANDLE_NONE = 0,
-};
-
-/**
 * Commonly used structure for passing unique identifiers (UUID).
* For the definition of UUID, refer to ITU-T X.667 spec.
*/
@@ -86,116 +59,25 @@
/**
* Audio stream type describing the intended use case of a stream.
+ * See 'audioStreamType' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_stream_type_t", value_prefix="AUDIO_STREAM_")
-enum AudioStreamType : int32_t {
- // These values must kept in sync with
- // frameworks/base/media/java/android/media/AudioSystem.java
- /** Used to identify the default audio stream volume. */
- DEFAULT = -1,
- /** Specifies the minimum value for use in checks and loops. */
- MIN = 0,
- /** Used to identify the volume of audio streams for phone calls. */
- VOICE_CALL = 0,
- /** Used to identify the volume of audio streams for system sounds. */
- SYSTEM = 1,
- /**
- * Used to identify the volume of audio streams for the phone ring
- * and message alerts.
- */
- RING = 2,
- /** Used to identify the volume of audio streams for music playback. */
- MUSIC = 3,
- /** Used to identify the volume of audio streams for alarms. */
- ALARM = 4,
- /** Used to identify the volume of audio streams for notifications. */
- NOTIFICATION = 5,
- /**
- * Used to identify the volume of audio streams for phone calls
- * when connected on bluetooth.
- */
- BLUETOOTH_SCO = 6,
- /**
- * Used to identify the volume of audio streams for enforced system
- * sounds in certain countries (e.g camera in Japan). */
- ENFORCED_AUDIBLE = 7,
- /** Used to identify the volume of audio streams for DTMF tones. */
- DTMF = 8,
- /**
- * Used to identify the volume of audio streams exclusively transmitted
- * through the speaker (TTS) of the device.
- */
- TTS = 9,
- /**
- * Used to identify the volume of audio streams for accessibility prompts.
- */
- ACCESSIBILITY = 10,
- /** Used to identify the volume of audio streams for virtual assistant. */
- ASSISTANT = 11,
-};
+typedef string AudioStreamType;
-@export(name="audio_source_t", value_prefix="AUDIO_SOURCE_")
-enum AudioSource : int32_t {
- // These values must kept in sync with
- // frameworks/base/media/java/android/media/MediaRecorder.java,
- // system/media/audio_effects/include/audio_effects/audio_effects_conf.h
- /** Default audio source. */
- DEFAULT = 0,
- /** Microphone audio source. */
- MIC = 1,
- /** Voice call uplink (Tx) audio source. */
- VOICE_UPLINK = 2,
- /** Voice call downlink (Rx) audio source. */
- VOICE_DOWNLINK = 3,
- /** Voice call uplink + downlink audio source. */
- VOICE_CALL = 4,
- /**
- * Microphone audio source tuned for video recording, with the same
- * orientation as the camera if available.
- */
- CAMCORDER = 5,
- /** Microphone audio source tuned for voice recognition. */
- VOICE_RECOGNITION = 6,
- /**
- * Microphone audio source tuned for voice communications such as VoIP. It
- * will for instance take advantage of echo cancellation or automatic gain
- * control if available.
- */
- VOICE_COMMUNICATION = 7,
- /**
- * Source for the mix to be presented remotely. An example of remote
- * presentation is Wifi Display where a dongle attached to a TV can be used
- * to play the mix captured by this audio source.
- */
- REMOTE_SUBMIX = 8,
- /**
- * Source for unprocessed sound. Usage examples include level measurement
- * and raw signal analysis.
- */
- UNPROCESSED = 9,
- /**
- * Source for capturing audio meant to be processed in real time and played back for live
- * performance (e.g karaoke). The capture path will minimize latency and coupling with
- * playback path.
- */
- VOICE_PERFORMANCE = 10,
- /**
- * Source for an echo canceller to capture the reference signal to be cancelled.
- * The echo reference signal will be captured as close as possible to the DAC in order
- * to include all post processing applied to the playback path.
- */
- ECHO_REFERENCE = 1997,
- /** Virtual source for the built-in FM tuner. */
- FM_TUNER = 1998,
- /** Virtual source for the last captured hotword. */
- HOTWORD = 1999,
-};
-
-typedef int32_t AudioSession;
/**
- * Special audio session values.
+ * An audio source defines the intended use case for the sound being recorded.
+ * See 'audioSource' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_session_t", value_prefix="AUDIO_SESSION_")
+typedef string AudioSource;
+
+/**
+ * An audio session identifier is used to designate the particular
+ * playback or recording session (e.g. playback performed by a certain
+ * application).
+ */
+typedef int32_t AudioSession;
+
enum AudioSessionConsts : int32_t {
/**
* Session for effects attached to a particular sink or source audio device
@@ -213,382 +95,45 @@
* (value must be 0)
*/
OUTPUT_MIX = 0,
- /**
- * Application does not specify an explicit session ID to be used, and
- * requests a new session ID to be allocated. Corresponds to
- * AudioManager.AUDIO_SESSION_ID_GENERATE and
- * AudioSystem.AUDIO_SESSION_ALLOCATE.
- */
- ALLOCATE = 0,
- /**
- * For use with AudioRecord::start(), this indicates no trigger session.
- * It is also used with output tracks and patch tracks, which never have a
- * session.
- */
- NONE = 0
};
/**
- * Audio format is a 32-bit word that consists of:
- * main format field (upper 8 bits)
- * sub format field (lower 24 bits).
- *
- * The main format indicates the main codec type. The sub format field indicates
- * options and parameters for each format. The sub format is mainly used for
- * record to indicate for instance the requested bitrate or profile. It can
- * also be used for certain formats to give informations not present in the
- * encoded audio stream (e.g. octet alignement for AMR).
+ * Audio format indicates audio codec type.
+ * See 'audioFormat' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_format_t", value_prefix="AUDIO_FORMAT_")
-enum AudioFormat : uint32_t {
- INVALID = 0xFFFFFFFFUL,
- DEFAULT = 0,
- PCM = 0x00000000UL,
- MP3 = 0x01000000UL,
- AMR_NB = 0x02000000UL,
- AMR_WB = 0x03000000UL,
- AAC = 0x04000000UL,
- /** Deprecated, Use AAC_HE_V1 */
- HE_AAC_V1 = 0x05000000UL,
- /** Deprecated, Use AAC_HE_V2 */
- HE_AAC_V2 = 0x06000000UL,
- VORBIS = 0x07000000UL,
- OPUS = 0x08000000UL,
- AC3 = 0x09000000UL,
- E_AC3 = 0x0A000000UL,
- DTS = 0x0B000000UL,
- DTS_HD = 0x0C000000UL,
- /** IEC61937 is encoded audio wrapped in 16-bit PCM. */
- IEC61937 = 0x0D000000UL,
- DOLBY_TRUEHD = 0x0E000000UL,
- EVRC = 0x10000000UL,
- EVRCB = 0x11000000UL,
- EVRCWB = 0x12000000UL,
- EVRCNW = 0x13000000UL,
- AAC_ADIF = 0x14000000UL,
- WMA = 0x15000000UL,
- WMA_PRO = 0x16000000UL,
- AMR_WB_PLUS = 0x17000000UL,
- MP2 = 0x18000000UL,
- QCELP = 0x19000000UL,
- DSD = 0x1A000000UL,
- FLAC = 0x1B000000UL,
- ALAC = 0x1C000000UL,
- APE = 0x1D000000UL,
- AAC_ADTS = 0x1E000000UL,
- SBC = 0x1F000000UL,
- APTX = 0x20000000UL,
- APTX_HD = 0x21000000UL,
- AC4 = 0x22000000UL,
- LDAC = 0x23000000UL,
- /** Dolby Metadata-enhanced Audio Transmission */
- MAT = 0x24000000UL,
- AAC_LATM = 0x25000000UL,
- CELT = 0x26000000UL,
- APTX_ADAPTIVE = 0x27000000UL,
- LHDC = 0x28000000UL,
- LHDC_LL = 0x29000000UL,
- APTX_TWSP = 0x2A000000UL,
+typedef string AudioFormat;
- /** Deprecated */
- MAIN_MASK = 0xFF000000UL,
- SUB_MASK = 0x00FFFFFFUL,
+/**
+ * Audio channel mask indicates presence of particular channels.
+ * See 'audioChannelMask' in audio_policy_configuration.xsd for the
+ * list of allowed values.
+ */
+typedef string AudioChannelMask;
- /* Subformats */
- PCM_SUB_16_BIT = 0x1, // PCM signed 16 bits
- PCM_SUB_8_BIT = 0x2, // PCM unsigned 8 bits
- PCM_SUB_32_BIT = 0x3, // PCM signed .31 fixed point
- PCM_SUB_8_24_BIT = 0x4, // PCM signed 8.23 fixed point
- PCM_SUB_FLOAT = 0x5, // PCM single-precision float pt
- PCM_SUB_24_BIT_PACKED = 0x6, // PCM signed .23 fix pt (3 bytes)
-
- MP3_SUB_NONE = 0x0,
-
- AMR_SUB_NONE = 0x0,
-
- AAC_SUB_MAIN = 0x1,
- AAC_SUB_LC = 0x2,
- AAC_SUB_SSR = 0x4,
- AAC_SUB_LTP = 0x8,
- AAC_SUB_HE_V1 = 0x10,
- AAC_SUB_SCALABLE = 0x20,
- AAC_SUB_ERLC = 0x40,
- AAC_SUB_LD = 0x80,
- AAC_SUB_HE_V2 = 0x100,
- AAC_SUB_ELD = 0x200,
- AAC_SUB_XHE = 0x300,
-
- VORBIS_SUB_NONE = 0x0,
-
- E_AC3_SUB_JOC = 0x1,
-
- MAT_SUB_1_0 = 0x1,
- MAT_SUB_2_0 = 0x2,
- MAT_SUB_2_1 = 0x3,
-
- /* Aliases */
- /** note != AudioFormat.ENCODING_PCM_16BIT */
- PCM_16_BIT = (PCM | PCM_SUB_16_BIT),
- /** note != AudioFormat.ENCODING_PCM_8BIT */
- PCM_8_BIT = (PCM | PCM_SUB_8_BIT),
- PCM_32_BIT = (PCM | PCM_SUB_32_BIT),
- PCM_8_24_BIT = (PCM | PCM_SUB_8_24_BIT),
- PCM_FLOAT = (PCM | PCM_SUB_FLOAT),
- PCM_24_BIT_PACKED = (PCM | PCM_SUB_24_BIT_PACKED),
- AAC_MAIN = (AAC | AAC_SUB_MAIN),
- AAC_LC = (AAC | AAC_SUB_LC),
- AAC_SSR = (AAC | AAC_SUB_SSR),
- AAC_LTP = (AAC | AAC_SUB_LTP),
- AAC_HE_V1 = (AAC | AAC_SUB_HE_V1),
- AAC_SCALABLE = (AAC | AAC_SUB_SCALABLE),
- AAC_ERLC = (AAC | AAC_SUB_ERLC),
- AAC_LD = (AAC | AAC_SUB_LD),
- AAC_HE_V2 = (AAC | AAC_SUB_HE_V2),
- AAC_ELD = (AAC | AAC_SUB_ELD),
- AAC_XHE = (AAC | AAC_SUB_XHE),
- AAC_ADTS_MAIN = (AAC_ADTS | AAC_SUB_MAIN),
- AAC_ADTS_LC = (AAC_ADTS | AAC_SUB_LC),
- AAC_ADTS_SSR = (AAC_ADTS | AAC_SUB_SSR),
- AAC_ADTS_LTP = (AAC_ADTS | AAC_SUB_LTP),
- AAC_ADTS_HE_V1 = (AAC_ADTS | AAC_SUB_HE_V1),
- AAC_ADTS_SCALABLE = (AAC_ADTS | AAC_SUB_SCALABLE),
- AAC_ADTS_ERLC = (AAC_ADTS | AAC_SUB_ERLC),
- AAC_ADTS_LD = (AAC_ADTS | AAC_SUB_LD),
- AAC_ADTS_HE_V2 = (AAC_ADTS | AAC_SUB_HE_V2),
- AAC_ADTS_ELD = (AAC_ADTS | AAC_SUB_ELD),
- AAC_ADTS_XHE = (AAC_ADTS | AAC_SUB_XHE),
- E_AC3_JOC = (E_AC3 | E_AC3_SUB_JOC),
- MAT_1_0 = (MAT | MAT_SUB_1_0),
- MAT_2_0 = (MAT | MAT_SUB_2_0),
- MAT_2_1 = (MAT | MAT_SUB_2_1),
- AAC_LATM_LC = (AAC_LATM | AAC_SUB_LC),
- AAC_LATM_HE_V1 = (AAC_LATM | AAC_SUB_HE_V1),
- AAC_LATM_HE_V2 = (AAC_LATM | AAC_SUB_HE_V2),
+/**
+ * Base configuration attributes applicable to any stream of audio.
+ */
+struct AudioConfigBase {
+ AudioFormat format; // 'DEFAULT' means 'unspecified'
+ uint32_t sampleRateHz; // 0 means 'unspecified'
+ vec<AudioChannelMask> channelMask; // empty means 'unspecified'
};
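As an aside for reviewers, a minimal sketch of how a client fills this structure from C++, mirroring the V7.0 VTS code further down in this change (the xsd:: enum spellings and the toString helper come from audio_policy_configuration_V7_0-enums; treat the exact names as assumptions):

    // Sketch: 16-bit stereo PCM at 48 kHz in an AudioConfigBase.
    AudioConfigBase base{};
    base.format = toString(xsd::AudioFormat::AUDIO_FORMAT_PCM_16_BIT);
    base.sampleRateHz = 48000;
    base.channelMask.resize(1);
    base.channelMask[0] = toString(xsd::AudioChannelMask::AUDIO_CHANNEL_OUT_STEREO);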
/**
- * Usage of these values highlights places in the code that use 2- or 8- channel
- * assumptions.
+ * Configurations supported for a certain audio format.
*/
-@export(name="")
-enum FixedChannelCount : int32_t {
- FCC_2 = 2, // This is typically due to legacy implementation of stereo I/O
- FCC_8 = 8 // This is typically due to audio mixer and resampler limitations
-};
-
-/**
- * A channel mask per se only defines the presence or absence of a channel, not
- * the order.
- *
- * The channel order convention is that channels are interleaved in order from
- * least significant channel mask bit to most significant channel mask bit,
- * with unused bits skipped. For example for stereo, LEFT would be first,
- * followed by RIGHT.
- * Any exceptions to this convention are noted at the appropriate API.
- *
- * AudioChannelMask is an opaque type and its internal layout should not be
- * assumed as it may change in the future. Instead, always use functions
- * to examine it.
- *
- * These are the current representations:
- *
- * REPRESENTATION_POSITION
- * is a channel mask representation for position assignment. Each low-order
- * bit corresponds to the spatial position of a transducer (output), or
- * interpretation of channel (input). The user of a channel mask needs to
- * know the context of whether it is for output or input. The constants
- * OUT_* or IN_* apply to the bits portion. It is not permitted for no bits
- * to be set.
- *
- * REPRESENTATION_INDEX
- * is a channel mask representation for index assignment. Each low-order
- * bit corresponds to a selected channel. There is no platform
- * interpretation of the various bits. There is no concept of output or
- * input. It is not permitted for no bits to be set.
- *
- * All other representations are reserved for future use.
- *
- * Warning: current representation distinguishes between input and output, but
- * this will not the be case in future revisions of the platform. Wherever there
- * is an ambiguity between input and output that is currently resolved by
- * checking the channel mask, the implementer should look for ways to fix it
- * with additional information outside of the mask.
- */
-@export(name="", value_prefix="AUDIO_CHANNEL_")
-enum AudioChannelMask : uint32_t {
- /** must be 0 for compatibility */
- REPRESENTATION_POSITION = 0,
- /** 1 is reserved for future use */
- REPRESENTATION_INDEX = 2,
- /* 3 is reserved for future use */
-
- /** These can be a complete value of AudioChannelMask */
- NONE = 0x0,
- INVALID = 0xC0000000,
-
- /*
- * These can be the bits portion of an AudioChannelMask
- * with representation REPRESENTATION_POSITION.
- */
-
- /** output channels */
- OUT_FRONT_LEFT = 0x1,
- OUT_FRONT_RIGHT = 0x2,
- OUT_FRONT_CENTER = 0x4,
- OUT_LOW_FREQUENCY = 0x8,
- OUT_BACK_LEFT = 0x10,
- OUT_BACK_RIGHT = 0x20,
- OUT_FRONT_LEFT_OF_CENTER = 0x40,
- OUT_FRONT_RIGHT_OF_CENTER = 0x80,
- OUT_BACK_CENTER = 0x100,
- OUT_SIDE_LEFT = 0x200,
- OUT_SIDE_RIGHT = 0x400,
- OUT_TOP_CENTER = 0x800,
- OUT_TOP_FRONT_LEFT = 0x1000,
- OUT_TOP_FRONT_CENTER = 0x2000,
- OUT_TOP_FRONT_RIGHT = 0x4000,
- OUT_TOP_BACK_LEFT = 0x8000,
- OUT_TOP_BACK_CENTER = 0x10000,
- OUT_TOP_BACK_RIGHT = 0x20000,
- OUT_TOP_SIDE_LEFT = 0x40000,
- OUT_TOP_SIDE_RIGHT = 0x80000,
-
+struct AudioProfile {
+ AudioFormat format;
+ /** List of the sample rates (in Hz) supported by the profile. */
+ vec<uint32_t> sampleRates;
/**
- * Haptic channel characteristics are specific to a device and
- * only used to play device specific resources (eg: ringtones).
- * The HAL can freely map A and B to haptic controllers, the
- * framework shall not interpret those values and forward them
- * from the device audio assets.
+ * List of channel masks supported by the profile. Each subvector may consist
+ * of several individual channel mask entries for non-traditional channel
+ * masks, e.g. the combination "OUT_FRONT_LEFT,OUT_FRONT_CENTER", which
+ * doesn't have a corresponding predefined channel mask.
*/
- OUT_HAPTIC_A = 0x20000000,
- OUT_HAPTIC_B = 0x10000000,
-
- OUT_MONO = OUT_FRONT_LEFT,
- OUT_STEREO = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT),
- OUT_2POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_LOW_FREQUENCY),
- OUT_2POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_2POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
- OUT_LOW_FREQUENCY),
- OUT_3POINT0POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_3POINT1POINT2 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER |
- OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT |
- OUT_LOW_FREQUENCY),
- OUT_QUAD = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_BACK_LEFT | OUT_BACK_RIGHT),
- OUT_QUAD_BACK = OUT_QUAD,
- /** like OUT_QUAD_BACK with *_SIDE_* instead of *_BACK_* */
- OUT_QUAD_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
- OUT_SURROUND = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_BACK_CENTER),
- OUT_PENTA = (OUT_QUAD | OUT_FRONT_CENTER),
- OUT_5POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_BACK_LEFT | OUT_BACK_RIGHT),
- OUT_5POINT1_BACK = OUT_5POINT1,
- /** like OUT_5POINT1_BACK with *_SIDE_* instead of *_BACK_* */
- OUT_5POINT1_SIDE = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
- OUT_5POINT1POINT2 = (OUT_5POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_5POINT1POINT4 = (OUT_5POINT1 |
- OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
- OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
- OUT_6POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_BACK_LEFT | OUT_BACK_RIGHT |
- OUT_BACK_CENTER),
- /** matches the correct AudioFormat.CHANNEL_OUT_7POINT1_SURROUND */
- OUT_7POINT1 = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_FRONT_CENTER | OUT_LOW_FREQUENCY |
- OUT_BACK_LEFT | OUT_BACK_RIGHT |
- OUT_SIDE_LEFT | OUT_SIDE_RIGHT),
- OUT_7POINT1POINT2 = (OUT_7POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT),
- OUT_7POINT1POINT4 = (OUT_7POINT1 |
- OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT |
- OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT),
- OUT_MONO_HAPTIC_A = (OUT_FRONT_LEFT | OUT_HAPTIC_A),
- OUT_STEREO_HAPTIC_A = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_HAPTIC_A),
- OUT_HAPTIC_AB = (OUT_HAPTIC_A | OUT_HAPTIC_B),
- OUT_MONO_HAPTIC_AB = (OUT_FRONT_LEFT | OUT_HAPTIC_A | OUT_HAPTIC_B),
- OUT_STEREO_HAPTIC_AB = (OUT_FRONT_LEFT | OUT_FRONT_RIGHT |
- OUT_HAPTIC_A | OUT_HAPTIC_B),
- // Note that the 2.0 OUT_ALL* have been moved to helper functions
-
- /* These are bits only, not complete values */
-
- /** input channels */
- IN_LEFT = 0x4,
- IN_RIGHT = 0x8,
- IN_FRONT = 0x10,
- IN_BACK = 0x20,
- IN_LEFT_PROCESSED = 0x40,
- IN_RIGHT_PROCESSED = 0x80,
- IN_FRONT_PROCESSED = 0x100,
- IN_BACK_PROCESSED = 0x200,
- IN_PRESSURE = 0x400,
- IN_X_AXIS = 0x800,
- IN_Y_AXIS = 0x1000,
- IN_Z_AXIS = 0x2000,
- IN_BACK_LEFT = 0x10000,
- IN_BACK_RIGHT = 0x20000,
- IN_CENTER = 0x40000,
- IN_LOW_FREQUENCY = 0x100000,
- IN_TOP_LEFT = 0x200000,
- IN_TOP_RIGHT = 0x400000,
-
- IN_VOICE_UPLINK = 0x4000,
- IN_VOICE_DNLINK = 0x8000,
-
- IN_MONO = IN_FRONT,
- IN_STEREO = (IN_LEFT | IN_RIGHT),
- IN_FRONT_BACK = (IN_FRONT | IN_BACK),
- IN_6 = (IN_LEFT | IN_RIGHT |
- IN_FRONT | IN_BACK |
- IN_LEFT_PROCESSED | IN_RIGHT_PROCESSED),
- IN_2POINT0POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
- IN_2POINT1POINT2 = (IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT |
- IN_LOW_FREQUENCY),
- IN_3POINT0POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT),
- IN_3POINT1POINT2 = (IN_LEFT | IN_CENTER | IN_RIGHT |
- IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY),
- IN_5POINT1 = (IN_LEFT | IN_CENTER | IN_RIGHT |
- IN_BACK_LEFT | IN_BACK_RIGHT | IN_LOW_FREQUENCY),
- IN_VOICE_UPLINK_MONO = (IN_VOICE_UPLINK | IN_MONO),
- IN_VOICE_DNLINK_MONO = (IN_VOICE_DNLINK | IN_MONO),
- IN_VOICE_CALL_MONO = (IN_VOICE_UPLINK_MONO |
- IN_VOICE_DNLINK_MONO),
- // Note that the 2.0 IN_ALL* have been moved to helper functions
-
- COUNT_MAX = 30,
- INDEX_HDR = REPRESENTATION_INDEX << COUNT_MAX,
- INDEX_MASK_1 = INDEX_HDR | ((1 << 1) - 1),
- INDEX_MASK_2 = INDEX_HDR | ((1 << 2) - 1),
- INDEX_MASK_3 = INDEX_HDR | ((1 << 3) - 1),
- INDEX_MASK_4 = INDEX_HDR | ((1 << 4) - 1),
- INDEX_MASK_5 = INDEX_HDR | ((1 << 5) - 1),
- INDEX_MASK_6 = INDEX_HDR | ((1 << 6) - 1),
- INDEX_MASK_7 = INDEX_HDR | ((1 << 7) - 1),
- INDEX_MASK_8 = INDEX_HDR | ((1 << 8) - 1),
- INDEX_MASK_9 = INDEX_HDR | ((1 << 9) - 1),
- INDEX_MASK_10 = INDEX_HDR | ((1 << 10) - 1),
- INDEX_MASK_11 = INDEX_HDR | ((1 << 11) - 1),
- INDEX_MASK_12 = INDEX_HDR | ((1 << 12) - 1),
- INDEX_MASK_13 = INDEX_HDR | ((1 << 13) - 1),
- INDEX_MASK_14 = INDEX_HDR | ((1 << 14) - 1),
- INDEX_MASK_15 = INDEX_HDR | ((1 << 15) - 1),
- INDEX_MASK_16 = INDEX_HDR | ((1 << 16) - 1),
- INDEX_MASK_17 = INDEX_HDR | ((1 << 17) - 1),
- INDEX_MASK_18 = INDEX_HDR | ((1 << 18) - 1),
- INDEX_MASK_19 = INDEX_HDR | ((1 << 19) - 1),
- INDEX_MASK_20 = INDEX_HDR | ((1 << 20) - 1),
- INDEX_MASK_21 = INDEX_HDR | ((1 << 21) - 1),
- INDEX_MASK_22 = INDEX_HDR | ((1 << 22) - 1),
- INDEX_MASK_23 = INDEX_HDR | ((1 << 23) - 1),
- INDEX_MASK_24 = INDEX_HDR | ((1 << 24) - 1),
+ vec<vec<AudioChannelMask>> channelMasks;
};
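To make the "subvector" convention concrete, a hedged C++ sketch (the non-traditional channel strings follow the example in the comment above; the enum spellings are assumptions):

    // Sketch: one AudioProfile offering a standard mask and a non-traditional one.
    AudioProfile profile{};
    profile.format = toString(xsd::AudioFormat::AUDIO_FORMAT_PCM_16_BIT);
    profile.sampleRates = {44100, 48000};
    profile.channelMasks.resize(2);
    // Traditional mask: a single predefined entry.
    profile.channelMasks[0] = {toString(xsd::AudioChannelMask::AUDIO_CHANNEL_OUT_STEREO)};
    // Non-traditional mask: spelled as individual channel entries, as in the comment.
    profile.channelMasks[1] = {"OUT_FRONT_LEFT", "OUT_FRONT_CENTER"};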
/**
@@ -607,301 +152,58 @@
CALL_SCREEN = 4,
};
-@export(name="", value_prefix="AUDIO_DEVICE_")
-enum AudioDevice : uint32_t {
- NONE = 0x0,
- /** reserved bits */
- BIT_IN = 0x80000000,
- BIT_DEFAULT = 0x40000000,
- /** output devices */
- OUT_EARPIECE = 0x1,
- OUT_SPEAKER = 0x2,
- OUT_WIRED_HEADSET = 0x4,
- OUT_WIRED_HEADPHONE = 0x8,
- OUT_BLUETOOTH_SCO = 0x10,
- OUT_BLUETOOTH_SCO_HEADSET = 0x20,
- OUT_BLUETOOTH_SCO_CARKIT = 0x40,
- OUT_BLUETOOTH_A2DP = 0x80,
- OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
- OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
- OUT_AUX_DIGITAL = 0x400,
- OUT_HDMI = OUT_AUX_DIGITAL,
- /** uses an analog connection (multiplexed over the USB pins for instance) */
- OUT_ANLG_DOCK_HEADSET = 0x800,
- OUT_DGTL_DOCK_HEADSET = 0x1000,
- /** USB accessory mode: Android device is USB device and dock is USB host */
- OUT_USB_ACCESSORY = 0x2000,
- /** USB host mode: Android device is USB host and dock is USB device */
- OUT_USB_DEVICE = 0x4000,
- OUT_REMOTE_SUBMIX = 0x8000,
- /** Telephony voice TX path */
- OUT_TELEPHONY_TX = 0x10000,
- /** Analog jack with line impedance detected */
- OUT_LINE = 0x20000,
- /** HDMI Audio Return Channel */
- OUT_HDMI_ARC = 0x40000,
- /** S/PDIF out */
- OUT_SPDIF = 0x80000,
- /** FM transmitter out */
- OUT_FM = 0x100000,
- /** Line out for av devices */
- OUT_AUX_LINE = 0x200000,
- /** limited-output speaker device for acoustic safety */
- OUT_SPEAKER_SAFE = 0x400000,
- OUT_IP = 0x800000,
- /** audio bus implemented by the audio system (e.g an MOST stereo channel) */
- OUT_BUS = 0x1000000,
- OUT_PROXY = 0x2000000,
- OUT_USB_HEADSET = 0x4000000,
- OUT_HEARING_AID = 0x8000000,
- OUT_ECHO_CANCELLER = 0x10000000,
- OUT_DEFAULT = BIT_DEFAULT,
- // Note that the 2.0 OUT_ALL* have been moved to helper functions
-
- /** input devices */
- IN_COMMUNICATION = BIT_IN | 0x1,
- IN_AMBIENT = BIT_IN | 0x2,
- IN_BUILTIN_MIC = BIT_IN | 0x4,
- IN_BLUETOOTH_SCO_HEADSET = BIT_IN | 0x8,
- IN_WIRED_HEADSET = BIT_IN | 0x10,
- IN_AUX_DIGITAL = BIT_IN | 0x20,
- IN_HDMI = IN_AUX_DIGITAL,
- /** Telephony voice RX path */
- IN_VOICE_CALL = BIT_IN | 0x40,
- IN_TELEPHONY_RX = IN_VOICE_CALL,
- IN_BACK_MIC = BIT_IN | 0x80,
- IN_REMOTE_SUBMIX = BIT_IN | 0x100,
- IN_ANLG_DOCK_HEADSET = BIT_IN | 0x200,
- IN_DGTL_DOCK_HEADSET = BIT_IN | 0x400,
- IN_USB_ACCESSORY = BIT_IN | 0x800,
- IN_USB_DEVICE = BIT_IN | 0x1000,
- /** FM tuner input */
- IN_FM_TUNER = BIT_IN | 0x2000,
- /** TV tuner input */
- IN_TV_TUNER = BIT_IN | 0x4000,
- /** Analog jack with line impedance detected */
- IN_LINE = BIT_IN | 0x8000,
- /** S/PDIF in */
- IN_SPDIF = BIT_IN | 0x10000,
- IN_BLUETOOTH_A2DP = BIT_IN | 0x20000,
- IN_LOOPBACK = BIT_IN | 0x40000,
- IN_IP = BIT_IN | 0x80000,
- /** audio bus implemented by the audio system (e.g an MOST stereo channel) */
- IN_BUS = BIT_IN | 0x100000,
- IN_PROXY = BIT_IN | 0x1000000,
- IN_USB_HEADSET = BIT_IN | 0x2000000,
- IN_BLUETOOTH_BLE = BIT_IN | 0x4000000,
- IN_ECHO_REFERENCE = BIT_IN | 0x10000000,
- IN_DEFAULT = BIT_IN | BIT_DEFAULT,
-
- // Note that the 2.0 IN_ALL* have been moved to helper functions
-};
-
/**
- * IEEE 802 MAC address.
+ * Audio device specifies the type (or category) of an audio I/O device
+ * (e.g. speaker or headphones).
+ * See 'audioDevice' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-typedef uint8_t[6] MacAddress;
+typedef string AudioDevice;
/**
* Specifies a device address in case when several devices of the same type
* can be connected (e.g. BT A2DP, USB).
*/
struct DeviceAddress {
- AudioDevice device; // discriminator
- union Address {
- MacAddress mac; // used for BLUETOOTH_A2DP_*
- uint8_t[4] ipv4; // used for IP
+ /** The type of the device. */
+ AudioDevice deviceType;
+ safe_union Address {
+ /**
+ * The address may be left unspecified if 'device' specifies
+ * a physical device unambiguously.
+ */
+ Monostate unspecified;
+ /** IEEE 802 MAC address. Set for Bluetooth devices. */
+ uint8_t[6] mac;
+ /** IPv4 Address. Set for IPv4 devices. */
+ uint8_t[4] ipv4;
+ /** IPv6 Address. Set for IPv6 devices. */
+ uint16_t[8] ipv6;
+ /** ALSA card and device numbers. Set for USB devices. */
struct Alsa {
int32_t card;
int32_t device;
- } alsa; // used for USB_*
+ } alsa;
+ /** Arbitrary BUS device unique address. Not interpreted by the framework. */
+ string bus;
+ /** Arbitrary REMOTE_SUBMIX device unique address. Not interpreted by the HAL. */
+ string rSubmix;
} address;
- /** Arbitrary BUS device unique address. Should not be interpreted by the framework. */
- string busAddress;
- /** Arbitrary REMOTE_SUBMIX device unique address. Should not be interpreted by the HAL. */
- string rSubmixAddress;
};
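A short illustrative sketch of populating DeviceAddress from C++ (the safe_union setter-method spelling follows the usual HIDL codegen convention and is an assumption; the bus string is hypothetical):

    // Sketch: a speaker needs no address; a BUS device carries an arbitrary string.
    DeviceAddress speaker{};
    speaker.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_OUT_SPEAKER);
    // 'address' can stay in its 'unspecified' alternative for unambiguous devices.

    DeviceAddress busOut{};
    busOut.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_OUT_BUS);
    busOut.address.bus("BUS00_MEDIA");  // hypothetical bus address string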
/**
- * The audio output flags serve two purposes:
- *
- * - when an AudioTrack is created they indicate a "wish" to be connected to an
- * output stream with attributes corresponding to the specified flags;
- *
- * - when present in an output profile descriptor listed for a particular audio
- * hardware module, they indicate that an output stream can be opened that
- * supports the attributes indicated by the flags.
- *
- * The audio policy manager will try to match the flags in the request
- * (when getOuput() is called) to an available output stream.
+ * Audio usage specifies the intended use case for the sound being played.
+ * See 'audioUsage' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_output_flags_t", value_prefix="AUDIO_OUTPUT_FLAG_")
-enum AudioOutputFlag : int32_t {
- NONE = 0x0, // no attributes
- DIRECT = 0x1, // this output directly connects a track
- // to one output stream: no software mixer
- PRIMARY = 0x2, // this output is the primary output of the device. It is
- // unique and must be present. It is opened by default and
- // receives routing, audio mode and volume controls related
- // to voice calls.
- FAST = 0x4, // output supports "fast tracks", defined elsewhere
- DEEP_BUFFER = 0x8, // use deep audio buffers
- COMPRESS_OFFLOAD = 0x10, // offload playback of compressed streams to
- // hardware codec
- NON_BLOCKING = 0x20, // use non-blocking write
- HW_AV_SYNC = 0x40, // output uses a hardware A/V sync
- TTS = 0x80, // output for streams transmitted through speaker at a
- // sample rate high enough to accommodate lower-range
- // ultrasonic p/b
- RAW = 0x100, // minimize signal processing
- SYNC = 0x200, // synchronize I/O streams
- IEC958_NONAUDIO = 0x400, // Audio stream contains compressed audio in SPDIF
- // data bursts, not PCM.
- DIRECT_PCM = 0x2000, // Audio stream containing PCM data that needs
- // to pass through compress path for DSP post proc.
- MMAP_NOIRQ = 0x4000, // output operates in MMAP no IRQ mode.
- VOIP_RX = 0x8000, // preferred output for VoIP calls.
- /** preferred output for call music */
- INCALL_MUSIC = 0x10000,
-};
+typedef string AudioUsage;
/**
- * The audio input flags are analogous to audio output flags.
- * Currently they are used only when an AudioRecord is created,
- * to indicate a preference to be connected to an input stream with
- * attributes corresponding to the specified flags.
+ * Audio content type expresses the general category of the content.
+ * See 'audioContentType' in audio_policy_configuration.xsd for the
+ * list of allowed values.
*/
-@export(name="audio_input_flags_t", value_prefix="AUDIO_INPUT_FLAG_")
-enum AudioInputFlag : int32_t {
- NONE = 0x0, // no attributes
- FAST = 0x1, // prefer an input that supports "fast tracks"
- HW_HOTWORD = 0x2, // prefer an input that captures from hw hotword source
- RAW = 0x4, // minimize signal processing
- SYNC = 0x8, // synchronize I/O streams
- MMAP_NOIRQ = 0x10, // input operates in MMAP no IRQ mode.
- VOIP_TX = 0x20, // preferred input for VoIP calls.
- HW_AV_SYNC = 0x40, // input connected to an output that uses a hardware A/V sync
- DIRECT = 0x80, // for acquiring encoded streams
-};
-
-@export(name="audio_usage_t", value_prefix="AUDIO_USAGE_")
-enum AudioUsage : int32_t {
- // These values must kept in sync with
- // frameworks/base/media/java/android/media/AudioAttributes.java
- // Note that not all framework values are exposed
- /**
- * Usage value to use when the usage is unknown.
- */
- UNKNOWN = 0,
- /**
- * Usage value to use when the usage is media, such as music, or movie
- * soundtracks.
- */
- MEDIA = 1,
- /**
- * Usage value to use when the usage is voice communications, such as
- * telephony or VoIP.
- */
- VOICE_COMMUNICATION = 2,
- /**
- * Usage value to use when the usage is in-call signalling, such as with
- * a "busy" beep, or DTMF tones.
- */
- VOICE_COMMUNICATION_SIGNALLING = 3,
- /**
- * Usage value to use when the usage is an alarm (e.g. wake-up alarm).
- */
- ALARM = 4,
- /**
- * Usage value to use when the usage is a generic notification.
- */
- NOTIFICATION = 5,
- /**
- * Usage value to use when the usage is telephony ringtone.
- */
- NOTIFICATION_TELEPHONY_RINGTONE = 6,
- /**
- * Usage value to use when the usage is for accessibility, such as with
- * a screen reader.
- */
- ASSISTANCE_ACCESSIBILITY = 11,
- /**
- * Usage value to use when the usage is driving or navigation directions.
- */
- ASSISTANCE_NAVIGATION_GUIDANCE = 12,
- /**
- * Usage value to use when the usage is sonification, such as with user
- * interface sounds.
- */
- ASSISTANCE_SONIFICATION = 13,
- /**
- * Usage value to use when the usage is for game audio.
- */
- GAME = 14,
- /**
- * Usage value to use when feeding audio to the platform and replacing
- * "traditional" audio source, such as audio capture devices.
- */
- VIRTUAL_SOURCE = 15,
- /**
- * Usage value to use for audio responses to user queries, audio
- * instructions or help utterances.
- */
- ASSISTANT = 16,
- /**
- * Usage value to use for assistant voice interaction with remote caller
- * on Cell and VoIP calls.
- */
- CALL_ASSISTANT = 17,
- /**
- * Usage value to use when the usage is an emergency.
- */
- EMERGENCY = 1000,
- /**
- * Usage value to use when the usage is a safety sound.
- */
- SAFETY = 1001,
- /**
- * Usage value to use when the usage is a vehicle status.
- */
- VEHICLE_STATUS = 1002,
- /**
- * Usage value to use when the usage is an announcement.
- */
- ANNOUNCEMENT = 1003,
-};
-
-/** Type of audio generated by an application. */
-@export(name="audio_content_type_t", value_prefix="AUDIO_CONTENT_TYPE_")
-enum AudioContentType : uint32_t {
- // Do not change these values without updating their counterparts
- // in frameworks/base/media/java/android/media/AudioAttributes.java
- /**
- * Content type value to use when the content type is unknown, or other than
- * the ones defined.
- */
- UNKNOWN = 0,
- /**
- * Content type value to use when the content type is speech.
- */
- SPEECH = 1,
- /**
- * Content type value to use when the content type is music.
- */
- MUSIC = 2,
- /**
- * Content type value to use when the content type is a soundtrack,
- * typically accompanying a movie or TV program.
- */
- MOVIE = 3,
- /**
- * Content type value to use when the content type is a sound used to
- * accompany a user action, such as a beep or sound effect expressing a key
- * click, or event, such as the type of a sound for a bonus being received
- * in a game. These sounds are mostly synthesized or short Foley sounds.
- */
- SONIFICATION = 4,
-};
+typedef string AudioContentType;
/** Encapsulation mode used for sending audio compressed data. */
@export(name="audio_encapsulation_mode_t", value_prefix="AUDIO_ENCAPSULATION_MODE_")
@@ -926,9 +228,7 @@
* Additional information about the stream passed to hardware decoders.
*/
struct AudioOffloadInfo {
- uint32_t sampleRateHz;
- bitfield<AudioChannelMask> channelMask;
- AudioFormat format;
+ AudioConfigBase base;
AudioStreamType streamType;
uint32_t bitRatePerSecond;
int64_t durationMicroseconds; // -1 if unknown
@@ -946,9 +246,7 @@
* Commonly used audio stream configuration parameters.
*/
struct AudioConfig {
- uint32_t sampleRateHz;
- bitfield<AudioChannelMask> channelMask;
- AudioFormat format;
+ AudioConfigBase base;
AudioOffloadInfo offloadInfo;
uint64_t frameCount;
};
@@ -985,8 +283,7 @@
safe_union Destination {
Monostate unspecified;
DeviceAddress device;
- };
- Destination destination;
+ } destination;
};
/** Metadatas of the sink of a StreamIn. */
@@ -994,7 +291,6 @@
vec<RecordTrackMetadata> tracks;
};
-
/*
*
* Volume control
@@ -1017,7 +313,7 @@
*/
struct AudioGain {
bitfield<AudioGainMode> mode;
- bitfield<AudioChannelMask> channelMask; // channels which gain an be controlled
+ vec<AudioChannelMask> channelMask; // channels whose gain can be controlled
int32_t minValue; // minimum gain value in millibels
int32_t maxValue; // maximum gain value in millibels
int32_t defaultValue; // default gain value in millibels
@@ -1033,10 +329,8 @@
struct AudioGainConfig {
int32_t index; // index of the corresponding AudioGain in AudioPort.gains
AudioGainMode mode;
- AudioChannelMask channelMask; // channels which gain value follows
+ vec<AudioChannelMask> channelMask; // channels that the gain values apply to
/**
- * 4 = sizeof(AudioChannelMask),
- * 8 is not "FCC_8", so it won't need to be changed for > 8 channels.
* Gain values in millibels for each channel ordered from LSb to MSb in
* channel mask. The number of values is 1 in joint mode or
* popcount(channel_mask).
@@ -1060,132 +354,78 @@
* the interface.
*/
-/** Audio port role: either source or sink */
-@export(name="audio_port_role_t", value_prefix="AUDIO_PORT_ROLE_")
-enum AudioPortRole : int32_t {
- NONE,
- SOURCE,
- SINK,
-};
-
/**
- * Audio port type indicates if it is a session (e.g AudioTrack), a mix (e.g
- * PlaybackThread output) or a physical device (e.g OUT_SPEAKER)
+ * A helper aggregate structure providing parameters that depend on the
+ * port role.
*/
-@export(name="audio_port_type_t", value_prefix="AUDIO_PORT_TYPE_")
-enum AudioPortType : int32_t {
- NONE,
- DEVICE,
- MIX,
- SESSION,
-};
-
-/**
- * Extension for audio port configuration structure when the audio port is a
- * hardware device.
- */
-struct AudioPortConfigDeviceExt {
- AudioModuleHandle hwModule; // module the device is attached to
- AudioDevice type; // device type (e.g OUT_SPEAKER)
- uint8_t[32] address; // device address. "" if N/A
-};
-
-/**
- * Extension for audio port configuration structure when the audio port is an
- * audio session.
- */
-struct AudioPortConfigSessionExt {
+safe_union AudioPortExtendedInfo {
+ /** Set when no information is provided. */
+ Monostate unspecified;
+ /** Set when the audio port is an audio device. */
+ DeviceAddress device;
+ /** Set when the audio port is a mix. The handle is of a stream. */
+ struct AudioPortMixExt {
+ /** I/O handle of the input/output stream. */
+ AudioIoHandle ioHandle;
+ safe_union UseCase {
+ /** Specified when the port is in the SOURCE role. */
+ AudioStreamType stream;
+ /** Specified when the port is in the SINK role. */
+ AudioSource source;
+ } useCase;
+ } mix;
+ /** Set when the audio port is an audio session. */
AudioSession session;
};
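For orientation, a hedged sketch of filling this safe_union for a mix port in the SOURCE role (the nested type names and setters follow the usual HIDL C++ codegen pattern; ioHandle is a hypothetical handle of an already opened stream):

    // Sketch: extended info for an output mix carrying the MUSIC stream type.
    AudioPortExtendedInfo ext;
    AudioPortExtendedInfo::AudioPortMixExt mix;
    mix.ioHandle = ioHandle;  // assumed: handle returned when the stream was opened
    mix.useCase.stream(toString(xsd::AudioStreamType::AUDIO_STREAM_MUSIC));
    ext.mix(mix);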
/**
- * Flags indicating which fields are to be considered in AudioPortConfig.
- */
-@export(name="", value_prefix="AUDIO_PORT_CONFIG_")
-enum AudioPortConfigMask : uint32_t {
- SAMPLE_RATE = 0x1,
- CHANNEL_MASK = 0x2,
- FORMAT = 0x4,
- GAIN = 0x8,
-};
-
-/**
* Audio port configuration structure used to specify a particular configuration
* of an audio port.
*/
struct AudioPortConfig {
+ /**
+ * The 'id' field is set when it is needed to select the port and
+ * apply new configuration for it.
+ */
AudioPortHandle id;
- bitfield<AudioPortConfigMask> configMask;
- uint32_t sampleRateHz;
- bitfield<AudioChannelMask> channelMask;
- AudioFormat format;
- AudioGainConfig gain;
- AudioPortType type; // type is used as a discriminator for Ext union
- AudioPortRole role; // role is used as a discriminator for UseCase union
- union Ext {
- AudioPortConfigDeviceExt device;
- struct AudioPortConfigMixExt {
- AudioModuleHandle hwModule; // module the stream is attached to
- AudioIoHandle ioHandle; // I/O handle of the input/output stream
- union UseCase {
- AudioStreamType stream;
- AudioSource source;
- } useCase;
- } mix;
- AudioPortConfigSessionExt session;
- } ext;
+ /**
+ * Basic parameters: sampling rate, format, channel mask. Only some of the
+ * parameters (or none) may be set. See the documentation of the
+ * AudioConfigBase struct.
+ */
+ AudioConfigBase config;
+ /** Associated gain control. */
+ safe_union OptionalGain {
+ Monostate unspecified;
+ AudioGainConfig config;
+ } gain;
+ /** Parameters that depend on the actual port role. */
+ AudioPortExtendedInfo ext;
};
/**
- * Extension for audio port structure when the audio port is a hardware device.
+ * Audio port structure describes the capabilities of an audio port
+ * as well as its current configuration.
*/
-struct AudioPortDeviceExt {
- AudioModuleHandle hwModule; // module the device is attached to
- AudioDevice type;
- /** 32 byte string identifying the port. */
- uint8_t[32] address;
-};
-
-/**
- * Latency class of the audio mix.
- */
-@export(name="audio_mix_latency_class_t", value_prefix="AUDIO_LATENCY_")
-enum AudioMixLatencyClass : int32_t {
- LOW,
- NORMAL
-};
-
-struct AudioPortMixExt {
- AudioModuleHandle hwModule; // module the stream is attached to
- AudioIoHandle ioHandle; // I/O handle of the stream
- AudioMixLatencyClass latencyClass;
-};
-
-/**
- * Extension for audio port structure when the audio port is an audio session.
- */
-struct AudioPortSessionExt {
- AudioSession session;
-};
-
struct AudioPort {
+ /**
+ * Unique identifier of the port within this HAL service. When calling
+ * functions like IDevice.getAudioPort from the client side, it is allowed
+ * to specify only the 'id' and leave the other fields unspecified.
+ */
AudioPortHandle id;
- AudioPortRole role;
+ /**
+ * Human-readable name describing the function of the port.
+ * E.g. "telephony_tx" or "fm_tuner".
+ */
string name;
- vec<uint32_t> sampleRates;
- vec<bitfield<AudioChannelMask>> channelMasks;
- vec<AudioFormat> formats;
+ /** List of audio profiles supported by the port. */
+ vec<AudioProfile> profiles;
+ /** List of gain controls attached to the port. */
vec<AudioGain> gains;
- AudioPortConfig activeConfig; // current audio port configuration
- AudioPortType type; // type is used as a discriminator
- union Ext {
- AudioPortDeviceExt device;
- AudioPortMixExt mix;
- AudioPortSessionExt session;
- } ext;
-};
-
-struct ThreadInfo {
- int64_t pid;
- int64_t tid;
+ /**
+ * Current configuration of the audio port, may have all the fields left
+ * unspecified.
+ */
+ AudioPortConfig activeConfig;
};
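To illustrate the 'id'-only lookup mentioned in the comments above, a non-authoritative client-side sketch (the lambda-callback form of IDevice::getAudioPort is the generated C++ proxy signature; portHandle is hypothetical):

    // Sketch: query the full capabilities of a port known only by its handle.
    AudioPort request{};
    request.id = portHandle;
    device->getAudioPort(request, [&](Result res, const AudioPort& port) {
        if (res == Result::OK) {
            for (const auto& profile : port.profiles) {
                // inspect profile.format, profile.sampleRates, profile.channelMasks
            }
        }
    });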
diff --git a/audio/common/all-versions/default/VersionUtils.h b/audio/common/all-versions/default/VersionUtils.h
index e7755b1..9bfca0c 100644
--- a/audio/common/all-versions/default/VersionUtils.h
+++ b/audio/common/all-versions/default/VersionUtils.h
@@ -31,7 +31,7 @@
typedef common::CPP_VERSION::AudioChannelMask AudioChannelBitfield;
typedef common::CPP_VERSION::AudioOutputFlag AudioOutputFlagBitfield;
typedef common::CPP_VERSION::AudioInputFlag AudioInputFlagBitfield;
-#elif MAJOR_VERSION >= 4
+#elif MAJOR_VERSION >= 4 && MAJOR_VERSION <= 6
typedef hidl_bitfield<common::CPP_VERSION::AudioDevice> AudioDeviceBitfield;
typedef hidl_bitfield<common::CPP_VERSION::AudioChannelMask> AudioChannelBitfield;
typedef hidl_bitfield<common::CPP_VERSION::AudioOutputFlag> AudioOutputFlagBitfield;
diff --git a/audio/core/all-versions/default/include/core/default/Device.h b/audio/core/all-versions/default/include/core/default/Device.h
index b0e72d9..907acd7 100644
--- a/audio/core/all-versions/default/include/core/default/Device.h
+++ b/audio/core/all-versions/default/include/core/default/Device.h
@@ -43,8 +43,10 @@
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
+#if MAJOR_VERSION <= 6
using ::android::hardware::audio::common::CPP_VERSION::implementation::AudioInputFlagBitfield;
using ::android::hardware::audio::common::CPP_VERSION::implementation::AudioOutputFlagBitfield;
+#endif
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::CPP_VERSION;
diff --git a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
index b0eb2e0..2466fd1 100644
--- a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
@@ -16,6 +16,13 @@
#include "AudioPrimaryHidlHalTest.h"
+#if MAJOR_VERSION >= 7
+#include <audio_policy_configuration_V7_0.h>
+#include <xsdc/XsdcSupport.h>
+
+using android::xsdc_enum_range;
+#endif
+
TEST_P(AudioHidlTest, OpenPrimaryDeviceUsingGetDevice) {
doc::test("Calling openDevice(\"primary\") should return the primary device.");
if (getDeviceName() != DeviceManager::kPrimaryDevice) {
@@ -53,14 +60,29 @@
"Make sure getMicrophones always succeeds"
"and getActiveMicrophones always succeeds when recording from these microphones.");
AudioConfig config{};
+#if MAJOR_VERSION <= 6
config.channelMask = mkEnumBitfield(AudioChannelMask::IN_MONO);
config.sampleRateHz = 8000;
config.format = AudioFormat::PCM_16_BIT;
auto flags = hidl_bitfield<AudioInputFlag>(AudioInputFlag::NONE);
const SinkMetadata initMetadata = {{{.source = AudioSource::MIC, .gain = 1}}};
+#elif MAJOR_VERSION >= 7
+ config.base.channelMask.resize(1);
+ config.base.channelMask[0] = toString(xsd::AudioChannelMask::AUDIO_CHANNEL_IN_MONO);
+ config.base.sampleRateHz = 8000;
+ config.base.format = toString(xsd::AudioFormat::AUDIO_FORMAT_PCM_16_BIT);
+ hidl_vec<hidl_string> flags;
+ const SinkMetadata initMetadata = {
+ {{.source = toString(xsd::AudioSource::AUDIO_SOURCE_MIC), .gain = 1}}};
+#endif
EventFlag* efGroup;
for (auto microphone : microphones) {
+#if MAJOR_VERSION <= 6
if (microphone.deviceAddress.device != AudioDevice::IN_BUILTIN_MIC) {
+#elif MAJOR_VERSION >= 7
+ if (xsd::stringToAudioDevice(microphone.deviceAddress.deviceType) !=
+ xsd::AudioDevice::AUDIO_DEVICE_IN_BUILTIN_MIC) {
+#endif
continue;
}
sp<IStreamIn> stream;
@@ -81,16 +103,16 @@
size_t frameSize = stream->getFrameSize();
size_t frameCount = stream->getBufferSize() / frameSize;
ASSERT_OK(stream->prepareForReading(
- frameSize, frameCount, [&](auto r, auto& c, auto& d, auto&, auto&) {
- readRes = r;
- if (readRes == Result::OK) {
- commandMQ.reset(new CommandMQ(c));
- dataMQ.reset(new DataMQ(d));
- if (dataMQ->isValid() && dataMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(dataMQ->getEventFlagWord(), &efGroup);
+ frameSize, frameCount, [&](auto r, auto& c, auto& d, auto&, auto) {
+ readRes = r;
+ if (readRes == Result::OK) {
+ commandMQ.reset(new CommandMQ(c));
+ dataMQ.reset(new DataMQ(d));
+ if (dataMQ->isValid() && dataMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(dataMQ->getEventFlagWord(), &efGroup);
+ }
}
- }
- }));
+ }));
ASSERT_OK(readRes);
IStreamIn::ReadParameters params;
params.command = IStreamIn::ReadCommand::READ;
@@ -116,13 +138,24 @@
TEST_P(AudioHidlDeviceTest, SetConnectedState) {
doc::test("Check that the HAL can be notified of device connection and deconnection");
+#if MAJOR_VERSION <= 6
using AD = AudioDevice;
for (auto deviceType : {AD::OUT_HDMI, AD::OUT_WIRED_HEADPHONE, AD::IN_USB_HEADSET}) {
+#elif MAJOR_VERSION >= 7
+ using AD = xsd::AudioDevice;
+ for (auto deviceType :
+ {toString(AD::AUDIO_DEVICE_OUT_HDMI), toString(AD::AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+ toString(AD::AUDIO_DEVICE_IN_USB_HEADSET)}) {
+#endif
SCOPED_TRACE("device=" + ::testing::PrintToString(deviceType));
for (bool state : {true, false}) {
SCOPED_TRACE("state=" + ::testing::PrintToString(state));
DeviceAddress address = {};
+#if MAJOR_VERSION <= 6
address.device = deviceType;
+#elif MAJOR_VERSION >= 7
+ address.deviceType = deviceType;
+#endif
auto ret = getDevice()->setConnectedState(address, state);
ASSERT_TRUE(ret.isOk());
if (ret == Result::NOT_SUPPORTED) {
@@ -148,7 +181,11 @@
}
// The stream was constructed with one device, thus getDevices must only return one
ASSERT_EQ(1U, devices.size());
+#if MAJOR_VERSION <= 6
AudioDevice device = devices[0].device;
+#elif MAJOR_VERSION >= 7
+ auto device = devices[0].deviceType;
+#endif
ASSERT_TRUE(device == expectedDevice)
<< "Expected: " << ::testing::PrintToString(expectedDevice)
<< "\n Actual: " << ::testing::PrintToString(device);
@@ -156,12 +193,22 @@
TEST_IO_STREAM(GetDevices, "Check that the stream device == the one it was opened with",
areAudioPatchesSupported() ? doc::partialTest("Audio patches are supported")
+#if MAJOR_VERSION <= 6
: testGetDevices(stream.get(), address.device))
+#elif MAJOR_VERSION >= 7
+ : testGetDevices(stream.get(), address.deviceType))
+#endif
static void testSetDevices(IStream* stream, const DeviceAddress& address) {
DeviceAddress otherAddress = address;
+#if MAJOR_VERSION <= 6
otherAddress.device = (address.device & AudioDevice::BIT_IN) == 0 ? AudioDevice::OUT_SPEAKER
: AudioDevice::IN_BUILTIN_MIC;
+#elif MAJOR_VERSION >= 7
+ otherAddress.deviceType = xsd::isOutputDevice(address.deviceType)
+ ? toString(xsd::AudioDevice::AUDIO_DEVICE_OUT_SPEAKER)
+ : toString(xsd::AudioDevice::AUDIO_DEVICE_IN_BUILTIN_MIC);
+#endif
EXPECT_RESULT(okOrNotSupported, stream->setDevices({otherAddress}));
ASSERT_RESULT(okOrNotSupported,
@@ -186,11 +233,19 @@
TEST_P(InputStreamTest, updateSinkMetadata) {
doc::test("The HAL should not crash on metadata change");
+#if MAJOR_VERSION <= 6
hidl_enum_range<AudioSource> range;
+#elif MAJOR_VERSION >= 7
+ xsdc_enum_range<audio::policy::configuration::V7_0::AudioSource> range;
+#endif
// Test all possible track configuration
- for (AudioSource source : range) {
+ for (auto source : range) {
for (float volume : {0.0, 0.5, 1.0}) {
+#if MAJOR_VERSION <= 6
const SinkMetadata metadata = {{{.source = source, .gain = volume}}};
+#elif MAJOR_VERSION >= 7
+ const SinkMetadata metadata = {{{.source = toString(source), .gain = volume}}};
+#endif
ASSERT_OK(stream->updateSinkMetadata(metadata))
<< "source=" << toString(source) << ", volume=" << volume;
}
@@ -213,13 +268,22 @@
TEST_P(OutputStreamTest, updateSourceMetadata) {
doc::test("The HAL should not crash on metadata change");
+#if MAJOR_VERSION <= 6
hidl_enum_range<AudioUsage> usageRange;
hidl_enum_range<AudioContentType> contentRange;
+#elif MAJOR_VERSION >= 7
+ xsdc_enum_range<audio::policy::configuration::V7_0::AudioUsage> usageRange;
+ xsdc_enum_range<audio::policy::configuration::V7_0::AudioContentType> contentRange;
+#endif
// Test all possible track configuration
for (auto usage : usageRange) {
for (auto content : contentRange) {
for (float volume : {0.0, 0.5, 1.0}) {
+#if MAJOR_VERSION <= 6
const SourceMetadata metadata = {{{usage, content, volume}}};
+#elif MAJOR_VERSION >= 7
+ const SourceMetadata metadata = {{{toString(usage), toString(content), volume}}};
+#endif
ASSERT_OK(stream->updateSourceMetadata(metadata))
<< "usage=" << toString(usage) << ", content=" << toString(content)
<< ", volume=" << volume;
@@ -227,12 +291,26 @@
}
}
+ // clang-format off
// Set many track of different configuration
ASSERT_OK(stream->updateSourceMetadata(
+#if MAJOR_VERSION <= 6
{{{AudioUsage::MEDIA, AudioContentType::MUSIC, 0.1},
{AudioUsage::VOICE_COMMUNICATION, AudioContentType::SPEECH, 1.0},
{AudioUsage::ALARM, AudioContentType::SONIFICATION, 0.0},
- {AudioUsage::ASSISTANT, AudioContentType::UNKNOWN, 0.3}}}));
+ {AudioUsage::ASSISTANT, AudioContentType::UNKNOWN, 0.3}}}
+#elif MAJOR_VERSION >= 7
+ {{{toString(xsd::AudioUsage::AUDIO_USAGE_MEDIA),
+ toString(xsd::AudioContentType::AUDIO_CONTENT_TYPE_MUSIC), 0.1},
+ {toString(xsd::AudioUsage::AUDIO_USAGE_VOICE_COMMUNICATION),
+ toString(xsd::AudioContentType::AUDIO_CONTENT_TYPE_SPEECH), 1.0},
+ {toString(xsd::AudioUsage::AUDIO_USAGE_ALARM),
+ toString(xsd::AudioContentType::AUDIO_CONTENT_TYPE_SONIFICATION), 0.0},
+ {toString(xsd::AudioUsage::AUDIO_USAGE_ASSISTANT),
+ toString(xsd::AudioContentType::AUDIO_CONTENT_TYPE_UNKNOWN), 0.3}}}
+#endif
+ ));
+ // clang-format on
// Set no metadata as if all stream track had stopped
ASSERT_OK(stream->updateSourceMetadata({}));
diff --git a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalUtils.h b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalUtils.h
index 7a52d0e..81a1f7b 100644
--- a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalUtils.h
+++ b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalUtils.h
@@ -56,6 +56,7 @@
}
};
+#if MAJOR_VERSION <= 6
struct GetSupported {
static auto getFormat(IStream* stream) {
auto ret = stream->getFormat();
@@ -80,7 +81,7 @@
EXPECT_OK(stream->getSupportedFormats(returnIn(capabilities)));
return Result::OK;
}
-#elif MAJOR_VERSION >= 6
+#else // MAJOR_VERSION == 6
static Result formats(IStream* stream, hidl_vec<AudioFormat>& capabilities) {
Result res;
EXPECT_OK(stream->getSupportedFormats(returnIn(res, capabilities)));
@@ -88,6 +89,7 @@
}
#endif
};
+#endif // MAJOR_VERSION <= 6
template <class T>
auto dump(T t, hidl_handle handle) {
diff --git a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
index 54d4bbd..bd8de2d 100644
--- a/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/6.0/AudioPrimaryHidlHalTest.cpp
@@ -17,6 +17,7 @@
// pull in all the <= 5.0 tests
#include "5.0/AudioPrimaryHidlHalTest.cpp"
+#if MAJOR_VERSION <= 6
const std::vector<DeviceConfigParameter>& getOutputDeviceConfigParameters() {
static std::vector<DeviceConfigParameter> parameters = [] {
std::vector<DeviceConfigParameter> result;
@@ -28,8 +29,8 @@
const auto& channels = profile->getChannels();
const auto& sampleRates = profile->getSampleRates();
auto configs = ConfigHelper::combineAudioConfig(
- vector<audio_channel_mask_t>(channels.begin(), channels.end()),
- vector<uint32_t>(sampleRates.begin(), sampleRates.end()),
+ std::vector<audio_channel_mask_t>(channels.begin(), channels.end()),
+ std::vector<uint32_t>(sampleRates.begin(), sampleRates.end()),
profile->getFormat());
auto flags = ioProfile->getFlags();
for (auto& config : configs) {
@@ -46,8 +47,8 @@
config.offloadInfo.bufferSize = 256; // arbitrary value
config.offloadInfo.usage = AudioUsage::MEDIA;
result.emplace_back(device, config,
- AudioOutputFlag(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
- AUDIO_OUTPUT_FLAG_DIRECT));
+ AudioOutputFlag(AudioOutputFlag::COMPRESS_OFFLOAD |
+ AudioOutputFlag::DIRECT));
} else {
if (flags & AUDIO_OUTPUT_FLAG_PRIMARY) { // ignore the flag
flags &= ~AUDIO_OUTPUT_FLAG_PRIMARY;
@@ -74,8 +75,8 @@
const auto& channels = profile->getChannels();
const auto& sampleRates = profile->getSampleRates();
auto configs = ConfigHelper::combineAudioConfig(
- vector<audio_channel_mask_t>(channels.begin(), channels.end()),
- vector<uint32_t>(sampleRates.begin(), sampleRates.end()),
+ std::vector<audio_channel_mask_t>(channels.begin(), channels.end()),
+ std::vector<uint32_t>(sampleRates.begin(), sampleRates.end()),
profile->getFormat());
for (const auto& config : configs) {
result.emplace_back(device, config, AudioInputFlag(ioProfile->getFlags()));
@@ -87,13 +88,22 @@
}();
return parameters;
}
+#endif // MAJOR_VERSION <= 6
TEST_P(AudioHidlDeviceTest, CloseDeviceWithOpenedOutputStreams) {
doc::test("Verify that a device can't be closed if there are streams opened");
+#if MAJOR_VERSION <= 6
DeviceAddress address{.device = AudioDevice::OUT_DEFAULT};
- AudioConfig config{};
- auto flags = hidl_bitfield<AudioOutputFlag>(AudioOutputFlag::NONE);
SourceMetadata initMetadata = {{{AudioUsage::MEDIA, AudioContentType::MUSIC, 1 /* gain */}}};
+ auto flags = hidl_bitfield<AudioOutputFlag>(AudioOutputFlag::NONE);
+#elif MAJOR_VERSION >= 7
+ DeviceAddress address{.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_OUT_DEFAULT)};
+ SourceMetadata initMetadata = {
+ {{toString(xsd::AudioUsage::AUDIO_USAGE_MEDIA),
+ toString(xsd::AudioContentType::AUDIO_CONTENT_TYPE_MUSIC), 1 /* gain */}}};
+ hidl_vec<AudioInOutFlag> flags;
+#endif
+ AudioConfig config{};
sp<IStreamOut> stream;
StreamHelper<IStreamOut> helper(stream);
AudioConfig suggestedConfig{};
@@ -111,14 +121,20 @@
TEST_P(AudioHidlDeviceTest, CloseDeviceWithOpenedInputStreams) {
doc::test("Verify that a device can't be closed if there are streams opened");
- auto module = getCachedPolicyConfig().getModuleFromName(getDeviceName());
- if (module->getInputProfiles().empty()) {
+ if (!getCachedPolicyConfig().haveInputProfilesInModule(getDeviceName())) {
GTEST_SKIP() << "Device doesn't have input profiles";
}
+#if MAJOR_VERSION <= 6
DeviceAddress address{.device = AudioDevice::IN_DEFAULT};
- AudioConfig config{};
- auto flags = hidl_bitfield<AudioInputFlag>(AudioInputFlag::NONE);
SinkMetadata initMetadata = {{{.source = AudioSource::MIC, .gain = 1}}};
+ auto flags = hidl_bitfield<AudioInputFlag>(AudioInputFlag::NONE);
+#elif MAJOR_VERSION >= 7
+ DeviceAddress address{.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_IN_DEFAULT)};
+ SinkMetadata initMetadata = {
+ {{.source = toString(xsd::AudioSource::AUDIO_SOURCE_MIC), .gain = 1}}};
+ hidl_vec<AudioInOutFlag> flags;
+#endif
+ AudioConfig config{};
sp<IStreamIn> stream;
StreamHelper<IStreamIn> helper(stream);
AudioConfig suggestedConfig{};
@@ -137,9 +153,8 @@
TEST_P(AudioPatchHidlTest, UpdatePatchInvalidHandle) {
doc::test("Verify that passing an invalid handle to updateAudioPatch is checked");
AudioPatchHandle ignored;
- ASSERT_OK(getDevice()->updateAudioPatch(
- static_cast<int32_t>(AudioHandleConsts::AUDIO_PATCH_HANDLE_NONE),
- hidl_vec<AudioPortConfig>(), hidl_vec<AudioPortConfig>(), returnIn(res, ignored)));
+ ASSERT_OK(getDevice()->updateAudioPatch(AudioPatchHandle{}, hidl_vec<AudioPortConfig>(),
+ hidl_vec<AudioPortConfig>(), returnIn(res, ignored)));
ASSERT_RESULT(Result::INVALID_ARGUMENTS, res);
}
diff --git a/audio/core/all-versions/vts/functional/7.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/7.0/AudioPrimaryHidlHalTest.cpp
index 33efa6f..63eaea8 100644
--- a/audio/core/all-versions/vts/functional/7.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/7.0/AudioPrimaryHidlHalTest.cpp
@@ -16,3 +16,101 @@
// pull in all the <= 6.0 tests
#include "6.0/AudioPrimaryHidlHalTest.cpp"
+
+static std::vector<AudioConfig> combineAudioConfig(std::vector<xsd::AudioChannelMask> channelMasks,
+ std::vector<int64_t> sampleRates,
+ const std::string& format) {
+ std::vector<AudioConfig> configs;
+ configs.reserve(channelMasks.size() * sampleRates.size());
+ for (auto channelMask : channelMasks) {
+ for (auto sampleRate : sampleRates) {
+ AudioConfig config{};
+ // leave offloadInfo to 0
+ config.base.channelMask.resize(1);
+ config.base.channelMask[0] = toString(channelMask);
+ config.base.sampleRateHz = sampleRate;
+ config.base.format = format;
+ configs.push_back(config);
+ }
+ }
+ return configs;
+}
+
+const std::vector<DeviceConfigParameter>& getOutputDeviceConfigParameters() {
+ static std::vector<DeviceConfigParameter> parameters = [] {
+ std::vector<DeviceConfigParameter> result;
+ const std::vector<AudioInOutFlag> offloadFlags = {
+ toString(xsd::AudioInOutFlag::AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+ toString(xsd::AudioInOutFlag::AUDIO_OUTPUT_FLAG_DIRECT)};
+ for (const auto& device : getDeviceParameters()) {
+ auto module =
+ getCachedPolicyConfig().getModuleFromName(std::get<PARAM_DEVICE_NAME>(device));
+ for (const auto& mixPort : module->getFirstMixPorts()->getMixPort()) {
+ if (mixPort.getRole() != xsd::Role::source) continue; // not an output profile
+ auto xsdFlags = mixPort.getFlags();
+ const bool isOffload =
+ std::find(xsdFlags.begin(), xsdFlags.end(),
+ xsd::AudioInOutFlag::AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) !=
+ xsdFlags.end();
+ std::vector<AudioInOutFlag> flags;
+ if (!isOffload) {
+ for (auto flag : xsdFlags) {
+ if (flag != xsd::AudioInOutFlag::AUDIO_OUTPUT_FLAG_PRIMARY) {
+ flags.push_back(toString(flag));
+ }
+ }
+ } else {
+ flags = offloadFlags;
+ }
+ for (const auto& profile : mixPort.getProfile()) {
+ auto configs =
+ combineAudioConfig(profile.getChannelMasks(),
+ profile.getSamplingRates(), profile.getFormat());
+ for (auto& config : configs) {
+ // Some combinations of flags declared in the config file require special
+ // treatment.
+ if (isOffload) {
+ config.offloadInfo.base = config.base;
+ config.offloadInfo.streamType =
+ toString(xsd::AudioStreamType::AUDIO_STREAM_MUSIC);
+ config.offloadInfo.usage = toString(xsd::AudioUsage::AUDIO_USAGE_MEDIA);
+ config.offloadInfo.bitRatePerSecond = 320;
+ config.offloadInfo.durationMicroseconds = -1;
+ config.offloadInfo.bitWidth = 16;
+ config.offloadInfo.bufferSize = 256; // arbitrary value
+ }
+ result.emplace_back(device, config, flags);
+ }
+ }
+ }
+ }
+ return result;
+ }();
+ return parameters;
+}
+
+const std::vector<DeviceConfigParameter>& getInputDeviceConfigParameters() {
+ static std::vector<DeviceConfigParameter> parameters = [] {
+ std::vector<DeviceConfigParameter> result;
+ for (const auto& device : getDeviceParameters()) {
+ auto module =
+ getCachedPolicyConfig().getModuleFromName(std::get<PARAM_DEVICE_NAME>(device));
+ for (const auto& mixPort : module->getFirstMixPorts()->getMixPort()) {
+ if (mixPort.getRole() != xsd::Role::sink) continue; // not an input profile
+ std::vector<AudioInOutFlag> flags;
+ std::transform(mixPort.getFlags().begin(), mixPort.getFlags().end(),
+ std::back_inserter(flags), [](auto flag) { return toString(flag); });
+ for (const auto& profile : mixPort.getProfile()) {
+ auto configs =
+ combineAudioConfig(profile.getChannelMasks(),
+ profile.getSamplingRates(), profile.getFormat());
+ for (const auto& config : configs) {
+ result.emplace_back(device, config, flags);
+ }
+ }
+ }
+ }
+ return result;
+ }();
+ return parameters;
+}
diff --git a/audio/core/all-versions/vts/functional/7.0/PolicyConfig.h b/audio/core/all-versions/vts/functional/7.0/PolicyConfig.h
new file mode 100644
index 0000000..d790b34
--- /dev/null
+++ b/audio/core/all-versions/vts/functional/7.0/PolicyConfig.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+// Note: it is assumed that this file is included from AudioPrimaryHidlHalTest.h
+// and thus it doesn't have all '#include' and 'using' directives required
+// for a standalone compilation.
+
+namespace xsd {
+using Module = Modules::Module;
+}
+
+class PolicyConfig {
+ public:
+ explicit PolicyConfig(const std::string& configFileName)
+ : mConfigFileName{configFileName},
+ mFilePath{findExistingConfigurationFile(mConfigFileName)},
+ mConfig{xsd::read(mFilePath.c_str())} {
+ if (mConfig) {
+ mStatus = OK;
+ mPrimaryModule = getModuleFromName(DeviceManager::kPrimaryDevice);
+ for (const auto& module : mConfig->getFirstModules()->get_module()) {
+ auto attachedDevices = module.getFirstAttachedDevices()->getItem();
+ if (!attachedDevices.empty()) {
+ mModulesWithDevicesNames.insert(module.getName());
+ }
+ }
+ }
+ }
+ status_t getStatus() const { return mStatus; }
+ std::string getError() const {
+ if (mFilePath.empty()) {
+ return std::string{"Could not find "} + mConfigFileName +
+ " file in: " + testing::PrintToString(android::audio_get_configuration_paths());
+ } else {
+ return "Invalid config file: " + mFilePath;
+ }
+ }
+ const std::string& getFilePath() const { return mFilePath; }
+ const xsd::Module* getModuleFromName(const std::string& name) const {
+ if (mConfig) {
+ for (const auto& module : mConfig->getFirstModules()->get_module()) {
+ if (module.getName() == name) return &module;
+ }
+ }
+ return nullptr;
+ }
+ const xsd::Module* getPrimaryModule() const { return mPrimaryModule; }
+ const std::set<std::string>& getModulesWithDevicesNames() const {
+ return mModulesWithDevicesNames;
+ }
+ bool haveInputProfilesInModule(const std::string& name) const {
+ auto module = getModuleFromName(name);
+ for (const auto& mixPort : module->getFirstMixPorts()->getMixPort()) {
+ if (mixPort.getRole() == xsd::Role::sink) return true;
+ }
+ return false;
+ }
+
+ private:
+ static std::string findExistingConfigurationFile(const std::string& fileName) {
+ for (const auto& location : android::audio_get_configuration_paths()) {
+ std::string path = location + '/' + fileName;
+ if (access(path.c_str(), F_OK) == 0) {
+ return path;
+ }
+ }
+ return std::string{};
+ }
+
+ const std::string mConfigFileName;
+ const std::string mFilePath;
+ std::optional<xsd::AudioPolicyConfiguration> mConfig;
+ status_t mStatus = NO_INIT;
+ const xsd::Module* mPrimaryModule;
+ std::set<std::string> mModulesWithDevicesNames;
+};
diff --git a/audio/core/all-versions/vts/functional/Android.bp b/audio/core/all-versions/vts/functional/Android.bp
index 6ac9b20..c7bfe08 100644
--- a/audio/core/all-versions/vts/functional/Android.bp
+++ b/audio/core/all-versions/vts/functional/Android.bp
@@ -19,9 +19,6 @@
defaults: ["VtsHalTargetTestDefaults"],
static_libs: [
"android.hardware.audio.common.test.utility",
- "libaudiofoundation",
- "libaudiopolicycomponents",
- "libmedia_helper",
"libxml2",
],
shared_libs: [
@@ -44,6 +41,9 @@
"2.0/AudioPrimaryHidlHalTest.cpp",
],
static_libs: [
+ "libaudiofoundation",
+ "libaudiopolicycomponents",
+ "libmedia_helper",
"android.hardware.audio@2.0",
"android.hardware.audio.common@2.0",
],
@@ -67,6 +67,9 @@
"4.0/AudioPrimaryHidlHalTest.cpp",
],
static_libs: [
+ "libaudiofoundation",
+ "libaudiopolicycomponents",
+ "libmedia_helper",
"android.hardware.audio@4.0",
"android.hardware.audio.common@4.0",
],
@@ -90,6 +93,9 @@
"5.0/AudioPrimaryHidlHalTest.cpp",
],
static_libs: [
+ "libaudiofoundation",
+ "libaudiopolicycomponents",
+ "libmedia_helper",
"android.hardware.audio@5.0",
"android.hardware.audio.common@5.0",
],
@@ -113,6 +119,9 @@
"6.0/AudioPrimaryHidlHalTest.cpp",
],
static_libs: [
+ "libaudiofoundation",
+ "libaudiopolicycomponents",
+ "libmedia_helper",
"android.hardware.audio@6.0",
"android.hardware.audio.common@6.0",
],
@@ -130,7 +139,6 @@
}
cc_test {
- enabled: false,
name: "VtsHalAudioV7_0TargetTest",
defaults: ["VtsHalAudioTargetTest_defaults"],
srcs: [
@@ -139,6 +147,7 @@
static_libs: [
"android.hardware.audio@7.0",
"android.hardware.audio.common@7.0",
+ "android.hardware.audio.common@7.0-enums",
],
cflags: [
"-DMAJOR_VERSION=7",
diff --git a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
index 01bdd69..5e4b414 100644
--- a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
+++ b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
@@ -42,8 +42,11 @@
#include PATH(android/hardware/audio/FILE_VERSION/IPrimaryDevice.h)
#include PATH(android/hardware/audio/FILE_VERSION/types.h)
#include PATH(android/hardware/audio/common/FILE_VERSION/types.h)
+#if MAJOR_VERSION >= 7
+#include <audio_policy_configuration_V7_0-enums.h>
+#include <audio_policy_configuration_V7_0.h>
+#endif
-#include <Serializer.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
#include <hidl/GtestPrinter.h>
@@ -63,14 +66,6 @@
#include "4.0/AudioPrimaryHidlHalUtils.h"
#endif
-using std::initializer_list;
-using std::list;
-using std::string;
-using std::to_string;
-using std::vector;
-
-using ::android::AudioPolicyConfig;
-using ::android::HwModule;
using ::android::NO_INIT;
using ::android::OK;
using ::android::sp;
@@ -93,6 +88,12 @@
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::common::test::utility;
using namespace ::android::hardware::audio::CPP_VERSION;
+#if MAJOR_VERSION >= 7
+// Make an alias for enumerations generated from the APM config XSD.
+namespace xsd {
+using namespace ::audio::policy::configuration::CPP_VERSION;
+}
+#endif
// Typical accepted results from interface methods
static auto okOrNotSupported = {Result::OK, Result::NOT_SUPPORTED};
@@ -103,8 +104,12 @@
static auto invalidArgsOrNotSupported = {Result::INVALID_ARGUMENTS, Result::NOT_SUPPORTED};
static auto invalidStateOrNotSupported = {Result::INVALID_STATE, Result::NOT_SUPPORTED};
-#define AUDIO_PRIMARY_HIDL_HAL_TEST
#include "DeviceManager.h"
+#if MAJOR_VERSION <= 6
+#include "PolicyConfig.h"
+#elif MAJOR_VERSION >= 7
+#include "7.0/PolicyConfig.h"
+#endif
class HidlTest : public ::testing::Test {
public:
@@ -136,83 +141,16 @@
////////////////////////// Audio policy configuration ////////////////////////
//////////////////////////////////////////////////////////////////////////////
-static constexpr char kConfigFileName[] = "audio_policy_configuration.xml";
-
// Stringify the argument.
#define QUOTE(x) #x
#define STRINGIFY(x) QUOTE(x)
-struct PolicyConfigData {
- android::HwModuleCollection hwModules;
- android::DeviceVector availableOutputDevices;
- android::DeviceVector availableInputDevices;
- sp<android::DeviceDescriptor> defaultOutputDevice;
-};
-
-class PolicyConfig : private PolicyConfigData, public AudioPolicyConfig {
- public:
- PolicyConfig()
- : AudioPolicyConfig(hwModules, availableOutputDevices, availableInputDevices,
- defaultOutputDevice) {
- for (const auto& location : android::audio_get_configuration_paths()) {
- std::string path = location + '/' + kConfigFileName;
- if (access(path.c_str(), F_OK) == 0) {
- mFilePath = path;
- break;
- }
- }
- mStatus = android::deserializeAudioPolicyFile(mFilePath.c_str(), this);
- if (mStatus == OK) {
- mPrimaryModule = getHwModules().getModuleFromName(DeviceManager::kPrimaryDevice);
- // Available devices are not 'attached' to modules at this moment.
- // Need to go over available devices and find their module.
- for (const auto& device : availableOutputDevices) {
- for (const auto& module : hwModules) {
- if (module->getDeclaredDevices().indexOf(device) >= 0) {
- mModulesWithDevicesNames.insert(module->getName());
- break;
- }
- }
- }
- for (const auto& device : availableInputDevices) {
- for (const auto& module : hwModules) {
- if (module->getDeclaredDevices().indexOf(device) >= 0) {
- mModulesWithDevicesNames.insert(module->getName());
- break;
- }
- }
- }
- }
- }
- status_t getStatus() const { return mStatus; }
- std::string getError() const {
- if (mFilePath.empty()) {
- return std::string{"Could not find "} + kConfigFileName +
- " file in: " + testing::PrintToString(android::audio_get_configuration_paths());
- } else {
- return "Invalid config file: " + mFilePath;
- }
- }
- const std::string& getFilePath() const { return mFilePath; }
- sp<const HwModule> getModuleFromName(const std::string& name) const {
- return getHwModules().getModuleFromName(name.c_str());
- }
- sp<const HwModule> getPrimaryModule() const { return mPrimaryModule; }
- const std::set<std::string>& getModulesWithDevicesNames() const {
- return mModulesWithDevicesNames;
- }
-
- private:
- status_t mStatus = NO_INIT;
- std::string mFilePath;
- sp<HwModule> mPrimaryModule = nullptr;
- std::set<std::string> mModulesWithDevicesNames;
-};
+static constexpr char kConfigFileName[] = "audio_policy_configuration.xml";
// Cached policy config after parsing for faster test startup
const PolicyConfig& getCachedPolicyConfig() {
static std::unique_ptr<PolicyConfig> policyConfig = [] {
- auto config = std::make_unique<PolicyConfig>();
+ auto config = std::make_unique<PolicyConfig>(kConfigFileName);
return config;
}();
return *policyConfig;
@@ -449,9 +387,10 @@
* The getter and/or the setter may return NOT_SUPPORTED if optionality == OPTIONAL.
*/
template <Optionality optionality = REQUIRED, class IUTGetter, class Getter, class Setter>
- void testAccessors(IUTGetter iutGetter, const string& propertyName,
- const Initial expectedInitial, list<Property> valuesToTest, Setter setter,
- Getter getter, const vector<Property>& invalidValues = {}) {
+ void testAccessors(IUTGetter iutGetter, const std::string& propertyName,
+ const Initial expectedInitial, std::list<Property> valuesToTest,
+ Setter setter, Getter getter,
+ const std::vector<Property>& invalidValues = {}) {
const auto expectedResults = {Result::OK,
optionality == OPTIONAL ? Result::NOT_SUPPORTED : Result::OK};
@@ -495,9 +434,9 @@
EXPECT_RESULT(expectedResults, ((this->*iutGetter)().get()->*setter)(initialValue));
}
template <Optionality optionality = REQUIRED, class Getter, class Setter>
- void testAccessors(const string& propertyName, const Initial expectedInitial,
- list<Property> valuesToTest, Setter setter, Getter getter,
- const vector<Property>& invalidValues = {}) {
+ void testAccessors(const std::string& propertyName, const Initial expectedInitial,
+ std::list<Property> valuesToTest, Setter setter, Getter getter,
+ const std::vector<Property>& invalidValues = {}) {
testAccessors<optionality>(&BaseTestClass::getDevice, propertyName, expectedInitial,
valuesToTest, setter, getter, invalidValues);
}
@@ -573,9 +512,13 @@
// Nesting a tuple in another tuple allows to use GTest Combine function to generate
// all combinations of devices and configs.
enum { PARAM_DEVICE, PARAM_CONFIG, PARAM_FLAGS };
+#if MAJOR_VERSION <= 6
enum { INDEX_INPUT, INDEX_OUTPUT };
using DeviceConfigParameter =
std::tuple<DeviceParameter, AudioConfig, std::variant<AudioInputFlag, AudioOutputFlag>>;
+#elif MAJOR_VERSION >= 7
+using DeviceConfigParameter = std::tuple<DeviceParameter, AudioConfig, std::vector<AudioInOutFlag>>;
+#endif
#if MAJOR_VERSION >= 6
const std::vector<DeviceConfigParameter>& getInputDeviceConfigParameters();
@@ -583,8 +526,8 @@
#endif
#if MAJOR_VERSION >= 4
-static string SanitizeStringForGTestName(const string& s) {
- string result = s;
+static std::string SanitizeStringForGTestName(const std::string& s) {
+ std::string result = s;
for (size_t i = 0; i < result.size(); i++) {
// gtest test names must only contain alphanumeric characters
if (!std::isalnum(result[i])) result[i] = '_';
@@ -598,43 +541,57 @@
* As the only parameter changing are channel mask and sample rate,
* only print those ones in the test name.
*/
-static string DeviceConfigParameterToString(
+static std::string DeviceConfigParameterToString(
const testing::TestParamInfo<DeviceConfigParameter>& info) {
const AudioConfig& config = std::get<PARAM_CONFIG>(info.param);
const auto deviceName = DeviceParameterToString(::testing::TestParamInfo<DeviceParameter>{
std::get<PARAM_DEVICE>(info.param), info.index});
- return (deviceName.empty() ? "" : deviceName + "_") + to_string(info.index) + "__" +
- to_string(config.sampleRateHz) + "_" +
- // "MONO" is more clear than "FRONT_LEFT"
- ((config.channelMask == mkEnumBitfield(AudioChannelMask::OUT_MONO) ||
- config.channelMask == mkEnumBitfield(AudioChannelMask::IN_MONO))
- ? "MONO"
+ const auto devicePart =
+ (deviceName.empty() ? "" : deviceName + "_") + std::to_string(info.index);
+    // The types differ significantly between versions 2, 4..6, and 7. Use
+    // separate code sections for easier understanding.
#if MAJOR_VERSION == 2
- : ::testing::PrintToString(config.channelMask)
-#elif MAJOR_VERSION >= 4
- // In V4 and above the channel mask is a bitfield.
- // Printing its value using HIDL's toString for a bitfield emits a lot of extra
- // text due to overlapping constant values. Instead, we print the bitfield value
- // as if it was a single value + its hex representation
- : SanitizeStringForGTestName(
- ::testing::PrintToString(AudioChannelMask(config.channelMask)) + "_" +
- toHexString(config.channelMask))
+ const auto configPart =
+ std::to_string(config.sampleRateHz) + "_" +
+ // "MONO" is more clear than "FRONT_LEFT"
+ (config.channelMask == AudioChannelMask::OUT_MONO ||
+ config.channelMask == AudioChannelMask::IN_MONO
+ ? "MONO"
+ : ::testing::PrintToString(config.channelMask)) +
+ "_" +
+ std::visit([](auto&& arg) -> std::string { return ::testing::PrintToString(arg); },
+ std::get<PARAM_FLAGS>(info.param));
+#elif MAJOR_VERSION >= 4 && MAJOR_VERSION <= 6
+ const auto configPart =
+ std::to_string(config.sampleRateHz) + "_" +
+ // "MONO" is more clear than "FRONT_LEFT"
+ (config.channelMask == mkEnumBitfield(AudioChannelMask::OUT_MONO) ||
+ config.channelMask == mkEnumBitfield(AudioChannelMask::IN_MONO)
+ ? "MONO"
+ // In V4 and above the channel mask is a bitfield.
+ // Printing its value using HIDL's toString for a bitfield emits a lot of extra
+ // text due to overlapping constant values. Instead, we print the bitfield
+ // value as if it was a single value + its hex representation
+ : SanitizeStringForGTestName(
+ ::testing::PrintToString(AudioChannelMask(config.channelMask)) +
+ "_" + toHexString(config.channelMask))) +
+ "_" +
+ SanitizeStringForGTestName(std::visit(
+ [](auto&& arg) -> std::string {
+ using T = std::decay_t<decltype(arg)>;
+ // Need to use FQN of toString to avoid confusing the compiler
+ return ::android::hardware::audio::common::CPP_VERSION::toString<T>(
+ hidl_bitfield<T>(arg));
+ },
+ std::get<PARAM_FLAGS>(info.param)));
+#elif MAJOR_VERSION >= 7
+ const auto configPart =
+ std::to_string(config.base.sampleRateHz) + "_" +
+ // The channel masks and flags are vectors of strings, just need to sanitize them.
+ SanitizeStringForGTestName(::testing::PrintToString(config.base.channelMask)) + "_" +
+ SanitizeStringForGTestName(::testing::PrintToString(std::get<PARAM_FLAGS>(info.param)));
#endif
- ) +
- "_" +
-#if MAJOR_VERSION == 2
- std::visit([](auto&& arg) -> std::string { return ::testing::PrintToString(arg); },
- std::get<PARAM_FLAGS>(info.param));
-#elif MAJOR_VERSION >= 4
- SanitizeStringForGTestName(std::visit(
- [](auto&& arg) -> std::string {
- using T = std::decay_t<decltype(arg)>;
- // Need to use FQN of toString to avoid confusing the compiler
- return ::android::hardware::audio::common::CPP_VERSION::toString<T>(
- hidl_bitfield<T>(arg));
- },
- std::get<PARAM_FLAGS>(info.param)));
-#endif
+ return devicePart + "__" + configPart;
}
class AudioHidlTestWithDeviceConfigParameter
@@ -660,7 +617,7 @@
AudioOutputFlag getOutputFlags() const {
return std::get<INDEX_OUTPUT>(std::get<PARAM_FLAGS>(GetParam()));
}
-#elif MAJOR_VERSION >= 4
+#elif MAJOR_VERSION >= 4 && MAJOR_VERSION <= 6
hidl_bitfield<AudioInputFlag> getInputFlags() const {
return hidl_bitfield<AudioInputFlag>(
std::get<INDEX_INPUT>(std::get<PARAM_FLAGS>(GetParam())));
@@ -669,10 +626,17 @@
return hidl_bitfield<AudioOutputFlag>(
std::get<INDEX_OUTPUT>(std::get<PARAM_FLAGS>(GetParam())));
}
+#elif MAJOR_VERSION >= 7
+ hidl_vec<AudioInOutFlag> getInputFlags() const { return std::get<PARAM_FLAGS>(GetParam()); }
+ hidl_vec<AudioInOutFlag> getOutputFlags() const { return std::get<PARAM_FLAGS>(GetParam()); }
#endif
};
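+// ConfigHelper.h is only used by the V2..V6 tests and guards against being
+// included outside of this file via the AUDIO_PRIMARY_HIDL_HAL_TEST macro
+// (see the '#error' check in that header), hence the define/undef pair below.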
+#if MAJOR_VERSION <= 6
+#define AUDIO_PRIMARY_HIDL_HAL_TEST
#include "ConfigHelper.h"
+#undef AUDIO_PRIMARY_HIDL_HAL_TEST
+#endif
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// getInputBufferSize /////////////////////////////
@@ -839,7 +803,7 @@
AudioConfig* suggestedConfigPtr) {
// FIXME: Open a stream without an IOHandle
// This is not required to be accepted by hal implementations
- AudioIoHandle ioHandle = (AudioIoHandle)AudioHandleConsts::AUDIO_IO_HANDLE_NONE;
+ AudioIoHandle ioHandle{};
AudioConfig suggestedConfig{};
bool retryWithSuggestedConfig = true;
if (suggestedConfigPtr == nullptr) {
@@ -932,7 +896,11 @@
class OutputStreamTest : public OpenStreamTest<IStreamOut> {
void SetUp() override {
ASSERT_NO_FATAL_FAILURE(OpenStreamTest::SetUp()); // setup base
+#if MAJOR_VERSION <= 6
address.device = AudioDevice::OUT_DEFAULT;
+#elif MAJOR_VERSION >= 7
+ address.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_OUT_DEFAULT);
+#endif
const AudioConfig& config = getConfig();
auto flags = getOutputFlags();
testOpen(
@@ -946,13 +914,19 @@
},
config);
}
-#if MAJOR_VERSION >= 4
+#if MAJOR_VERSION >= 4 && MAJOR_VERSION <= 6
- protected:
+ protected:
const SourceMetadata initMetadata = {
{ { AudioUsage::MEDIA,
AudioContentType::MUSIC,
1 /* gain */ } }};
+#elif MAJOR_VERSION >= 7
+ protected:
+ const SourceMetadata initMetadata = {
+ { { toString(xsd::AudioUsage::AUDIO_USAGE_MEDIA),
+ toString(xsd::AudioContentType::AUDIO_CONTENT_TYPE_MUSIC),
+ 1 /* gain */ } }};
#endif
};
TEST_P(OutputStreamTest, OpenOutputStreamTest) {
@@ -995,7 +969,11 @@
class InputStreamTest : public OpenStreamTest<IStreamIn> {
void SetUp() override {
ASSERT_NO_FATAL_FAILURE(OpenStreamTest::SetUp()); // setup base
+#if MAJOR_VERSION <= 6
address.device = AudioDevice::IN_DEFAULT;
+#elif MAJOR_VERSION >= 7
+ address.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_IN_DEFAULT);
+#endif
const AudioConfig& config = getConfig();
auto flags = getInputFlags();
testOpen(
@@ -1009,8 +987,11 @@
protected:
#if MAJOR_VERSION == 2
const AudioSource initMetadata = AudioSource::DEFAULT;
-#elif MAJOR_VERSION >= 4
- const SinkMetadata initMetadata = {{{.source = AudioSource::DEFAULT, .gain = 1}}};
+#elif MAJOR_VERSION >= 4 && MAJOR_VERSION <= 6
+ const SinkMetadata initMetadata = {{ {.source = AudioSource::DEFAULT, .gain = 1 } }};
+#elif MAJOR_VERSION >= 7
+ const SinkMetadata initMetadata = {
+ {{.source = toString(xsd::AudioSource::AUDIO_SOURCE_DEFAULT), .gain = 1}}};
#endif
};
@@ -1067,6 +1048,7 @@
TEST_IO_STREAM(GetFrameCount, "Check that getting stream frame count does not crash the HAL.",
ASSERT_TRUE(stream->getFrameCount().isOk()))
+#if MAJOR_VERSION <= 6
TEST_IO_STREAM(GetSampleRate, "Check that the stream sample rate == the one it was opened with",
ASSERT_EQ(audioConfig.sampleRateHz, extract(stream->getSampleRate())))
@@ -1075,6 +1057,7 @@
TEST_IO_STREAM(GetFormat, "Check that the stream format == the one it was opened with",
ASSERT_EQ(audioConfig.format, extract(stream->getFormat())))
+#endif
// TODO: for now only check that the framesize is not incoherent
TEST_IO_STREAM(GetFrameSize, "Check that the stream frame size == the one it was opened with",
@@ -1084,7 +1067,7 @@
ASSERT_GE(extract(stream->getBufferSize()), extract(stream->getFrameSize())));
template <class Property, class CapabilityGetter>
-static void testCapabilityGetter(const string& name, IStream* stream,
+static void testCapabilityGetter(const std::string& name, IStream* stream,
CapabilityGetter capabilityGetter,
Return<Property> (IStream::*getter)(),
Return<Result> (IStream::*setter)(Property),
@@ -1120,6 +1103,7 @@
}
}
+#if MAJOR_VERSION <= 6
TEST_IO_STREAM(SupportedSampleRate, "Check that the stream sample rate is declared as supported",
testCapabilityGetter("getSupportedSampleRate", stream.get(),
&GetSupported::sampleRates, &IStream::getSampleRate,
@@ -1137,19 +1121,71 @@
TEST_IO_STREAM(SupportedFormat, "Check that the stream format is declared as supported",
testCapabilityGetter("getSupportedFormat", stream.get(), &GetSupported::formats,
&IStream::getFormat, &IStream::setFormat))
+#else
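+// Starting with V7, streams report their supported configurations via audio
+// profiles (getSupportedProfiles / setAudioProperties) instead of per-property
+// getters and setters, so the tests below exercise those methods instead.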
+static void testGetSupportedProfiles(IStream* stream) {
+ Result res;
+ hidl_vec<AudioProfile> profiles;
+ auto ret = stream->getSupportedProfiles(returnIn(res, profiles));
+ EXPECT_TRUE(ret.isOk());
+ if (res == Result::OK) {
+ EXPECT_GT(profiles.size(), 0);
+ } else {
+ EXPECT_EQ(Result::NOT_SUPPORTED, res);
+ }
+}
+
+TEST_IO_STREAM(GetSupportedProfiles, "Try to call optional method GetSupportedProfiles",
+ testGetSupportedProfiles(stream.get()))
+
+static void testSetAudioProperties(IStream* stream) {
+ Result res;
+ hidl_vec<AudioProfile> profiles;
+ auto ret = stream->getSupportedProfiles(returnIn(res, profiles));
+ EXPECT_TRUE(ret.isOk());
+ if (res == Result::NOT_SUPPORTED) {
+ GTEST_SKIP() << "Retrieving supported profiles is not implemented";
+ }
+ for (const auto& profile : profiles) {
+ for (const auto& sampleRate : profile.sampleRates) {
+ for (const auto& channelMask : profile.channelMasks) {
+ AudioConfigBase config{.format = profile.format,
+ .sampleRateHz = sampleRate,
+ .channelMask = channelMask};
+ auto ret = stream->setAudioProperties(config);
+ EXPECT_TRUE(ret.isOk());
+ EXPECT_EQ(Result::OK, ret) << config.format << "; " << config.sampleRateHz << "; "
+ << toString(config.channelMask);
+ }
+ }
+ }
+}
+
+TEST_IO_STREAM(SetAudioProperties, "Call setAudioProperties for all supported profiles",
+ testSetAudioProperties(stream.get()))
+#endif
static void testGetAudioProperties(IStream* stream, AudioConfig expectedConfig) {
+#if MAJOR_VERSION <= 6
uint32_t sampleRateHz;
auto mask = mkEnumBitfield<AudioChannelMask>({});
AudioFormat format;
- stream->getAudioProperties(returnIn(sampleRateHz, mask, format));
+ auto ret = stream->getAudioProperties(returnIn(sampleRateHz, mask, format));
+ EXPECT_TRUE(ret.isOk());
// FIXME: the qcom hal it does not currently negotiate the sampleRate &
// channel mask
EXPECT_EQ(expectedConfig.sampleRateHz, sampleRateHz);
EXPECT_EQ(expectedConfig.channelMask, mask);
EXPECT_EQ(expectedConfig.format, format);
+#elif MAJOR_VERSION >= 7
+ AudioConfigBase actualConfig{};
+ auto ret = stream->getAudioProperties(returnIn(actualConfig));
+ EXPECT_TRUE(ret.isOk());
+ EXPECT_EQ(expectedConfig.base.sampleRateHz, actualConfig.sampleRateHz);
+ EXPECT_EQ(expectedConfig.base.channelMask, actualConfig.channelMask);
+ EXPECT_EQ(expectedConfig.base.format, actualConfig.format);
+#endif
}
TEST_IO_STREAM(GetAudioProperties,
@@ -1160,7 +1196,7 @@
ASSERT_RESULT(okOrNotSupportedOrInvalidArgs, stream->setHwAvSync(666)))
static void checkGetNoParameter(IStream* stream, hidl_vec<hidl_string> keys,
- initializer_list<Result> expectedResults) {
+ std::initializer_list<Result> expectedResults) {
hidl_vec<ParameterValue> parameters;
Result res;
ASSERT_OK(Parameters::get(stream, keys, returnIn(res, parameters)));
@@ -1271,7 +1307,11 @@
return;
}
ASSERT_OK(res);
+#if MAJOR_VERSION <= 6
ASSERT_EQ(AudioSource::DEFAULT, source);
+#elif MAJOR_VERSION >= 7
+ ASSERT_EQ(xsd::AudioSource::AUDIO_SOURCE_DEFAULT, xsd::stringToAudioSource(source));
+#endif
}
static void testUnitaryGain(std::function<Return<Result>(float)> setGain) {
@@ -1286,7 +1326,7 @@
}
static void testOptionalUnitaryGain(std::function<Return<Result>(float)> setGain,
- string debugName) {
+ std::string debugName) {
auto result = setGain(1);
ASSERT_IS_OK(result);
if (result == Result::NOT_SUPPORTED) {
@@ -1306,7 +1346,7 @@
Result res;
// Ignore output parameters as the call should fail
ASSERT_OK(stream->prepareForReading(frameSize, framesCount,
- [&res](auto r, auto&, auto&, auto&, auto&) { res = r; }));
+ [&res](auto r, auto&, auto&, auto&, auto) { res = r; }));
EXPECT_RESULT(Result::INVALID_ARGUMENTS, res);
}
@@ -1371,7 +1411,7 @@
Result res;
// Ignore output parameters as the call should fail
ASSERT_OK(stream->prepareForWriting(frameSize, framesCount,
- [&res](auto r, auto&, auto&, auto&, auto&) { res = r; }));
+ [&res](auto r, auto&, auto&, auto&, auto) { res = r; }));
EXPECT_RESULT(Result::INVALID_ARGUMENTS, res);
}
diff --git a/audio/core/all-versions/vts/functional/ConfigHelper.h b/audio/core/all-versions/vts/functional/ConfigHelper.h
index 8ef2b43..1a1dbea 100644
--- a/audio/core/all-versions/vts/functional/ConfigHelper.h
+++ b/audio/core/all-versions/vts/functional/ConfigHelper.h
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#pragma once
+
// Code in this file uses 'getCachedPolicyConfig'
#ifndef AUDIO_PRIMARY_HIDL_HAL_TEST
#error Must be included from AudioPrimaryHidlTest.h
@@ -46,32 +48,32 @@
}
// Cache result ?
- static const vector<AudioConfig> getRequiredSupportPlaybackAudioConfig() {
+ static const std::vector<AudioConfig> getRequiredSupportPlaybackAudioConfig() {
return combineAudioConfig({AudioChannelMask::OUT_STEREO, AudioChannelMask::OUT_MONO},
{8000, 11025, 16000, 22050, 32000, 44100},
{AudioFormat::PCM_16_BIT});
}
- static const vector<AudioConfig> getRecommendedSupportPlaybackAudioConfig() {
+ static const std::vector<AudioConfig> getRecommendedSupportPlaybackAudioConfig() {
return combineAudioConfig({AudioChannelMask::OUT_STEREO, AudioChannelMask::OUT_MONO},
{24000, 48000}, {AudioFormat::PCM_16_BIT});
}
- static const vector<AudioConfig> getRequiredSupportCaptureAudioConfig() {
+ static const std::vector<AudioConfig> getRequiredSupportCaptureAudioConfig() {
if (!primaryHasMic()) return {};
return combineAudioConfig({AudioChannelMask::IN_MONO}, {8000, 11025, 16000, 44100},
{AudioFormat::PCM_16_BIT});
}
- static const vector<AudioConfig> getRecommendedSupportCaptureAudioConfig() {
+ static const std::vector<AudioConfig> getRecommendedSupportCaptureAudioConfig() {
if (!primaryHasMic()) return {};
return combineAudioConfig({AudioChannelMask::IN_STEREO}, {22050, 48000},
{AudioFormat::PCM_16_BIT});
}
- static vector<AudioConfig> combineAudioConfig(vector<audio_channel_mask_t> channelMasks,
- vector<uint32_t> sampleRates,
- audio_format_t format) {
- vector<AudioConfig> configs;
+ static std::vector<AudioConfig> combineAudioConfig(
+ std::vector<audio_channel_mask_t> channelMasks, std::vector<uint32_t> sampleRates,
+ audio_format_t format) {
+ std::vector<AudioConfig> configs;
configs.reserve(channelMasks.size() * sampleRates.size());
for (auto channelMask : channelMasks) {
for (auto sampleRate : sampleRates) {
@@ -86,10 +88,10 @@
return configs;
}
- static vector<AudioConfig> combineAudioConfig(vector<AudioChannelMask> channelMasks,
- vector<uint32_t> sampleRates,
- vector<AudioFormat> formats) {
- vector<AudioConfig> configs;
+ static std::vector<AudioConfig> combineAudioConfig(std::vector<AudioChannelMask> channelMasks,
+ std::vector<uint32_t> sampleRates,
+ std::vector<AudioFormat> formats) {
+ std::vector<AudioConfig> configs;
configs.reserve(channelMasks.size() * sampleRates.size() * formats.size());
for (auto channelMask : channelMasks) {
for (auto sampleRate : sampleRates) {
diff --git a/audio/core/all-versions/vts/functional/DeviceManager.h b/audio/core/all-versions/vts/functional/DeviceManager.h
index 0c0727f..6efed79 100644
--- a/audio/core/all-versions/vts/functional/DeviceManager.h
+++ b/audio/core/all-versions/vts/functional/DeviceManager.h
@@ -14,10 +14,11 @@
* limitations under the License.
*/
-// Code in this file uses 'environment'
-#ifndef AUDIO_PRIMARY_HIDL_HAL_TEST
-#error Must be included from AudioPrimaryHidlTest.h
-#endif
+#pragma once
+
+// Note: it is assumed that this file is included from AudioPrimaryHidlTest.h
+// and thus it doesn't have all '#include' and 'using' directives required
+// for a standalone compilation.
template <class Derived, class Key, class Interface>
class InterfaceManager {
diff --git a/audio/core/all-versions/vts/functional/PolicyConfig.h b/audio/core/all-versions/vts/functional/PolicyConfig.h
new file mode 100644
index 0000000..c9e0c0d
--- /dev/null
+++ b/audio/core/all-versions/vts/functional/PolicyConfig.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+// Note: it is assumed that this file is included from AudioPrimaryHidlTest.h
+// and thus it doesn't have all '#include' and 'using' directives required
+// for a standalone compilation.
+
+#include <Serializer.h>
+
+struct PolicyConfigData {
+ android::HwModuleCollection hwModules;
+ android::DeviceVector availableOutputDevices;
+ android::DeviceVector availableInputDevices;
+ sp<android::DeviceDescriptor> defaultOutputDevice;
+};
+
+class PolicyConfig : private PolicyConfigData, public android::AudioPolicyConfig {
+ public:
+ explicit PolicyConfig(const std::string& configFileName)
+ : android::AudioPolicyConfig(hwModules, availableOutputDevices, availableInputDevices,
+ defaultOutputDevice),
+ mConfigFileName{configFileName} {
+ for (const auto& location : android::audio_get_configuration_paths()) {
+ std::string path = location + '/' + mConfigFileName;
+ if (access(path.c_str(), F_OK) == 0) {
+ mFilePath = path;
+ break;
+ }
+ }
+ mStatus = android::deserializeAudioPolicyFile(mFilePath.c_str(), this);
+ if (mStatus == OK) {
+ mPrimaryModule = getModuleFromName(DeviceManager::kPrimaryDevice);
+ // Available devices are not 'attached' to modules at this moment.
+ // Need to go over available devices and find their module.
+ for (const auto& device : availableOutputDevices) {
+ for (const auto& module : hwModules) {
+ if (module->getDeclaredDevices().indexOf(device) >= 0) {
+ mModulesWithDevicesNames.insert(module->getName());
+ break;
+ }
+ }
+ }
+ for (const auto& device : availableInputDevices) {
+ for (const auto& module : hwModules) {
+ if (module->getDeclaredDevices().indexOf(device) >= 0) {
+ mModulesWithDevicesNames.insert(module->getName());
+ break;
+ }
+ }
+ }
+ }
+ }
+ status_t getStatus() const { return mStatus; }
+ std::string getError() const {
+ if (mFilePath.empty()) {
+ return std::string{"Could not find "} + mConfigFileName +
+ " file in: " + testing::PrintToString(android::audio_get_configuration_paths());
+ } else {
+ return "Invalid config file: " + mFilePath;
+ }
+ }
+ const std::string& getFilePath() const { return mFilePath; }
+ sp<const android::HwModule> getModuleFromName(const std::string& name) const {
+ return getHwModules().getModuleFromName(name.c_str());
+ }
+ sp<const android::HwModule> getPrimaryModule() const { return mPrimaryModule; }
+ const std::set<std::string>& getModulesWithDevicesNames() const {
+ return mModulesWithDevicesNames;
+ }
+ bool haveInputProfilesInModule(const std::string& name) const {
+ auto module = getModuleFromName(name);
+ return module && !module->getInputProfiles().empty();
+ }
+
+ private:
+ const std::string mConfigFileName;
+ status_t mStatus = NO_INIT;
+ std::string mFilePath;
+ sp<const android::HwModule> mPrimaryModule = nullptr;
+ std::set<std::string> mModulesWithDevicesNames;
+};
diff --git a/audio/effect/7.0/IEffect.hal b/audio/effect/7.0/IEffect.hal
index 5b176dc..aa94f6d 100644
--- a/audio/effect/7.0/IEffect.hal
+++ b/audio/effect/7.0/IEffect.hal
@@ -56,7 +56,6 @@
*
* @return retval operation completion status.
*/
- @callflow(next={"prepareForProcessing"})
enable() generates (Result retval);
/**
@@ -64,7 +63,6 @@
*
* @return retval operation completion status.
*/
- @callflow(next={"close"})
disable() generates (Result retval);
/**
@@ -78,7 +76,7 @@
* @param device output device specification.
* @return retval operation completion status.
*/
- setDevice(bitfield<AudioDevice> device) generates (Result retval);
+ setDevice(DeviceAddress device) generates (Result retval);
/**
* Set and get volume. Used by audio framework to delegate volume control to
@@ -147,7 +145,7 @@
* @param device input device specification.
* @return retval operation completion status.
*/
- setInputDevice(bitfield<AudioDevice> device) generates (Result retval);
+ setInputDevice(DeviceAddress device) generates (Result retval);
/**
* Read audio parameters configurations for input and output buffers.
@@ -251,7 +249,6 @@
* the queue.
* @return statusMQ a message queue used for passing status from the effect.
*/
- @callflow(next={"setProcessBuffers"})
prepareForProcessing() generates (Result retval, fmq_sync<Result> statusMQ);
/**
@@ -416,6 +413,5 @@
* @return retval OK in case the success.
* INVALID_STATE if the effect was already closed.
*/
- @exit
close() generates (Result retval);
};
diff --git a/audio/effect/7.0/IVirtualizerEffect.hal b/audio/effect/7.0/IVirtualizerEffect.hal
index 0e6ff54..141b4e6 100644
--- a/audio/effect/7.0/IVirtualizerEffect.hal
+++ b/audio/effect/7.0/IVirtualizerEffect.hal
@@ -48,7 +48,7 @@
struct SpeakerAngle {
/** Speaker channel mask */
- bitfield<AudioChannelMask> mask;
+ vec<AudioChannelMask> mask;
// all angles are expressed in degrees and
// are relative to the listener.
int16_t azimuth; // 0 is the direction the listener faces
@@ -61,17 +61,17 @@
* Retrieves virtual speaker angles for the given channel mask on the
* specified device.
*/
- getVirtualSpeakerAngles(bitfield<AudioChannelMask> mask, AudioDevice device)
+ getVirtualSpeakerAngles(vec<AudioChannelMask> mask, DeviceAddress device)
generates (Result retval, vec<SpeakerAngle> speakerAngles);
/**
* Forces the virtualizer effect for the given output device.
*/
- forceVirtualizationMode(AudioDevice device) generates (Result retval);
+ forceVirtualizationMode(DeviceAddress device) generates (Result retval);
/**
* Returns audio device reflecting the current virtualization mode,
- * AUDIO_DEVICE_NONE when not virtualizing.
+     * or an address with an empty device type when not virtualizing.
*/
- getVirtualizationMode() generates (Result retval, AudioDevice device);
+ getVirtualizationMode() generates (Result retval, DeviceAddress device);
};
diff --git a/audio/effect/7.0/types.hal b/audio/effect/7.0/types.hal
index 7f5a382..fe4ee51 100644
--- a/audio/effect/7.0/types.hal
+++ b/audio/effect/7.0/types.hal
@@ -257,7 +257,7 @@
struct EffectBufferConfig {
AudioBuffer buffer;
uint32_t samplingRateHz;
- bitfield<AudioChannelMask> channels;
+ AudioChannelMask channels;
AudioFormat format;
EffectBufferAccess accessMode;
bitfield<EffectConfigParameters> mask;
@@ -276,8 +276,8 @@
};
struct EffectAuxChannelsConfig {
- bitfield<AudioChannelMask> mainChannels; // channel mask for main channels
- bitfield<AudioChannelMask> auxChannels; // channel mask for auxiliary channels
+ vec<AudioChannelMask> mainChannels; // channel mask for main channels
+ vec<AudioChannelMask> auxChannels; // channel mask for auxiliary channels
};
struct EffectOffloadParameter {
diff --git a/audio/effect/all-versions/vts/functional/Android.bp b/audio/effect/all-versions/vts/functional/Android.bp
index 7cdb18f..f4a7283 100644
--- a/audio/effect/all-versions/vts/functional/Android.bp
+++ b/audio/effect/all-versions/vts/functional/Android.bp
@@ -118,7 +118,6 @@
}
cc_test {
- enabled: false,
name: "VtsHalAudioEffectV7_0TargetTest",
defaults: ["VtsHalAudioEffectTargetTest_default"],
// Use test_config for vts suite.
@@ -126,6 +125,7 @@
test_config: "VtsHalAudioEffectV7_0TargetTest.xml",
static_libs: [
"android.hardware.audio.common@7.0",
+ "android.hardware.audio.common@7.0-enums",
"android.hardware.audio.effect@7.0",
],
data: [
diff --git a/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp b/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp
index 4787c09..b64f105 100644
--- a/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp
+++ b/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp
@@ -16,7 +16,9 @@
#define LOG_TAG "AudioEffectHidlHalTest"
#include <android-base/logging.h>
+#if MAJOR_VERSION <= 6
#include <system/audio.h>
+#endif
#include PATH(android/hardware/audio/effect/FILE_VERSION/IEffect.h)
#include PATH(android/hardware/audio/effect/FILE_VERSION/IEffectsFactory.h)
@@ -25,6 +27,10 @@
#include PATH(android/hardware/audio/effect/FILE_VERSION/types.h)
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
+#if MAJOR_VERSION >= 7
+#include <audio_policy_configuration_V7_0-enums.h>
+#include <audio_policy_configuration_V7_0.h>
+#endif
#include <common/all-versions/VersionUtils.h>
@@ -45,6 +51,12 @@
using ::android::hidl::memory::V1_0::IMemory;
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
+#if MAJOR_VERSION >= 7
+// Make an alias for enumerations generated from the APM config XSD.
+namespace xsd {
+using namespace ::audio::policy::configuration::CPP_VERSION;
+}
+#endif
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
@@ -171,7 +183,7 @@
effectsFactory = IEffectsFactory::getService(std::get<PARAM_FACTORY_NAME>(GetParam()));
ASSERT_NE(nullptr, effectsFactory.get());
- findAndCreateEffect(getEffectType());
+ ASSERT_NO_FATAL_FAILURE(findAndCreateEffect(getEffectType()));
ASSERT_NE(nullptr, effect.get());
Return<Result> ret = effect->init();
@@ -201,7 +213,7 @@
void AudioEffectHidlTest::findAndCreateEffect(const Uuid& type) {
Uuid effectUuid;
- findEffectInstance(type, &effectUuid);
+ ASSERT_NO_FATAL_FAILURE(findEffectInstance(type, &effectUuid));
Return<void> ret = effectsFactory->createEffect(
effectUuid, 1 /*session*/, 1 /*ioHandle*/,
#if MAJOR_VERSION >= 6
@@ -244,10 +256,16 @@
});
ASSERT_TRUE(ret.isOk());
ASSERT_EQ(Result::OK, retval);
+#if MAJOR_VERSION <= 6
ASSERT_TRUE(audio_channel_mask_is_valid(
static_cast<audio_channel_mask_t>(currentConfig.outputCfg.channels)));
*channelCount = audio_channel_count_from_out_mask(
static_cast<audio_channel_mask_t>(currentConfig.outputCfg.channels));
+#else
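+    // In V7 the channel mask is a string; use the helper generated from the
+    // audio policy configuration XSD to derive the channel count.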
+ *channelCount =
+ audio::policy::configuration::V7_0::getChannelCount(currentConfig.outputCfg.channels);
+ ASSERT_NE(*channelCount, 0);
+#endif
}
TEST_P(AudioEffectHidlTest, Close) {
@@ -391,7 +409,12 @@
TEST_P(AudioEffectHidlTest, SetDevice) {
description("Verify that SetDevice works for an output chain effect");
+#if MAJOR_VERSION <= 6
Return<Result> ret = effect->setDevice(mkEnumBitfield(AudioDevice::OUT_SPEAKER));
+#else
+ DeviceAddress device{.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_OUT_SPEAKER)};
+ Return<Result> ret = effect->setDevice(device);
+#endif
EXPECT_TRUE(ret.isOk());
EXPECT_EQ(Result::OK, ret);
}
@@ -441,22 +464,28 @@
TEST_P(AudioEffectHidlTest, SetInputDevice) {
description("Verify that SetInputDevice does not crash");
+#if MAJOR_VERSION <= 6
Return<Result> ret = effect->setInputDevice(mkEnumBitfield(AudioDevice::IN_BUILTIN_MIC));
+#else
+ DeviceAddress device{.deviceType = toString(xsd::AudioDevice::AUDIO_DEVICE_IN_BUILTIN_MIC)};
+ Return<Result> ret = effect->setInputDevice(device);
+#endif
EXPECT_TRUE(ret.isOk());
}
TEST_P(AudioEffectHidlTest, SetAudioSource) {
description("Verify that SetAudioSource does not crash");
+#if MAJOR_VERSION <= 6
Return<Result> ret = effect->setAudioSource(AudioSource::MIC);
+#else
+ Return<Result> ret = effect->setAudioSource(toString(xsd::AudioSource::AUDIO_SOURCE_MIC));
+#endif
EXPECT_TRUE(ret.isOk());
}
TEST_P(AudioEffectHidlTest, Offload) {
description("Verify that calling Offload method does not crash");
- EffectOffloadParameter offloadParam;
- offloadParam.isOffload = false;
- offloadParam.ioHandle = static_cast<int>(AudioHandleConsts::AUDIO_IO_HANDLE_NONE);
- Return<Result> ret = effect->offload(offloadParam);
+ Return<Result> ret = effect->offload(EffectOffloadParameter{});
EXPECT_TRUE(ret.isOk());
}
diff --git a/bluetooth/1.0/default/h4_protocol.cc b/bluetooth/1.0/default/h4_protocol.cc
index 8c24f76..43abbe4 100644
--- a/bluetooth/1.0/default/h4_protocol.cc
+++ b/bluetooth/1.0/default/h4_protocol.cc
@@ -90,6 +90,7 @@
hci_packet_type_ = static_cast<HciPacketType>(buffer[0]);
if (hci_packet_type_ != HCI_PACKET_TYPE_ACL_DATA &&
hci_packet_type_ != HCI_PACKET_TYPE_SCO_DATA &&
+ hci_packet_type_ != HCI_PACKET_TYPE_ISO_DATA &&
hci_packet_type_ != HCI_PACKET_TYPE_EVENT) {
LOG_ALWAYS_FATAL("%s: Unimplemented packet type %d", __func__,
static_cast<int>(hci_packet_type_));
diff --git a/bluetooth/1.0/default/hci_internals.h b/bluetooth/1.0/default/hci_internals.h
index 24e944f..6f7ff90 100644
--- a/bluetooth/1.0/default/hci_internals.h
+++ b/bluetooth/1.0/default/hci_internals.h
@@ -44,6 +44,10 @@
const size_t HCI_EVENT_PREAMBLE_SIZE = 2;
const size_t HCI_LENGTH_OFFSET_EVT = 1;
+// 2 bytes for handle and flags, 2 bytes for data length (Volume 4, Part E, 5.4.5)
+const size_t HCI_ISO_PREAMBLE_SIZE = 4;
+const size_t HCI_LENGTH_OFFSET_ISO = 2;
+
const size_t HCI_PREAMBLE_SIZE_MAX = HCI_ACL_PREAMBLE_SIZE;
// Event codes (Volume 2, Part E, 7.7.14)
diff --git a/bluetooth/1.0/default/hci_packetizer.cc b/bluetooth/1.0/default/hci_packetizer.cc
index 7cb3a11..78ce61d 100644
--- a/bluetooth/1.0/default/hci_packetizer.cc
+++ b/bluetooth/1.0/default/hci_packetizer.cc
@@ -26,17 +26,27 @@
namespace {
-const size_t preamble_size_for_type[] = {
- 0, HCI_COMMAND_PREAMBLE_SIZE, HCI_ACL_PREAMBLE_SIZE, HCI_SCO_PREAMBLE_SIZE,
- HCI_EVENT_PREAMBLE_SIZE};
-const size_t packet_length_offset_for_type[] = {
- 0, HCI_LENGTH_OFFSET_CMD, HCI_LENGTH_OFFSET_ACL, HCI_LENGTH_OFFSET_SCO,
- HCI_LENGTH_OFFSET_EVT};
+const size_t preamble_size_for_type[] = {0,
+ HCI_COMMAND_PREAMBLE_SIZE,
+ HCI_ACL_PREAMBLE_SIZE,
+ HCI_SCO_PREAMBLE_SIZE,
+ HCI_EVENT_PREAMBLE_SIZE,
+ HCI_ISO_PREAMBLE_SIZE};
+const size_t packet_length_offset_for_type[] = {0,
+ HCI_LENGTH_OFFSET_CMD,
+ HCI_LENGTH_OFFSET_ACL,
+ HCI_LENGTH_OFFSET_SCO,
+ HCI_LENGTH_OFFSET_EVT,
+ HCI_LENGTH_OFFSET_ISO};
size_t HciGetPacketLengthForType(HciPacketType type, const uint8_t* preamble) {
size_t offset = packet_length_offset_for_type[type];
- if (type != HCI_PACKET_TYPE_ACL_DATA) return preamble[offset];
- return (((preamble[offset + 1]) << 8) | preamble[offset]);
+ if (type == HCI_PACKET_TYPE_ACL_DATA) {
+ return (((preamble[offset + 1]) << 8) | preamble[offset]);
+ } else if (type == HCI_PACKET_TYPE_ISO_DATA) {
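+    // The ISO_Data_Load_Length field is 14 bits (Volume 4, Part E, 5.4.5), so
+    // only the lower 6 bits of the second length byte are significant.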
+ return ((((preamble[offset + 1]) & 0x3f) << 8) | preamble[offset]);
+ }
+ return preamble[offset];
}
} // namespace
diff --git a/bluetooth/1.0/default/test/h4_protocol_unittest.cc b/bluetooth/1.0/default/test/h4_protocol_unittest.cc
index 283243d..174861c 100644
--- a/bluetooth/1.0/default/test/h4_protocol_unittest.cc
+++ b/bluetooth/1.0/default/test/h4_protocol_unittest.cc
@@ -190,8 +190,10 @@
void WriteAndExpectInboundIsoData(char* payload) {
// h4 type[1] + handle[2] + size[1]
- char preamble[4] = {HCI_PACKET_TYPE_ISO_DATA, 20, 17, 0};
- preamble[3] = strlen(payload) & 0xFF;
+ char preamble[5] = {HCI_PACKET_TYPE_ISO_DATA, 19, 92, 0, 0};
+ int length = strlen(payload);
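+      // The 14-bit ISO data load length spans preamble[3] (low byte) and the
+      // lower 6 bits of preamble[4].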
+ preamble[3] = length & 0xFF;
+ preamble[4] = (length >> 8) & 0x3F;
ALOGD("%s writing", __func__);
TEMP_FAILURE_RETRY(write(fake_uart_, preamble, sizeof(preamble)));
diff --git a/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp b/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp
index 0f349a4..092038b 100644
--- a/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp
+++ b/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp
@@ -55,12 +55,17 @@
const V2_0::AudioConfiguration& audioConfig, startSession_cb _hidl_cb) {
AudioConfiguration audioConfig_2_1;
- audioConfig_2_1.codecConfig() = audioConfig.codecConfig();
- audioConfig_2_1.pcmConfig() = {
- .sampleRate = static_cast<SampleRate>(audioConfig.pcmConfig().sampleRate),
- .channelMode = audioConfig.pcmConfig().channelMode,
- .bitsPerSample = audioConfig.pcmConfig().bitsPerSample,
- .dataIntervalUs = 0};
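+  // AudioConfiguration is a safe_union: check which member is active in the
+  // 2.0 configuration and only convert that one, instead of unconditionally
+  // reading both pcmConfig and codecConfig.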
+ if (audioConfig.getDiscriminator() ==
+ V2_0::AudioConfiguration::hidl_discriminator::pcmConfig) {
+ audioConfig_2_1.pcmConfig() = {
+ .sampleRate =
+ static_cast<SampleRate>(audioConfig.pcmConfig().sampleRate),
+ .channelMode = audioConfig.pcmConfig().channelMode,
+ .bitsPerSample = audioConfig.pcmConfig().bitsPerSample,
+ .dataIntervalUs = 0};
+ } else {
+ audioConfig_2_1.codecConfig() = audioConfig.codecConfig();
+ }
return startSession_2_1(hostIf, audioConfig_2_1, _hidl_cb);
}
diff --git a/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp b/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp
index c0ec907..37d1281 100644
--- a/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp
+++ b/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp
@@ -1043,6 +1043,7 @@
} else {
EXPECT_EQ(status, BluetoothAudioStatus::UNSUPPORTED_CODEC_CONFIGURATION);
EXPECT_FALSE(dataMQ.isHandleValid());
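+      // Clear the queue so the check after startSession can verify that no
+      // data MQ gets created for an unsupported configuration.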
+ tempDataMQ.reset(nullptr);
}
};
android::hardware::bluetooth::audio::V2_1::AudioConfiguration audio_config =
@@ -1064,6 +1065,8 @@
ASSERT_TRUE(hidl_retval.isOk());
if (is_codec_config_valid) {
EXPECT_TRUE(tempDataMQ != nullptr && tempDataMQ->isValid());
+ } else {
+ EXPECT_TRUE(tempDataMQ == nullptr);
}
EXPECT_TRUE(audio_provider_2_1_->endSession().isOk());
} // uint32_t (data interval in microseconds)
@@ -1132,6 +1135,7 @@
} else {
EXPECT_EQ(status, BluetoothAudioStatus::UNSUPPORTED_CODEC_CONFIGURATION);
EXPECT_FALSE(dataMQ.isHandleValid());
+ tempDataMQ.reset(nullptr);
}
};
android::hardware::bluetooth::audio::V2_1::AudioConfiguration audio_config =
diff --git a/identity/support/src/IdentityCredentialSupport.cpp b/identity/support/src/IdentityCredentialSupport.cpp
index fbf3ab1..57cdc98 100644
--- a/identity/support/src/IdentityCredentialSupport.cpp
+++ b/identity/support/src/IdentityCredentialSupport.cpp
@@ -55,6 +55,7 @@
#include <keymaster/contexts/soft_attestation_cert.h>
#include <keymaster/keymaster_tags.h>
#include <keymaster/km_openssl/attestation_utils.h>
+#include <keymaster/km_openssl/certificate_utils.h>
namespace android {
namespace hardware {
@@ -962,6 +963,18 @@
return {};
}
+ ::keymaster::X509_NAME_Ptr subjectName;
+ if (KM_ERROR_OK !=
+ ::keymaster::make_name_from_str("Android Identity Credential Key", &subjectName)) {
+ LOG(ERROR) << "Cannot create attestation subject";
+ return {};
+ }
+
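+    // i2d_X509_NAME with a NULL output buffer returns the DER-encoded length
+    // (used here to size the vector); the second call below writes the actual
+    // encoding into the vector.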
+ vector<uint8_t> subject(i2d_X509_NAME(subjectName.get(), NULL));
+ unsigned char* subjectPtr = subject.data();
+
+ i2d_X509_NAME(subjectName.get(), &subjectPtr);
+
::keymaster::AuthorizationSet auth_set(
::keymaster::AuthorizationSetBuilder()
.Authorization(::keymaster::TAG_ATTESTATION_CHALLENGE, challenge.data(),
@@ -976,6 +989,8 @@
// includes app id.
.Authorization(::keymaster::TAG_ATTESTATION_APPLICATION_ID,
applicationId.data(), applicationId.size())
+ .Authorization(::keymaster::TAG_CERTIFICATE_SUBJECT, subject.data(),
+ subject.size())
.Authorization(::keymaster::TAG_USAGE_EXPIRE_DATETIME, expireTimeMilliSeconds));
// Unique id and device id is not applicable for identity credential attestation,
@@ -1010,10 +1025,9 @@
//
::keymaster::PureSoftKeymasterContext context(KM_SECURITY_LEVEL_TRUSTED_ENVIRONMENT);
- error = generate_attestation_from_EVP_with_subject_name(
- key, swEnforced, hwEnforced, auth_set, context, ::keymaster::kCurrentKeymasterVersion,
- *attestation_chain, *attestation_signing_key, "Android Identity Credential Key",
- &cert_chain_out);
+ error = generate_attestation_from_EVP(key, swEnforced, hwEnforced, auth_set, context,
+ ::keymaster::kCurrentKeymasterVersion, *attestation_chain,
+ *attestation_signing_key, &cert_chain_out);
if (KM_ERROR_OK != error || !cert_chain_out) {
LOG(ERROR) << "Error generate attestation from EVP key" << error;
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index 4403a57..ee103ba 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -59,13 +59,13 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index f301065..6cf9073 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -290,10 +290,8 @@
}
nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
- const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size);
- // Copy memory to force the native_handle_t to be copied.
- auto copiedMemory = hidlMemory;
- return copiedMemory;
+ return hidl_memory(memory.name, NN_TRY(hal::utils::hidlHandleFromSharedHandle(memory.handle)),
+ memory.size);
}
nn::GeneralResult<Model> convert(const nn::Model& model) {
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index 8292f17..671416b 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -157,8 +157,8 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
- nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
- const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+ nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+ const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
@@ -181,8 +181,8 @@
}
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
- nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
- const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+ nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+ const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
<< "IDevice::prepareModelFromCache not supported on 1.0 HAL service";
}
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index f55ac6c..c1e95fe1a 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -59,13 +59,13 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index 03b0d6e..a0378c9 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -159,8 +159,8 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
- nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
- const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+ nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+ const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
@@ -184,8 +184,8 @@
}
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
- nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
- const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+ nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+ const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
<< "IDevice::prepareModelFromCache not supported on 1.1 HAL service";
}
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index e6de011..24911fe 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -44,11 +44,11 @@
GeneralResult<Extension> convert(const hal::V1_2::Extension& extension);
GeneralResult<Extension::OperandTypeInformation> convert(
const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation);
-GeneralResult<NativeHandle> convert(const hardware::hidl_handle& handle);
+GeneralResult<SharedHandle> convert(const hardware::hidl_handle& handle);
GeneralResult<std::vector<Extension>> convert(
const hardware::hidl_vec<hal::V1_2::Extension>& extensions);
-GeneralResult<std::vector<NativeHandle>> convert(
+GeneralResult<std::vector<SharedHandle>> convert(
const hardware::hidl_vec<hardware::hidl_handle>& handles);
GeneralResult<std::vector<OutputShape>> convert(
const hardware::hidl_vec<hal::V1_2::OutputShape>& outputShapes);
@@ -77,10 +77,10 @@
nn::GeneralResult<Extension> convert(const nn::Extension& extension);
nn::GeneralResult<Extension::OperandTypeInformation> convert(
const nn::Extension::OperandTypeInformation& operandTypeInformation);
-nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle);
+nn::GeneralResult<hidl_handle> convert(const nn::SharedHandle& handle);
nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions);
-nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles);
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles);
nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
} // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index eb317b1..bbd5343 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -68,13 +68,13 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 378719a..08c94de 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -257,16 +257,15 @@
};
}
-GeneralResult<NativeHandle> convert(const hidl_handle& handle) {
- auto* cloned = native_handle_clone(handle.getNativeHandle());
- return ::android::NativeHandle::create(cloned, /*ownsHandle=*/true);
+GeneralResult<SharedHandle> convert(const hidl_handle& hidlHandle) {
+ return hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle());
}
GeneralResult<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
return convertVec(extensions);
}
-GeneralResult<std::vector<NativeHandle>> convert(const hidl_vec<hidl_handle>& handles) {
+GeneralResult<std::vector<SharedHandle>> convert(const hidl_vec<hidl_handle>& handles) {
return convertVec(handles);
}
@@ -487,18 +486,15 @@
};
}
-nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle) {
- const auto hidlHandle = hidl_handle(handle->handle());
- // Copy memory to force the native_handle_t to be copied.
- auto copiedHandle = hidlHandle;
- return copiedHandle;
+nn::GeneralResult<hidl_handle> convert(const nn::SharedHandle& handle) {
+ return hal::utils::hidlHandleFromSharedHandle(handle);
}
nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
return convertVec(extensions);
}
-nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles) {
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles) {
return convertVec(handles);
}
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index ca236f1..517d61f 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -257,8 +257,8 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
- nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
@@ -286,8 +286,8 @@
}
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
- nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
const auto hidlModelCache = NN_TRY(convert(modelCache));
const auto hidlDataCache = NN_TRY(convert(dataCache));
const auto hidlToken = token;
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
index 2f6c46a..0f5234b 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
@@ -61,13 +61,13 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index c215f39..5e3d5c2 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -179,8 +179,8 @@
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
@@ -211,8 +211,8 @@
}
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
const auto hidlDeadline = NN_TRY(convert(deadline));
const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index df9b280..2781053 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -53,15 +53,6 @@
return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
}
-nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
- const std::vector<nn::SyncFence>& syncFences) {
- hidl_vec<hidl_handle> handles(syncFences.size());
- for (size_t i = 0; i < syncFences.size(); ++i) {
- handles[i] = NN_TRY(V1_2::utils::convert(syncFences[i].getHandle()));
- }
- return handles;
-}
-
nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
return std::make_pair(NN_TRY(validatedConvertToCanonical(timingLaunched)),
@@ -221,7 +212,7 @@
NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
const auto hidlRequest = NN_TRY(convert(requestInShared));
- const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
+ const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
const auto hidlMeasure = NN_TRY(V1_2::utils::convert(measure));
const auto hidlDeadline = NN_TRY(convert(deadline));
const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 254a3d4..43bb0c6 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
+#include <cutils/native_handle.h>
+#include <hidl/HidlSupport.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <functional>
@@ -57,6 +59,13 @@
std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
const std::vector<nn::Operation>& operations);
+nn::GeneralResult<nn::Memory> createSharedMemoryFromHidlMemory(const hidl_memory& memory);
+
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle);
+nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle);
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+ const std::vector<nn::SyncFence>& fences);
+
} // namespace android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 4f1afb9..4a84e4d 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -63,13 +63,13 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;
nn::GeneralResult<nn::SharedBuffer> allocate(
@@ -81,12 +81,12 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
nn::Priority priority, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
bool blocking, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
nn::GeneralResult<nn::SharedBuffer> allocateInternal(
bool blocking, const nn::BufferDesc& desc,
const std::vector<nn::SharedPreparedModel>& preparedModels,
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 2565972..c04c8df 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -19,6 +19,7 @@
#include "HandleError.h"
#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
@@ -247,4 +248,67 @@
return nn::countNumberOfConsumers(numberOfOperands, operations);
}
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
+ if (handle == nullptr) {
+ return {};
+ }
+
+ std::vector<base::unique_fd> fds;
+ fds.reserve(handle->fds.size());
+ for (const auto& fd : handle->fds) {
+ int dupFd = dup(fd);
+ if (dupFd == -1) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+ }
+ fds.emplace_back(dupFd);
+ }
+
+ native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+ if (nativeHandle == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
+ }
+ for (size_t i = 0; i < fds.size(); ++i) {
+ nativeHandle->data[i] = fds[i].release();
+ }
+ std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+
+ hidl_handle hidlHandle;
+ hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
+ return hidlHandle;
+}
+
+nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+ if (handle == nullptr) {
+ return nullptr;
+ }
+
+ std::vector<base::unique_fd> fds;
+ fds.reserve(handle->numFds);
+ for (int i = 0; i < handle->numFds; ++i) {
+ int dupFd = dup(handle->data[i]);
+ if (dupFd == -1) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+ }
+ fds.emplace_back(dupFd);
+ }
+
+ std::vector<int> ints(&handle->data[handle->numFds],
+ &handle->data[handle->numFds + handle->numInts]);
+
+ return std::make_shared<const nn::Handle>(nn::Handle{
+ .fds = std::move(fds),
+ .ints = std::move(ints),
+ });
+}
+
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+ const std::vector<nn::SyncFence>& syncFences) {
+ hidl_vec<hidl_handle> handles(syncFences.size());
+ for (size_t i = 0; i < syncFences.size(); ++i) {
+ handles[i] =
+ NN_TRY(hal::utils::hidlHandleFromSharedHandle(syncFences[i].getSharedHandle()));
+ }
+ return handles;
+}
+
} // namespace android::hardware::neuralnetworks::utils
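
For reference, a minimal sketch (not part of this change) showing how the new handle helpers are intended to compose. The example namespace and the pipe-backed nn::Handle are assumptions for illustration only; since both helpers dup the underlying descriptors, the canonical and HIDL copies own independent fds and can be destroyed in either order.

    #include <android-base/logging.h>
    #include <android-base/unique_fd.h>
    #include <nnapi/Result.h>
    #include <nnapi/Types.h>
    #include <nnapi/hal/CommonUtils.h>
    #include <unistd.h>

    #include <memory>
    #include <utility>
    #include <vector>

    namespace example {

    namespace nn = ::android::nn;
    namespace hal_utils = ::android::hardware::neuralnetworks::utils;

    // Round-trips a canonical handle through the HIDL representation.
    nn::GeneralResult<nn::SharedHandle> roundTripHandle() {
        // Illustrative input: a freshly created pipe wrapped in an nn::Handle.
        int pipeFds[2];
        if (pipe(pipeFds) != 0) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "pipe failed";
        }
        std::vector<::android::base::unique_fd> fds;
        fds.emplace_back(pipeFds[0]);
        fds.emplace_back(pipeFds[1]);
        const auto canonical = std::make_shared<const nn::Handle>(
                nn::Handle{.fds = std::move(fds), .ints = {1, 2}});

        // Canonical -> HIDL: dups every fd into a native_handle_t owned by the hidl_handle.
        const ::android::hardware::hidl_handle hidlHandle =
                NN_TRY(hal_utils::hidlHandleFromSharedHandle(canonical));

        // HIDL -> canonical: dups the fds again into base::unique_fd wrappers.
        nn::SharedHandle restored =
                NN_TRY(hal_utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle()));

        CHECK_EQ(restored->fds.size(), canonical->fds.size());
        CHECK_EQ(restored->ints.size(), canonical->ints.size());
        return restored;
    }

    }  // namespace example
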
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 95662d9..26025a5 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -161,8 +161,8 @@
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
auto self = shared_from_this();
ResilientPreparedModel::Factory makePreparedModel =
[device = std::move(self), model, preference, priority, deadline, modelCache, dataCache,
@@ -174,8 +174,8 @@
}
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
auto self = shared_from_this();
ResilientPreparedModel::Factory makePreparedModel =
[device = std::move(self), deadline, modelCache, dataCache,
@@ -202,8 +202,8 @@
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
nn::Priority priority, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
token](const nn::IDevice& device) {
return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
@@ -214,8 +214,8 @@
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
bool blocking, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
};
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
index 87d27c7..402598c 100644
--- a/neuralnetworks/utils/service/Android.bp
+++ b/neuralnetworks/utils/service/Android.bp
@@ -26,6 +26,7 @@
"neuralnetworks_utils_hal_1_1",
"neuralnetworks_utils_hal_1_2",
"neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_common",
],
shared_libs: [
"android.hardware.neuralnetworks@1.0",
diff --git a/radio/1.6/IRadio.hal b/radio/1.6/IRadio.hal
index a093dee..8afbf22 100644
--- a/radio/1.6/IRadio.hal
+++ b/radio/1.6/IRadio.hal
@@ -114,6 +114,9 @@
* addresses of the existing data connection. The format is defined in RFC-4291 section 2.2.
* For example, "192.0.1.3" or "2001:db8::1". This parameter must be ignored unless reason
* is DataRequestReason:HANDOVER.
+ * @param pduSessionId The pdu session id to be used for this data call. A value of 0 means
+ * no pdu session id was attached to this call.
+ * Reference: 3GPP TS 24.007 section 11.2.3.1b
*
* Response function is IRadioResponse.setupDataCallResponse_1_6()
*
@@ -121,7 +124,8 @@
*/
oneway setupDataCall_1_6(int32_t serial, AccessNetwork accessNetwork,
DataProfileInfo dataProfileInfo, bool roamingAllowed,
- DataRequestReason reason, vec<LinkAddress> addresses, vec<string> dnses);
+ DataRequestReason reason, vec<LinkAddress> addresses, vec<string> dnses,
+ int32_t pduSessionId);
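
For illustration only, a hypothetical caller-side sketch of the extended signature. The proxy variable, the V1_5/V1_2 argument types, and the EUTRAN/NORMAL values follow the VTS test later in this patch and are assumptions rather than part of this change; a pduSessionId of 0 indicates that no pdu session id is attached.

    #include <android/hardware/radio/1.6/IRadio.h>

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::hidl_string;
    using ::android::hardware::hidl_vec;
    namespace radio = ::android::hardware::radio;

    // Hypothetical helper: sets up a data call without attaching a pdu session id.
    void setupCallWithoutPduSession(const sp<radio::V1_6::IRadio>& radioProxy, int32_t serial,
                                    const radio::V1_5::DataProfileInfo& dataProfileInfo) {
        const hidl_vec<radio::V1_5::LinkAddress> addresses;  // only used when reason is HANDOVER
        const hidl_vec<hidl_string> dnses;                   // only used when reason is HANDOVER
        Return<void> ret = radioProxy->setupDataCall_1_6(
                serial, radio::V1_5::AccessNetwork::EUTRAN, dataProfileInfo,
                /*roamingAllowed=*/false, radio::V1_2::DataRequestReason::NORMAL,
                addresses, dnses, /*pduSessionId=*/0);
        if (!ret.isOk()) {
            // Transport failure; IRadioResponse.setupDataCallResponse_1_6() will not be invoked.
        }
    }
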
/**
* Send an SMS message
@@ -290,12 +294,12 @@
* @param serial Serial number of request.
* @param callId The identifier of the data call which is provided in SetupDataCallResult
*
- * Response function is IRadioResponse.beginHandoverResponse()
+ * Response function is IRadioResponse.startHandoverResponse()
*/
- oneway beginHandover(int32_t serial, int32_t callId);
+ oneway startHandover(int32_t serial, int32_t callId);
/**
- * Indicates that a handover was cancelled after a call to IRadio::beginHandover.
+ * Indicates that a handover was cancelled after a call to IRadio::startHandover.
*
* Since the handover was unsuccessful, the modem retains ownership over any of the resources
* being transferred and is still responsible for releasing them.
@@ -320,4 +324,29 @@
*/
oneway setAllowedNetworkTypeBitmap(
uint32_t serial, bitfield<RadioAccessFamily> networkTypeBitmap);
+
+ /**
+ * Control data throttling at modem.
+ * - DataThrottlingAction:NO_DATA_THROTTLING should clear any existing
+ * data throttling within the requested completion window.
+ * - DataThrottlingAction:THROTTLE_SECONDARY_CARRIER: Remove any existing
+ * throttling on anchor carrier and achieve maximum data throttling on
+ * secondary carrier within the requested completion window.
+ * - DataThrottlingAction:THROTTLE_ANCHOR_CARRIER: Disable secondary
+ * carrier and achieve maximum data throttling on anchor carrier within
+ * the requested completion window.
+ * - DataThrottlingAction:HOLD: Immediately hold on to current level of
+ * throttling.
+ *
+ * @param serial Serial number of request.
+ * @param dataThrottlingAction DataThrottlingAction as defined in types.hal
+ * @param completionWindowSecs window, in seconds, in which the requested
+ * throttling action has to be achieved. This must be 0 when
+ * dataThrottlingAction is DataThrottlingAction:HOLD.
+ *
+ * Response function is IRadioResponse.setDataThrottlingResponse()
+ */
+ oneway setDataThrottling(int32_t serial,
+ DataThrottlingAction dataThrottlingAction,
+ int32_t completionWindowSecs);
};
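
For illustration only, a hypothetical caller-side sketch of setDataThrottling; the helper names and the 60 second window are assumptions, not part of this change. Note that HOLD requires a completion window of 0, as stated in the contract above.

    #include <android/hardware/radio/1.6/IRadio.h>

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::radio::V1_6::DataThrottlingAction;
    using ::android::hardware::radio::V1_6::IRadio;

    // Hypothetical helper: request maximum throttling on the secondary carrier,
    // to be achieved within a 60 second completion window.
    void throttleSecondaryCarrier(const sp<IRadio>& radioProxy, int32_t serial) {
        Return<void> ret = radioProxy->setDataThrottling(
                serial, DataThrottlingAction::THROTTLE_SECONDARY_CARRIER,
                /*completionWindowSecs=*/60);
        if (!ret.isOk()) {
            // Transport failure; IRadioResponse.setDataThrottlingResponse() will not be invoked.
        }
    }

    // Hypothetical helper: freeze throttling at its current level. The completion
    // window must be 0 for DataThrottlingAction:HOLD.
    void holdCurrentThrottling(const sp<IRadio>& radioProxy, int32_t serial) {
        radioProxy->setDataThrottling(serial, DataThrottlingAction::HOLD,
                                      /*completionWindowSecs=*/0);
    }
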
diff --git a/radio/1.6/IRadioResponse.hal b/radio/1.6/IRadioResponse.hal
index 0379e00..5a71c1f 100644
--- a/radio/1.6/IRadioResponse.hal
+++ b/radio/1.6/IRadioResponse.hal
@@ -241,7 +241,7 @@
/**
* @param info Response info struct containing response type, serial no. and error
- * @param id The allocated id. On an error, this is set to -1
+ * @param id The allocated id. On an error, this is set to 0.
*
* Valid errors returned:
* RadioError:NONE
@@ -275,7 +275,7 @@
* RadioError:REQUEST_NOT_SUPPORTED
* RadioError:INVALID_CALL_ID
*/
- oneway beginHandoverResponse(RadioResponseInfo info);
+ oneway startHandoverResponse(RadioResponseInfo info);
/**
* @param info Response info struct containing response type, serial no. and error
@@ -306,4 +306,15 @@
* RadioError:NO_RESOURCES
*/
oneway setAllowedNetworkTypeBitmapResponse(RadioResponseInfo info);
+
+ /**
+ * @param info Response info struct containing response type, serial no. and error
+ *
+ * Valid errors returned:
+ * RadioError:NONE
+ * RadioError:RADIO_NOT_AVAILABLE
+ * RadioError:MODEM_ERR
+ * RadioError:INVALID_ARGUMENTS
+ */
+ oneway setDataThrottlingResponse(RadioResponseInfo info);
};
diff --git a/radio/1.6/types.hal b/radio/1.6/types.hal
index 32da295..556d8a3 100644
--- a/radio/1.6/types.hal
+++ b/radio/1.6/types.hal
@@ -18,7 +18,10 @@
import @1.0::RadioError;
import @1.0::RadioResponseType;
-import @1.5::SetupDataCallResult;
+import @1.4::DataCallFailCause;
+import @1.4::DataConnActiveStatus;
+import @1.4::PdpProtocolType;
+import @1.5::LinkAddress;
import android.hidl.safe_union@1.0::Monostate;
@@ -240,8 +243,72 @@
NO_FALLBACK_RETRY_SETUP_NORMAL = 3
};
+/**
+ * Overwritten from @1.5::SetupDataCallResult in order to change the suggestedRetryTime
+ * to a 64-bit value. In the future, this must be extended instead of overwritten.
+ * Also added defaultQos, qosSessions, and handoverFailureMode in this version.
+ */
struct SetupDataCallResult {
- @1.5::SetupDataCallResult base;
+ /** Data call fail cause. DataCallFailCause.NONE if no error. */
+ DataCallFailCause cause;
+
+ /**
+ * If cause is not DataCallFailCause.NONE, this field indicates the network suggested data
+ * retry back-off time in milliseconds. Negative value indicates network does not give any
+ * suggestion. 0 indicates retry should be performed immediately. 0x7fffffffffffffff indicates
+ * the device should not retry data setup anymore.
+ */
+ int64_t suggestedRetryTime;
+
+ /** Context ID, uniquely identifies this data connection. */
+ int32_t cid;
+
+ /** Data connection active status. */
+ DataConnActiveStatus active;
+
+ /**
+ * PDP protocol type. If cause is DataCallFailCause.ONLY_SINGLE_BEARER_ALLOWED, this is the
+ * protocol type supported, such as "IP" or "IPV6".
+ */
+ PdpProtocolType type;
+
+ /** The network interface name. */
+ string ifname;
+
+ /**
+ * List of link address.
+ */
+ vec<LinkAddress> addresses;
+
+ /**
+ * List of DNS server addresses, e.g., "192.0.1.3" or "192.0.1.11 2001:db8::1". Empty if no DNS
+ * server addresses were returned.
+ */
+ vec<string> dnses;
+
+ /**
+ * List of default gateway addresses, e.g., "192.0.1.3" or "192.0.1.11 2001:db8::1".
+ * When empty, the addresses represent point-to-point connections.
+ */
+ vec<string> gateways;
+
+ /**
+ * List of P-CSCF (Proxy Call Session Control Function) addresses via PCO (Protocol
+ * Configuration Options), e.g., "2001:db8::1 2001:db8::2 2001:db8::3". Empty if not IMS client.
+ */
+ vec<string> pcscf;
+
+ /**
+ * MTU received from network for IPv4.
+ * Value <= 0 means network has either not sent a value or sent an invalid value.
+ */
+ int32_t mtuV4;
+
+ /**
+ * MTU received from network for IPv6.
+ * Value <= 0 means network has either not sent a value or sent an invalid value.
+ */
+ int32_t mtuV6;
/** Default bearer QoS. Applicable to LTE and NR */
Qos defaultQos;
@@ -328,3 +395,25 @@
*/
uint32_t secondaryUplinkCapacityKbps;
};
+
+enum DataThrottlingAction : int32_t {
+ /** Clear all existing data throttling. */
+ NO_DATA_THROTTLING = 0,
+
+ /**
+ * Enact secondary carrier data throttling and remove any existing data
+ * throttling on anchor carrier.
+ */
+ THROTTLE_SECONDARY_CARRIER = 1,
+
+ /**
+ * Enact anchor carrier data throttling and disable data on secondary
+ * carrier if currently enabled.
+ */
+ THROTTLE_ANCHOR_CARRIER = 2,
+
+ /**
+ * Immediately hold on to current level of throttling.
+ */
+ HOLD = 3
+};
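
For illustration only, a sketch of how a framework-side client is expected to read the signed 64-bit suggestedRetryTime defined above; the function name and fallback policy are hypothetical, not part of this change.

    #include <android/hardware/radio/1.6/types.h>

    #include <cstdint>

    using ::android::hardware::radio::V1_6::SetupDataCallResult;

    // Sentinel documented above: the device must not retry data setup anymore.
    constexpr int64_t kNoRetry = 0x7fffffffffffffff;

    // Hypothetical policy hook: returns the retry delay in milliseconds, or a
    // negative value if the caller should not retry at all.
    int64_t pickRetryDelayMillis(const SetupDataCallResult& result, int64_t defaultDelayMillis) {
        if (result.suggestedRetryTime == kNoRetry) {
            return -1;  // network says: do not retry this data setup anymore
        }
        if (result.suggestedRetryTime < 0) {
            return defaultDelayMillis;  // no network suggestion: use the platform policy
        }
        return result.suggestedRetryTime;  // 0 = retry immediately, otherwise delay in ms
    }
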
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
index 6547611..d3ffba9 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
@@ -57,7 +57,7 @@
::android::hardware::radio::V1_2::DataRequestReason::NORMAL;
Return<void> res = radio_v1_6->setupDataCall_1_6(serial, accessNetwork, dataProfileInfo,
- roamingAllowed, reason, addresses, dnses);
+ roamingAllowed, reason, addresses, dnses, /*pduSessionId=*/0);
ASSERT_OK(res);
EXPECT_EQ(std::cv_status::no_timeout, wait());
@@ -295,3 +295,69 @@
::android::hardware::radio::V1_6::RadioError::INTERNAL_ERR,
::android::hardware::radio::V1_6::RadioError::NONE}));
}
+
+/*
+ * Test IRadio.setDataThrottling() for the response returned.
+ */
+TEST_P(RadioHidlTest_v1_6, setDataThrottling) {
+ serial = GetRandomSerialNumber();
+
+ Return<void> res = radio_v1_6->setDataThrottling(
+ serial, DataThrottlingAction::THROTTLE_SECONDARY_CARRIER, 60);
+ ASSERT_OK(res);
+
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_6->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_6->rspInfo.serial);
+ ASSERT_TRUE(
+ CheckAnyOfErrors(radioRsp_v1_6->rspInfo.error,
+ {::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
+ ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
+ ::android::hardware::radio::V1_6::RadioError::NONE,
+ ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS}));
+
+ serial = GetRandomSerialNumber();
+
+ res = radio_v1_6->setDataThrottling(serial, DataThrottlingAction::THROTTLE_ANCHOR_CARRIER, 60);
+ ASSERT_OK(res);
+
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_6->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_6->rspInfo.serial);
+ ASSERT_TRUE(
+ CheckAnyOfErrors(radioRsp_v1_6->rspInfo.error,
+ {::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
+ ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
+ ::android::hardware::radio::V1_6::RadioError::NONE,
+ ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS}));
+
+ serial = GetRandomSerialNumber();
+
+ res = radio_v1_6->setDataThrottling(serial, DataThrottlingAction::HOLD, 0);
+ ASSERT_OK(res);
+
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_6->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_6->rspInfo.serial);
+ ASSERT_TRUE(
+ CheckAnyOfErrors(radioRsp_v1_6->rspInfo.error,
+ {::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
+ ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
+ ::android::hardware::radio::V1_6::RadioError::NONE,
+ ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS}));
+
+ serial = GetRandomSerialNumber();
+
+ res = radio_v1_6->setDataThrottling(serial, DataThrottlingAction::NO_DATA_THROTTLING, 60);
+ ASSERT_OK(res);
+
+ EXPECT_EQ(std::cv_status::no_timeout, wait());
+ EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_6->rspInfo.type);
+ EXPECT_EQ(serial, radioRsp_v1_6->rspInfo.serial);
+ ASSERT_TRUE(
+ CheckAnyOfErrors(radioRsp_v1_6->rspInfo.error,
+ {::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
+ ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
+ ::android::hardware::radio::V1_6::RadioError::NONE,
+ ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS}));
+}
\ No newline at end of file
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h b/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
index 6189be6..fcf679c 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
+++ b/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
@@ -784,7 +784,7 @@
Return<void> releasePduSessionIdResponse(
const ::android::hardware::radio::V1_6::RadioResponseInfo& info);
- Return<void> beginHandoverResponse(
+ Return<void> startHandoverResponse(
const ::android::hardware::radio::V1_6::RadioResponseInfo& info);
Return<void> cancelHandoverResponse(
@@ -792,6 +792,9 @@
Return<void> setAllowedNetworkTypeBitmapResponse(
const ::android::hardware::radio::V1_6::RadioResponseInfo& info);
+
+ Return<void> setDataThrottlingResponse(
+ const ::android::hardware::radio::V1_6::RadioResponseInfo& info);
};
/* Callback class for radio indication */
diff --git a/radio/1.6/vts/functional/radio_response.cpp b/radio/1.6/vts/functional/radio_response.cpp
index 18cda6a..788038a 100644
--- a/radio/1.6/vts/functional/radio_response.cpp
+++ b/radio/1.6/vts/functional/radio_response.cpp
@@ -1136,7 +1136,7 @@
return Void();
}
-Return<void> RadioResponse_v1_6::beginHandoverResponse(
+Return<void> RadioResponse_v1_6::startHandoverResponse(
const ::android::hardware::radio::V1_6::RadioResponseInfo& info) {
rspInfo = info;
parent_v1_6.notify(info.serial);
@@ -1156,3 +1156,10 @@
parent_v1_6.notify(info.serial);
return Void();
}
+
+Return<void> RadioResponse_v1_6::setDataThrottlingResponse(
+ const ::android::hardware::radio::V1_6::RadioResponseInfo& info) {
+ rspInfo = info;
+ parent_v1_6.notify(info.serial);
+ return Void();
+}