Merge commit '38014a7c' into lmp-dev
Conflicts:
include/hardware/sensors.h
Change-Id: I21517f717761af7f52c003d535e86059ebb75f05
diff --git a/include/hardware/activity_recognition.h b/include/hardware/activity_recognition.h
new file mode 100644
index 0000000..8f99459
--- /dev/null
+++ b/include/hardware/activity_recognition.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Activity Recognition HAL. The goal is to provide low power, low latency, always-on activity
+ * recognition implemented in hardware (i.e. these activity recognition algorithms/classifiers
+ * should NOT be run on the AP). By low power we mean that this may be activated 24/7 without
+ * impacting the battery drain speed (goal in order of 1mW including the power for sensors).
+ * This HAL does not specify the input sources that are used towards detecting these activities.
+ * It has one monitor interface which can be used to batch activities for always-on
+ * activity_recognition and if the latency is zero, the same interface can be used for low latency
+ * detection.
+ */
+
+#ifndef ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H
+#define ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H
+
+#include <hardware/hardware.h>
+
+__BEGIN_DECLS
+
+#define ACTIVITY_RECOGNITION_HEADER_VERSION 1
+#define ACTIVITY_RECOGNITION_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION_2(0, 1, ACTIVITY_RECOGNITION_HEADER_VERSION)
+
+#define ACTIVITY_RECOGNITION_HARDWARE_MODULE_ID "activity_recognition"
+#define ACTIVITY_RECOGNITION_HARDWARE_INTERFACE "activity_recognition_hw_if"
+
+/*
+ * Define types for various activities. Multiple activities may be active at the same time and
+ * sometimes none of these activities may be active.
+ *
+ * Each activity has a corresponding type. Only activities that are defined here should use
+ * the android.activity_recognition.* prefix. OEM-defined activities should not use this prefix.
+ * Activity type of OEM-defined activities should start with the reverse domain name of the entity
+ * defining the activity.
+ *
+ * When android introduces a new activity type that can potentially replace an OEM-defined activity
+ * type, the OEM must use the official activity type on versions of the HAL that support this new
+ * official activity type.
+ *
+ * Example (made up): Suppose Google's Glass team wants to detect nodding activity.
+ * - Such an activity is not officially supported in android L
+ * - Glass devices launching on L can implement a custom activity with
+ * type = "com.google.glass.nodding"
+ * - In M android release, if android decides to define ACTIVITY_TYPE_NODDING, those types
+ * should replace the Glass-team-specific types in all future launches.
+ * - When launching glass on the M release, Google should now use the official activity type
+ * - This way, other applications can use this activity.
+ */
+
+#define ACTIVITY_TYPE_IN_VEHICLE "android.activity_recognition.in_vehicle"
+
+#define ACTIVITY_TYPE_ON_BICYCLE "android.activity_recognition.on_bicycle"
+
+#define ACTIVITY_TYPE_WALKING "android.activity_recognition.walking"
+
+#define ACTIVITY_TYPE_RUNNING "android.activity_recognition.running"
+
+#define ACTIVITY_TYPE_STILL "android.activity_recognition.still"
+
+#define ACTIVITY_TYPE_TILTING "android.activity_recognition.tilting"
+
+/* Values for activity_event.event_types. */
+enum {
+ /*
+ * A flush_complete event which indicates that a flush() has been successfully completed. This
+ * does not correspond to any activity/event. An event of this type should be added to the end
+ * of a batch FIFO and it indicates that all the events in the batch FIFO have been successfully
+ * reported to the framework. An event of this type should be generated only if flush() has been
+ * explicitly called and if the FIFO is empty at the time flush() is called it should trivially
+ * return a flush_complete_event to indicate that the FIFO is empty.
+ *
+ * A flush complete event should have the following parameters set.
+ * activity_event_t.event_type = ACTIVITY_EVENT_FLUSH_COMPLETE
+ * activity_event_t.activity = 0
+ * activity_event_t.timestamp = 0
+ * activity_event_t.reserved = 0
+ * See (*flush)() for more details.
+ */
+ ACTIVITY_EVENT_FLUSH_COMPLETE = 0,
+
+ /* Signifies entering an activity. */
+ ACTIVITY_EVENT_ENTER = 1,
+
+ /* Signifies exiting an activity. */
+ ACTIVITY_EVENT_EXIT = 2
+};
+
+/*
+ * Each event is a separate activity with event_type indicating whether this activity has started
+ * or ended. Eg event: (event_type="enter", activity="ON_FOOT", timestamp)
+ */
+typedef struct activity_event {
+ /* One of the ACTIVITY_EVENT_* constants defined above. */
+ uint32_t event_type;
+
+ /*
+ * Index of the activity in the list returned by get_supported_activities_list. If this event
+ * is a flush complete event, this should be set to zero.
+ */
+ uint32_t activity;
+
+ /* Time at which the transition/event has occurred in nanoseconds using elapsedRealTimeNano. */
+ int64_t timestamp;
+
+ /* Set to zero. */
+ int32_t reserved[4];
+} activity_event_t;
+
+typedef struct activity_recognition_module {
+ /**
+ * Common methods of the activity recognition module. This *must* be the first member of
+ * activity_recognition_module as users of this structure will cast a hw_module_t to
+ * activity_recognition_module pointer in contexts where it's known the hw_module_t
+ * references an activity_recognition_module.
+ */
+ hw_module_t common;
+
+ /*
+ * List of all activities supported by this module including OEM defined activities. Each
+ * activity is represented using a string defined above. Each string should be null terminated.
+ * The index of the activity in this array is used as a "handle" for enabling/disabling and
+ * event delivery.
+ * Return value is the size of this list.
+ */
+ int (*get_supported_activities_list)(struct activity_recognition_module* module,
+ char const* const* *activity_list);
+} activity_recognition_module_t;
+
+struct activity_recognition_device;
+
+typedef struct activity_recognition_callback_procs {
+ // Callback for activity_data. This is guaranteed to not invoke any HAL methods.
+ // Memory allocated for the events can be reused after this method returns.
+ // events - Array of activity_event_t s that are reported.
+ // count - size of the array.
+ void (*activity_callback)(const struct activity_recognition_callback_procs* procs,
+ const activity_event_t* events, int count);
+} activity_recognition_callback_procs_t;
+
+typedef struct activity_recognition_device {
+ /**
+ * Common methods of the activity recognition device. This *must* be the first member of
+ * activity_recognition_device as users of this structure will cast a hw_device_t to
+ * activity_recognition_device pointer in contexts where it's known the hw_device_t
+ * references an activity_recognition_device.
+ */
+ hw_device_t common;
+
+ /*
+ * Sets the callback to invoke when there are events to report. This call overwrites the
+ * previously registered callback (if any).
+ */
+ void (*register_activity_callback)(const struct activity_recognition_device* dev,
+ const activity_recognition_callback_procs_t* callback);
+
+ /*
+ * Activates monitoring of activity transitions. Activities need not be reported as soon as they
+ * are detected. The detected activities are stored in a FIFO and reported in batches when the
+ * "max_batch_report_latency" expires or when the batch FIFO is full. The implementation should
+ * allow the AP to go into suspend mode while the activities are detected and stored in the
+ * batch FIFO. Whenever events need to be reported (like when the FIFO is full or when the
+ * max_batch_report_latency has expired for an activity, event pair), it should wake up the AP
+ * so that no events are lost. Activities are stored as transitions and they are allowed to
+ * overlap with each other. Each (activity, event_type) pair can be activated or deactivated
+ * independently of the other. The HAL implementation needs to keep track of which pairs are
+ * currently active and needs to detect only those pairs.
+ *
+ * activity_handle - Index of the specific activity that needs to be detected in the list
+ * returned by get_supported_activities_list.
+ * event_type - Specific transition of the activity that needs to be detected.
+ * max_batch_report_latency_ns - a transition can be delayed by at most
+ * "max_batch_report_latency" nanoseconds.
+ * Return 0 on success, negative errno code otherwise.
+ */
+ int (*enable_activity_event)(const struct activity_recognition_device* dev,
+ uint32_t activity_handle, uint32_t event_type, int64_t max_batch_report_latency_ns);
+
+ /*
+ * Disables detection of a specific (activity, event_type) pair.
+ */
+ int (*disable_activity_event)(const struct activity_recognition_device* dev,
+ uint32_t activity_handle, uint32_t event_type);
+
+ /*
+ * Flush all the batch FIFOs. Report all the activities that were stored in the FIFO so far as
+ * if max_batch_report_latency had expired. This shouldn't change the latency in any way. Add
+ * a flush_complete_event to indicate the end of the FIFO after all events are delivered.
+ * See ACTIVITY_EVENT_FLUSH_COMPLETE for more details.
+ * Return 0 on success, negative errno code otherwise.
+ */
+ int (*flush)(const struct activity_recognition_device* dev);
+
+ // Must be set to NULL.
+ void (*reserved_procs[16 - 4])(void);
+} activity_recognition_device_t;
+
+static inline int activity_recognition_open(const hw_module_t* module,
+ activity_recognition_device_t** device) {
+ return module->methods->open(module,
+ ACTIVITY_RECOGNITION_HARDWARE_INTERFACE, (hw_device_t**)device);
+}
+
+static inline int activity_recognition_close(activity_recognition_device_t* device) {
+ return device->common.close(&device->common);
+}
+
+__END_DECLS
+
+#endif // ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H
diff --git a/include/hardware/audio.h b/include/hardware/audio.h
index 6ba2544..763ca58 100644
--- a/include/hardware/audio.h
+++ b/include/hardware/audio.h
@@ -54,7 +54,10 @@
#define AUDIO_DEVICE_API_VERSION_0_0 HARDWARE_DEVICE_API_VERSION(0, 0)
#define AUDIO_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION(1, 0)
#define AUDIO_DEVICE_API_VERSION_2_0 HARDWARE_DEVICE_API_VERSION(2, 0)
-#define AUDIO_DEVICE_API_VERSION_CURRENT AUDIO_DEVICE_API_VERSION_2_0
+#define AUDIO_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0)
+#define AUDIO_DEVICE_API_VERSION_CURRENT AUDIO_DEVICE_API_VERSION_3_0
+/* Minimal audio HAL version supported by the audio framework */
+#define AUDIO_DEVICE_API_VERSION_MIN AUDIO_DEVICE_API_VERSION_2_0
/**
* List of known audio HAL modules. This is the base name of the audio HAL
@@ -91,22 +94,37 @@
#define AUDIO_PARAMETER_VALUE_TTY_HCO "tty_hco"
#define AUDIO_PARAMETER_VALUE_TTY_FULL "tty_full"
+/* Hearing Aid Compatibility - Telecoil (HAC-T) mode on/off
+ Strings must be in sync with CallFeaturesSetting.java */
+#define AUDIO_PARAMETER_KEY_HAC "HACSetting"
+#define AUDIO_PARAMETER_VALUE_HAC_ON "ON"
+#define AUDIO_PARAMETER_VALUE_HAC_OFF "OFF"
+
/* A2DP sink address set by framework */
#define AUDIO_PARAMETER_A2DP_SINK_ADDRESS "a2dp_sink_address"
+/* A2DP source address set by framework */
+#define AUDIO_PARAMETER_A2DP_SOURCE_ADDRESS "a2dp_source_address"
+
/* Screen state */
#define AUDIO_PARAMETER_KEY_SCREEN_STATE "screen_state"
+/* Bluetooth SCO wideband */
+#define AUDIO_PARAMETER_KEY_BT_SCO_WB "bt_wbs"
+
+
/**
* audio stream parameters
*/
-#define AUDIO_PARAMETER_STREAM_ROUTING "routing" // audio_devices_t
-#define AUDIO_PARAMETER_STREAM_FORMAT "format" // audio_format_t
-#define AUDIO_PARAMETER_STREAM_CHANNELS "channels" // audio_channel_mask_t
-#define AUDIO_PARAMETER_STREAM_FRAME_COUNT "frame_count" // size_t
-#define AUDIO_PARAMETER_STREAM_INPUT_SOURCE "input_source" // audio_source_t
-#define AUDIO_PARAMETER_STREAM_SAMPLING_RATE "sampling_rate" // uint32_t
+#define AUDIO_PARAMETER_STREAM_ROUTING "routing" /* audio_devices_t */
+#define AUDIO_PARAMETER_STREAM_FORMAT "format" /* audio_format_t */
+#define AUDIO_PARAMETER_STREAM_CHANNELS "channels" /* audio_channel_mask_t */
+#define AUDIO_PARAMETER_STREAM_FRAME_COUNT "frame_count" /* size_t */
+#define AUDIO_PARAMETER_STREAM_INPUT_SOURCE "input_source" /* audio_source_t */
+#define AUDIO_PARAMETER_STREAM_SAMPLING_RATE "sampling_rate" /* uint32_t */
+
+#define AUDIO_PARAMETER_DEVICE_DISCONNECT "disconnect" /* audio_devices_t */
/* Query supported formats. The response is a '|' separated list of strings from
* audio_format_t enum e.g: "sup_formats=AUDIO_FORMAT_PCM_16_BIT" */
@@ -118,6 +136,11 @@
* "sup_sampling_rates=44100|48000" */
#define AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES "sup_sampling_rates"
+/* Get the HW synchronization source used for an output stream.
+ * Return a valid source (positive integer) or AUDIO_HW_SYNC_INVALID if an error occurs
+ * or no HW sync source is used. */
+#define AUDIO_PARAMETER_STREAM_HW_AV_SYNC "hw_av_sync"
+
/**
* audio codec parameters
*/
@@ -137,18 +160,6 @@
/**************************************/
-/* common audio stream configuration parameters
- * You should memset() the entire structure to zero before use to
- * ensure forward compatibility
- */
-struct audio_config {
- uint32_t sample_rate;
- audio_channel_mask_t channel_mask;
- audio_format_t format;
- audio_offload_info_t offload_info;
-};
-typedef struct audio_config audio_config_t;
-
/* common audio stream parameters and operations */
struct audio_stream {
@@ -257,6 +268,11 @@
*/
struct audio_stream_out {
+ /**
+ * Common methods of the audio stream out. This *must* be the first member of audio_stream_out
+ * as users of this structure will cast a audio_stream to audio_stream_out pointer in contexts
+ * where it's known the audio_stream references an audio_stream_out.
+ */
struct audio_stream common;
/**
@@ -380,6 +396,11 @@
typedef struct audio_stream_out audio_stream_out_t;
struct audio_stream_in {
+ /**
+ * Common methods of the audio stream in. This *must* be the first member of audio_stream_in
+ * as users of this structure will cast a audio_stream to audio_stream_in pointer in contexts
+ * where it's known the audio_stream references an audio_stream_in.
+ */
struct audio_stream common;
/** set the input gain for the audio driver. This method is for
@@ -409,7 +430,10 @@
/**
* return the frame size (number of bytes per sample).
+ *
+ * Deprecated: use audio_stream_out_frame_size() or audio_stream_in_frame_size() instead.
*/
+__attribute__((__deprecated__))
static inline size_t audio_stream_frame_size(const struct audio_stream *s)
{
size_t chan_samp_sz;
@@ -423,6 +447,37 @@
return sizeof(int8_t);
}
+/**
+ * return the frame size (number of bytes per sample) of an output stream.
+ */
+static inline size_t audio_stream_out_frame_size(const struct audio_stream_out *s)
+{
+ size_t chan_samp_sz;
+ audio_format_t format = s->common.get_format(&s->common);
+
+ if (audio_is_linear_pcm(format)) {
+ chan_samp_sz = audio_bytes_per_sample(format);
+ return audio_channel_count_from_out_mask(s->common.get_channels(&s->common)) * chan_samp_sz;
+ }
+
+ return sizeof(int8_t);
+}
+
+/**
+ * return the frame size (number of bytes per sample) of an input stream.
+ */
+static inline size_t audio_stream_in_frame_size(const struct audio_stream_in *s)
+{
+ size_t chan_samp_sz;
+ audio_format_t format = s->common.get_format(&s->common);
+
+ if (audio_is_linear_pcm(format)) {
+ chan_samp_sz = audio_bytes_per_sample(format);
+ return audio_channel_count_from_in_mask(s->common.get_channels(&s->common)) * chan_samp_sz;
+ }
+
+ return sizeof(int8_t);
+}
/**********************************************************************/
@@ -436,6 +491,11 @@
};
struct audio_hw_device {
+ /**
+ * Common methods of the audio device. This *must* be the first member of audio_hw_device
+ * as users of this structure will cast a hw_device_t to audio_hw_device pointer in contexts
+ * where it's known the hw_device_t references an audio_hw_device.
+ */
struct hw_device_t common;
/**
@@ -505,13 +565,21 @@
size_t (*get_input_buffer_size)(const struct audio_hw_device *dev,
const struct audio_config *config);
- /** This method creates and opens the audio hardware output stream */
+ /** This method creates and opens the audio hardware output stream.
+ * The "address" parameter qualifies the "devices" audio device type if needed.
+ * The format depends on the device type:
+ * - Bluetooth devices use the MAC address of the device in the form "00:11:22:AA:BB:CC"
+ * - USB devices use the ALSA card and device numbers in the form "card=X;device=Y"
+ * - Other devices may use a number or any other string.
+ */
+
int (*open_output_stream)(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
- struct audio_stream_out **stream_out);
+ struct audio_stream_out **stream_out,
+ const char *address);
void (*close_output_stream)(struct audio_hw_device *dev,
struct audio_stream_out* stream_out);
@@ -521,7 +589,10 @@
audio_io_handle_t handle,
audio_devices_t devices,
struct audio_config *config,
- struct audio_stream_in **stream_in);
+ struct audio_stream_in **stream_in,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source);
void (*close_input_stream)(struct audio_hw_device *dev,
struct audio_stream_in *stream_in);
@@ -543,6 +614,38 @@
* method may leave it set to NULL.
*/
int (*get_master_mute)(struct audio_hw_device *dev, bool *mute);
+
+ /**
+ * Routing control
+ */
+
+ /* Creates an audio patch between several source and sink ports.
+ * The handle is allocated by the HAL and should be unique for this
+ * audio HAL module. */
+ int (*create_audio_patch)(struct audio_hw_device *dev,
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *handle);
+
+ /* Release an audio patch */
+ int (*release_audio_patch)(struct audio_hw_device *dev,
+ audio_patch_handle_t handle);
+
+ /* Fills the list of supported attributes for a given audio port.
+ * As input, "port" contains the information (type, role, address etc...)
+ * needed by the HAL to identify the port.
+ * As output, "port" contains possible attributes (sampling rates, formats,
+ * channel masks, gain controllers...) for this port.
+ */
+ int (*get_audio_port)(struct audio_hw_device *dev,
+ struct audio_port *port);
+
+ /* Set audio port configuration */
+ int (*set_audio_port_config)(struct audio_hw_device *dev,
+ const struct audio_port_config *config);
+
};
typedef struct audio_hw_device audio_hw_device_t;
diff --git a/include/hardware/audio_alsaops.h b/include/hardware/audio_alsaops.h
new file mode 100644
index 0000000..0d266ff
--- /dev/null
+++ b/include/hardware/audio_alsaops.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains shared utility functions to handle the tinyalsa
+ * implementation for Android internal audio, generally in the hardware layer.
+ * Some routines may log a fatal error on failure, as noted.
+ */
+
+#ifndef ANDROID_AUDIO_ALSAOPS_H
+#define ANDROID_AUDIO_ALSAOPS_H
+
+#include <cutils/log.h>
+#include <system/audio.h>
+#include <tinyalsa/asoundlib.h>
+
+__BEGIN_DECLS
+
+/* Converts audio_format to pcm_format.
+ * Parameters:
+ * format the audio_format_t to convert
+ *
+ * Logs a fatal error if format is not a valid convertible audio_format_t.
+ */
+static inline enum pcm_format pcm_format_from_audio_format(audio_format_t format)
+{
+ switch (format) {
+#ifdef HAVE_BIG_ENDIAN
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return PCM_FORMAT_S16_BE;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ return PCM_FORMAT_S24_3BE;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ return PCM_FORMAT_S32_BE;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ return PCM_FORMAT_S24_BE;
+#else
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return PCM_FORMAT_S16_LE;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ return PCM_FORMAT_S24_3LE;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ return PCM_FORMAT_S32_LE;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ return PCM_FORMAT_S24_LE;
+#endif
+ case AUDIO_FORMAT_PCM_FLOAT: /* there is no equivalent for float */
+ default:
+ LOG_ALWAYS_FATAL("pcm_format_from_audio_format: invalid audio format %#x", format);
+ return 0;
+ }
+}
+
+/* Converts pcm_format to audio_format.
+ * Parameters:
+ * format the pcm_format to convert
+ *
+ * Logs a fatal error if format is not a valid convertible pcm_format.
+ */
+static inline audio_format_t audio_format_from_pcm_format(enum pcm_format format)
+{
+ switch (format) {
+#ifdef HAVE_BIG_ENDIAN
+ case PCM_FORMAT_S16_BE:
+ return AUDIO_FORMAT_PCM_16_BIT;
+ case PCM_FORMAT_S24_3BE:
+ return AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ case PCM_FORMAT_S24_BE:
+ return AUDIO_FORMAT_PCM_8_24_BIT;
+ case PCM_FORMAT_S32_BE:
+ return AUDIO_FORMAT_PCM_32_BIT;
+#else
+ case PCM_FORMAT_S16_LE:
+ return AUDIO_FORMAT_PCM_16_BIT;
+ case PCM_FORMAT_S24_3LE:
+ return AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ case PCM_FORMAT_S24_LE:
+ return AUDIO_FORMAT_PCM_8_24_BIT;
+ case PCM_FORMAT_S32_LE:
+ return AUDIO_FORMAT_PCM_32_BIT;
+#endif
+ default:
+ LOG_ALWAYS_FATAL("audio_format_from_pcm_format: invalid pcm format %#x", format);
+ return 0;
+ }
+}
+
+__END_DECLS
+
+#endif /* ANDROID_AUDIO_ALSAOPS_H */
diff --git a/include/hardware/audio_effect.h b/include/hardware/audio_effect.h
index b49d02d..ee48e4c 100644
--- a/include/hardware/audio_effect.h
+++ b/include/hardware/audio_effect.h
@@ -815,7 +815,7 @@
uint32_t samplingRate; // sampling rate
uint32_t channels; // channel mask (see audio_channel_mask_t in audio.h)
buffer_provider_t bufferProvider; // buffer provider
- uint8_t format; // Audio format (see see audio_format_t in audio.h)
+ uint8_t format; // Audio format (see audio_format_t in audio.h)
uint8_t accessMode; // read/write or accumulate in buffer (effect_buffer_access_e)
uint16_t mask; // indicates which of the above fields is valid
} buffer_config_t;
diff --git a/include/hardware/audio_policy.h b/include/hardware/audio_policy.h
index 4e75e02..99cb044 100644
--- a/include/hardware/audio_policy.h
+++ b/include/hardware/audio_policy.h
@@ -248,9 +248,6 @@
const audio_offload_info_t *info);
};
-/* audio hw module handle used by load_hw_module(), open_output_on_module()
- * and open_input_on_module() */
-typedef int audio_module_handle_t;
struct audio_policy_service_ops {
/*
@@ -332,10 +329,9 @@
audio_io_handle_t output,
int delay_ms);
- /* reroute a given stream type to the specified output */
- int (*set_stream_output)(void *service,
- audio_stream_type_t stream,
- audio_io_handle_t output);
+ /* invalidate a stream type, causing a reroute to an unspecified new output */
+ int (*invalidate_stream)(void *service,
+ audio_stream_type_t stream);
/* function enabling to send proprietary informations directly from audio
* policy manager to audio hardware interface. */
@@ -424,6 +420,12 @@
} audio_policy_module_t;
struct audio_policy_device {
+ /**
+ * Common methods of the audio policy device. This *must* be the first member of
+ * audio_policy_device as users of this structure will cast a hw_device_t to
+ * audio_policy_device pointer in contexts where it's known the hw_device_t references an
+ * audio_policy_device.
+ */
struct hw_device_t common;
int (*create_audio_policy)(const struct audio_policy_device *device,
diff --git a/include/hardware/bluetooth.h b/include/hardware/bluetooth.h
index 5e100ec..74cd1fc 100644
--- a/include/hardware/bluetooth.h
+++ b/include/hardware/bluetooth.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_INCLUDE_BLUETOOTH_H
#define ANDROID_INCLUDE_BLUETOOTH_H
+#include <stdbool.h>
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/types.h>
@@ -37,14 +38,18 @@
/* Bluetooth profile interface IDs */
#define BT_PROFILE_HANDSFREE_ID "handsfree"
+#define BT_PROFILE_HANDSFREE_CLIENT_ID "handsfree_client"
#define BT_PROFILE_ADVANCED_AUDIO_ID "a2dp"
+#define BT_PROFILE_ADVANCED_AUDIO_SINK_ID "a2dp_sink"
#define BT_PROFILE_HEALTH_ID "health"
#define BT_PROFILE_SOCKETS_ID "socket"
#define BT_PROFILE_HIDHOST_ID "hidhost"
#define BT_PROFILE_PAN_ID "pan"
+#define BT_PROFILE_MAP_CLIENT_ID "map_client"
#define BT_PROFILE_GATT_ID "gatt"
#define BT_PROFILE_AV_RC_ID "avrcp"
+#define BT_PROFILE_AV_RC_CTRL_ID "avrcp_ctrl"
/** Bluetooth Address */
typedef struct {
@@ -83,7 +88,8 @@
BT_STATUS_PARM_INVALID,
BT_STATUS_UNHANDLED,
BT_STATUS_AUTH_FAILURE,
- BT_STATUS_RMT_DEV_DOWN
+ BT_STATUS_RMT_DEV_DOWN,
+ BT_STATUS_AUTH_REJECTED
} bt_status_t;
@@ -92,6 +98,15 @@
uint8_t pin[16];
} __attribute__((packed))bt_pin_code_t;
+typedef struct {
+ uint8_t status;
+ uint8_t ctrl_state; /* stack reported state */
+ uint64_t tx_time; /* in ms */
+ uint64_t rx_time; /* in ms */
+ uint64_t idle_time; /* in ms */
+ uint64_t energy_used; /* a product of mA, V and ms */
+} __attribute__((packed))bt_activity_energy_info;
+
/** Bluetooth Adapter Discovery state */
typedef enum {
BT_DISCOVERY_STOPPED,
@@ -126,6 +141,18 @@
int manufacturer;
} bt_remote_version_t;
+typedef struct
+{
+ uint8_t local_privacy_enabled;
+ uint8_t max_adv_instance;
+ uint8_t rpa_offload_supported;
+ uint8_t max_irk_list_size;
+ uint8_t max_adv_filter_supported;
+ uint8_t scan_result_storage_size_lobyte;
+ uint8_t scan_result_storage_size_hibyte;
+ uint8_t activity_energy_info_supported;
+}bt_local_le_features_t;
+
/* Bluetooth Adapter and Remote Device property types */
typedef enum {
/* Properties common to both adapter and remote device */
@@ -208,6 +235,13 @@
BT_PROPERTY_REMOTE_VERSION_INFO,
+ /**
+ * Description - Local LE features
+ * Access mode - GET.
+ * Data type - bt_local_le_features_t.
+ */
+ BT_PROPERTY_LOCAL_LE_FEATURES,
+
BT_PROPERTY_REMOTE_DEVICE_TIMESTAMP = 0xFF,
} bt_property_type_t;
@@ -219,6 +253,7 @@
void *val;
} bt_property_t;
+
/** Bluetooth Device Type */
typedef enum {
BT_DEVICE_DEVTYPE_BREDR = 0x1,
@@ -321,13 +356,18 @@
* This callback shall be invoked whenever the le_tx_test, le_rx_test or le_test_end is invoked
* The num_packets is valid only for le_test_end command */
typedef void (*le_test_mode_callback)(bt_status_t status, uint16_t num_packets);
+
+/** Callback invoked when energy details are obtained */
+/* Ctrl_state-Current controller state-Active-1,scan-2,or idle-3 state as defined by HCI spec.
+ * If the ctrl_state value is 0, it means the API call failed
+ * Time values-In milliseconds as returned by the controller
+ * Energy used-Value as returned by the controller
+ * Status-Provides the status of the read_energy_info API call */
+typedef void (*energy_info_callback)(bt_activity_energy_info *energy_info);
+
/** TODO: Add callbacks for Link Up/Down and other generic
* notifications/callbacks */
-/** Wakelock callback */
-/* Called to take/release wakelock to allow timers to work (temporary kluge) */
-typedef void (*bt_wakelock_callback)(int acquire);
-
/** Bluetooth DM callback structure. */
typedef struct {
/** set to sizeof(bt_callbacks_t) */
@@ -344,9 +384,27 @@
callback_thread_event thread_evt_cb;
dut_mode_recv_callback dut_mode_recv_cb;
le_test_mode_callback le_test_mode_cb;
- bt_wakelock_callback bt_wakelock_cb;
+ energy_info_callback energy_info_cb;
} bt_callbacks_t;
+typedef void (*alarm_cb)(void *data);
+typedef bool (*set_wake_alarm_callout)(uint64_t delay_millis, bool should_wake, alarm_cb cb, void *data);
+typedef int (*acquire_wake_lock_callout)(const char *lock_name);
+typedef int (*release_wake_lock_callout)(const char *lock_name);
+
+/** The set of functions required by bluedroid to set wake alarms and
+ * grab wake locks. This struct is passed into the stack through the
+ * |set_os_callouts| function on |bt_interface_t|.
+ */
+typedef struct {
+ /* set to sizeof(bt_os_callouts_t) */
+ size_t size;
+
+ set_wake_alarm_callout set_wake_alarm;
+ acquire_wake_lock_callout acquire_wake_lock;
+ release_wake_lock_callout release_wake_lock;
+} bt_os_callouts_t;
+
/** NOTE: By default, no profiles are initialized at the time of init/enable.
* Whenever the application invokes the 'init' API of a profile, then one of
* the following shall occur:
@@ -422,7 +480,7 @@
int (*cancel_discovery)(void);
/** Create Bluetooth Bonding */
- int (*create_bond)(const bt_bdaddr_t *bd_addr);
+ int (*create_bond)(const bt_bdaddr_t *bd_addr, int transport);
/** Remove Bond */
int (*remove_bond)(const bt_bdaddr_t *bd_addr);
@@ -430,6 +488,13 @@
/** Cancel Bond */
int (*cancel_bond)(const bt_bdaddr_t *bd_addr);
+ /**
+ * Get the connection status for a given remote device.
+ * return value of 0 means the device is not connected,
+ * non-zero return status indicates an active connection.
+ */
+ int (*get_connection_state)(const bt_bdaddr_t *bd_addr);
+
/** BT Legacy PinKey Reply */
/** If accept==FALSE, then pin_len and pin_code shall be 0x0 */
int (*pin_reply)(const bt_bdaddr_t *bd_addr, uint8_t accept,
@@ -458,6 +523,16 @@
/* enable or disable bluetooth HCI snoop log */
int (*config_hci_snoop_log)(uint8_t enable);
+
+ /** Sets the OS call-out functions that bluedroid needs for alarms and wake locks.
+ * This should be called immediately after a successful |init|.
+ */
+ int (*set_os_callouts)(bt_os_callouts_t *callouts);
+
+ /** Read Energy info details - return value indicates BT_STATUS_SUCCESS or BT_STATUS_NOT_READY
+ * Success indicates that the VSC command was sent to controller
+ */
+ int (*read_energy_info)();
} bt_interface_t;
/** TODO: Need to add APIs for Service Discovery, Service authorization and
diff --git a/include/hardware/bt_av.h b/include/hardware/bt_av.h
index 2ec00c3..5252a17 100644
--- a/include/hardware/bt_av.h
+++ b/include/hardware/bt_av.h
@@ -47,12 +47,23 @@
typedef void (* btav_audio_state_callback)(btav_audio_state_t state,
bt_bdaddr_t *bd_addr);
+/** Callback for audio configuration change.
+ * Used only for the A2DP sink interface.
+ * state will have one of the values from btav_audio_state_t
+ * sample_rate: sample rate in Hz
+ * channel_count: number of channels (1 for mono, 2 for stereo)
+ */
+typedef void (* btav_audio_config_callback)(bt_bdaddr_t *bd_addr,
+ uint32_t sample_rate,
+ uint8_t channel_count);
+
/** BT-AV callback structure. */
typedef struct {
/** set to sizeof(btav_callbacks_t) */
size_t size;
btav_connection_state_callback connection_state_cb;
btav_audio_state_callback audio_state_cb;
+ btav_audio_config_callback audio_config_cb;
} btav_callbacks_t;
/**
@@ -65,7 +76,9 @@
* android_audio_hw library and the Bluetooth stack.
*
*/
-/** Represents the standard BT-AV interface. */
+/** Represents the standard BT-AV interface.
+ * Used for both the A2DP source and sink interfaces.
+ */
typedef struct {
/** set to sizeof(btav_interface_t) */
diff --git a/include/hardware/bt_gatt_client.h b/include/hardware/bt_gatt_client.h
index 11b146d..8073dd1 100644
--- a/include/hardware/bt_gatt_client.h
+++ b/include/hardware/bt_gatt_client.h
@@ -152,13 +152,59 @@
int rssi, int status);
/**
- * Callback indicationg the status of a listen() operation
+ * Callback indicating the status of a listen() operation
*/
typedef void (*listen_callback)(int status, int server_if);
/** Callback invoked when the MTU for a given connection changes */
typedef void (*configure_mtu_callback)(int conn_id, int status, int mtu);
+/** Callback invoked when a scan filter configuration command has completed */
+typedef void (*scan_filter_cfg_callback)(int action, int client_if, int status, int filt_type,
+ int avbl_space);
+
+/** Callback invoked when scan param has been added, cleared, or deleted */
+typedef void (*scan_filter_param_callback)(int action, int client_if, int status,
+ int avbl_space);
+
+/** Callback invoked when a scan filter enable / disable command has completed */
+typedef void (*scan_filter_status_callback)(int enable, int client_if, int status);
+
+/** Callback invoked when multi-adv enable operation has completed */
+typedef void (*multi_adv_enable_callback)(int client_if, int status);
+
+/** Callback invoked when multi-adv param update operation has completed */
+typedef void (*multi_adv_update_callback)(int client_if, int status);
+
+/** Callback invoked when multi-adv instance data set operation has completed */
+typedef void (*multi_adv_data_callback)(int client_if, int status);
+
+/** Callback invoked when multi-adv disable operation has completed */
+typedef void (*multi_adv_disable_callback)(int client_if, int status);
+
+/**
+ * Callback notifying an application that a remote device connection is currently congested
+ * and cannot receive any more data. An application should avoid sending more data until
+ * a further callback is received indicating the congestion status has been cleared.
+ */
+typedef void (*congestion_callback)(int conn_id, bool congested);
+/** Callback invoked when batchscan storage config operation has completed */
+typedef void (*batchscan_cfg_storage_callback)(int client_if, int status);
+
+/** Callback invoked when batchscan enable / disable operation has completed */
+typedef void (*batchscan_enable_disable_callback)(int action, int client_if, int status);
+
+/** Callback invoked when batchscan reports are obtained */
+typedef void (*batchscan_reports_callback)(int client_if, int status, int report_format,
+ int num_records, int data_len, uint8_t* rep_data);
+
+/** Callback invoked when batchscan storage threshold limit is crossed */
+typedef void (*batchscan_threshold_callback)(int client_if);
+
+/** Track ADV VSE callback invoked when tracked device is found or lost */
+typedef void (*track_adv_event_callback)(int client_if, int filt_index, int addr_type,
+ bt_bdaddr_t* bda, int adv_state);
+
typedef struct {
register_client_callback register_client_cb;
scan_result_callback scan_result_cb;
@@ -179,6 +225,19 @@
read_remote_rssi_callback read_remote_rssi_cb;
listen_callback listen_cb;
configure_mtu_callback configure_mtu_cb;
+ scan_filter_cfg_callback scan_filter_cfg_cb;
+ scan_filter_param_callback scan_filter_param_cb;
+ scan_filter_status_callback scan_filter_status_cb;
+ multi_adv_enable_callback multi_adv_enable_cb;
+ multi_adv_update_callback multi_adv_update_cb;
+ multi_adv_data_callback multi_adv_data_cb;
+ multi_adv_disable_callback multi_adv_disable_cb;
+ congestion_callback congestion_cb;
+ batchscan_cfg_storage_callback batchscan_cfg_storage_cb;
+ batchscan_enable_disable_callback batchscan_enb_disable_cb;
+ batchscan_reports_callback batchscan_reports_cb;
+ batchscan_threshold_callback batchscan_threshold_cb;
+ track_adv_event_callback track_adv_event_cb;
} btgatt_client_callbacks_t;
/** Represents the standard BT-GATT client interface. */
@@ -191,11 +250,11 @@
bt_status_t (*unregister_client)(int client_if );
/** Start or stop LE device scanning */
- bt_status_t (*scan)( int client_if, bool start );
+ bt_status_t (*scan)( bool start );
/** Create a connection to a remote LE or dual-mode device */
bt_status_t (*connect)( int client_if, const bt_bdaddr_t *bd_addr,
- bool is_direct );
+ bool is_direct, int transport );
/** Disconnect a remote device or cancel a pending connection */
bt_status_t (*disconnect)( int client_if, const bt_bdaddr_t *bd_addr,
@@ -276,11 +335,32 @@
/** Request RSSI for a given remote device */
bt_status_t (*read_remote_rssi)( int client_if, const bt_bdaddr_t *bd_addr);
+ /** Setup scan filter params */
+ bt_status_t (*scan_filter_param_setup)(int client_if, int action, int filt_index, int feat_seln,
+ int list_logic_type, int filt_logic_type, int rssi_high_thres,
+ int rssi_low_thres, int dely_mode, int found_timeout,
+ int lost_timeout, int found_timeout_cnt);
+
+
+ /** Configure a scan filter condition */
+ bt_status_t (*scan_filter_add_remove)(int client_if, int action, int filt_type,
+ int filt_index, int company_id,
+ int company_id_mask, const bt_uuid_t *p_uuid,
+ const bt_uuid_t *p_uuid_mask, const bt_bdaddr_t *bd_addr,
+ char addr_type, int data_len, char* p_data, int mask_len,
+ char* p_mask);
+
+ /** Clear all scan filter conditions for specific filter index*/
+ bt_status_t (*scan_filter_clear)(int client_if, int filt_index);
+
+ /** Enable / disable scan filter feature*/
+ bt_status_t (*scan_filter_enable)(int client_if, bool enable);
+
/** Determine the type of the remote device (LE, BR/EDR, Dual-mode) */
int (*get_device_type)( const bt_bdaddr_t *bd_addr );
/** Set the advertising data or scan response data */
- bt_status_t (*set_adv_data)(int server_if, bool set_scan_rsp, bool include_name,
+ bt_status_t (*set_adv_data)(int client_if, bool set_scan_rsp, bool include_name,
bool include_txpower, int min_interval, int max_interval, int appearance,
uint16_t manufacturer_len, char* manufacturer_data,
uint16_t service_data_len, char* service_data,
@@ -289,8 +369,47 @@
/** Configure the MTU for a given connection */
bt_status_t (*configure_mtu)(int conn_id, int mtu);
+ /** Request a connection parameter update */
+ bt_status_t (*conn_parameter_update)(const bt_bdaddr_t *bd_addr, int min_interval,
+ int max_interval, int latency, int timeout);
+
+ /** Sets the LE scan interval and window in units of N*0.625 msec */
+ bt_status_t (*set_scan_parameters)(int scan_interval, int scan_window);
+
+ /* Setup the parameters as per spec, user manual specified values and enable multi ADV */
+ bt_status_t (*multi_adv_enable)(int client_if, int min_interval,int max_interval,int adv_type,
+ int chnl_map, int tx_power, int timeout_s);
+
+ /* Update the parameters as per spec, user manual specified values and restart multi ADV */
+ bt_status_t (*multi_adv_update)(int client_if, int min_interval,int max_interval,int adv_type,
+ int chnl_map, int tx_power, int timeout_s);
+
+ /* Setup the data for the specified instance */
+ bt_status_t (*multi_adv_set_inst_data)(int client_if, bool set_scan_rsp, bool include_name,
+ bool incl_txpower, int appearance, int manufacturer_len,
+ char* manufacturer_data, int service_data_len,
+ char* service_data, int service_uuid_len, char* service_uuid);
+
+ /* Disable the multi adv instance */
+ bt_status_t (*multi_adv_disable)(int client_if);
+
+ /* Configure the batchscan storage */
+ bt_status_t (*batchscan_cfg_storage)(int client_if, int batch_scan_full_max,
+ int batch_scan_trunc_max, int batch_scan_notify_threshold);
+
+ /* Enable batchscan */
+ bt_status_t (*batchscan_enb_batch_scan)(int client_if, int scan_mode,
+ int scan_interval, int scan_window, int addr_type, int discard_rule);
+
+ /* Disable batchscan */
+ bt_status_t (*batchscan_dis_batch_scan)(int client_if);
+
+ /* Read out batchscan reports */
+ bt_status_t (*batchscan_read_reports)(int client_if, int scan_mode);
+
/** Test mode interface */
bt_status_t (*test_command)( int command, btgatt_test_params_t* params);
+
} btgatt_client_interface_t;
__END_DECLS
diff --git a/include/hardware/bt_gatt_server.h b/include/hardware/bt_gatt_server.h
index 1a5a400..2b1de27 100644
--- a/include/hardware/bt_gatt_server.h
+++ b/include/hardware/bt_gatt_server.h
@@ -104,6 +104,19 @@
*/
typedef void (*response_confirmation_callback)(int status, int handle);
+/**
+ * Callback confirming that a notification or indication has been sent
+ * to a remote device.
+ */
+typedef void (*indication_sent_callback)(int conn_id, int status);
+
+/**
+ * Callback notifying an application that a remote device connection is currently congested
+ * and cannot receive any more data. An application should avoid sending more data until
+ * a further callback is received indicating the congestion status has been cleared.
+ */
+typedef void (*congestion_callback)(int conn_id, bool congested);
+
typedef struct {
register_server_callback register_server_cb;
connection_callback connection_cb;
@@ -118,6 +131,8 @@
request_write_callback request_write_cb;
request_exec_write_callback request_exec_write_cb;
response_confirmation_callback response_confirmation_cb;
+ indication_sent_callback indication_sent_cb;
+ congestion_callback congestion_cb;
} btgatt_server_callbacks_t;
/** Represents the standard BT-GATT server interface. */
@@ -129,7 +144,8 @@
bt_status_t (*unregister_server)(int server_if );
/** Create a connection to a remote peripheral */
- bt_status_t (*connect)(int server_if, const bt_bdaddr_t *bd_addr, bool is_direct );
+ bt_status_t (*connect)(int server_if, const bt_bdaddr_t *bd_addr,
+ bool is_direct, int transport);
/** Disconnect an established connection or cancel a pending one */
bt_status_t (*disconnect)(int server_if, const bt_bdaddr_t *bd_addr,
@@ -168,6 +184,7 @@
/** Send a response to a read/write operation */
bt_status_t (*send_response)(int conn_id, int trans_id,
int status, btgatt_response_t *response);
+
} btgatt_server_interface_t;
__END_DECLS
diff --git a/include/hardware/bt_gatt_types.h b/include/hardware/bt_gatt_types.h
index 0ac217e..e037ddc 100644
--- a/include/hardware/bt_gatt_types.h
+++ b/include/hardware/bt_gatt_types.h
@@ -43,6 +43,14 @@
uint8_t is_primary;
} btgatt_srvc_id_t;
+/** Preferred physical Transport for GATT connection */
+typedef enum
+{
+ GATT_TRANSPORT_AUTO,
+ GATT_TRANSPORT_BREDR,
+ GATT_TRANSPORT_LE
+} btgatt_transport_t;
+
__END_DECLS
#endif /* ANDROID_INCLUDE_BT_GATT_TYPES_H */
diff --git a/include/hardware/bt_hf.h b/include/hardware/bt_hf.h
index 6135ac4..7dcb40a 100644
--- a/include/hardware/bt_hf.h
+++ b/include/hardware/bt_hf.h
@@ -57,6 +57,14 @@
BTHF_NREC_START
} bthf_nrec_t;
+/* WBS codec setting */
+typedef enum
+{
+ BTHF_WBS_NONE,
+ BTHF_WBS_NO,
+ BTHF_WBS_YES
+}bthf_wbs_config_t;
+
/* CHLD - Call held handling */
typedef enum
{
@@ -79,65 +87,70 @@
/** Callback for VR connection state change.
* state will have one of the values from BtHfVRState
*/
-typedef void (* bthf_vr_cmd_callback)(bthf_vr_state_t state);
+typedef void (* bthf_vr_cmd_callback)(bthf_vr_state_t state, bt_bdaddr_t *bd_addr);
/** Callback for answer incoming call (ATA)
*/
-typedef void (* bthf_answer_call_cmd_callback)();
+typedef void (* bthf_answer_call_cmd_callback)(bt_bdaddr_t *bd_addr);
/** Callback for disconnect call (AT+CHUP)
*/
-typedef void (* bthf_hangup_call_cmd_callback)();
+typedef void (* bthf_hangup_call_cmd_callback)(bt_bdaddr_t *bd_addr);
/** Callback for disconnect call (AT+CHUP)
* type will denote Speaker/Mic gain (BtHfVolumeControl).
*/
-typedef void (* bthf_volume_cmd_callback)(bthf_volume_type_t type, int volume);
+typedef void (* bthf_volume_cmd_callback)(bthf_volume_type_t type, int volume, bt_bdaddr_t *bd_addr);
/** Callback for dialing an outgoing call
* If number is NULL, redial
*/
-typedef void (* bthf_dial_call_cmd_callback)(char *number);
+typedef void (* bthf_dial_call_cmd_callback)(char *number, bt_bdaddr_t *bd_addr);
/** Callback for sending DTMF tones
* tone contains the dtmf character to be sent
*/
-typedef void (* bthf_dtmf_cmd_callback)(char tone);
+typedef void (* bthf_dtmf_cmd_callback)(char tone, bt_bdaddr_t *bd_addr);
/** Callback for enabling/disabling noise reduction/echo cancellation
* value will be 1 to enable, 0 to disable
*/
-typedef void (* bthf_nrec_cmd_callback)(bthf_nrec_t nrec);
+typedef void (* bthf_nrec_cmd_callback)(bthf_nrec_t nrec, bt_bdaddr_t *bd_addr);
+
+/** Callback for AT+BCS and event from BAC
+ * WBS enable, WBS disable
+ */
+typedef void (* bthf_wbs_callback)(bthf_wbs_config_t wbs, bt_bdaddr_t *bd_addr);
/** Callback for call hold handling (AT+CHLD)
* value will contain the call hold command (0, 1, 2, 3)
*/
-typedef void (* bthf_chld_cmd_callback)(bthf_chld_type_t chld);
+typedef void (* bthf_chld_cmd_callback)(bthf_chld_type_t chld, bt_bdaddr_t *bd_addr);
/** Callback for CNUM (subscriber number)
*/
-typedef void (* bthf_cnum_cmd_callback)();
+typedef void (* bthf_cnum_cmd_callback)(bt_bdaddr_t *bd_addr);
/** Callback for indicators (CIND)
*/
-typedef void (* bthf_cind_cmd_callback)();
+typedef void (* bthf_cind_cmd_callback)(bt_bdaddr_t *bd_addr);
/** Callback for operator selection (COPS)
*/
-typedef void (* bthf_cops_cmd_callback)();
+typedef void (* bthf_cops_cmd_callback)(bt_bdaddr_t *bd_addr);
/** Callback for call list (AT+CLCC)
*/
-typedef void (* bthf_clcc_cmd_callback) ();
+typedef void (* bthf_clcc_cmd_callback) (bt_bdaddr_t *bd_addr);
/** Callback for unknown AT command recd from HF
* at_string will contain the unparsed AT string
*/
-typedef void (* bthf_unknown_at_cmd_callback)(char *at_string);
+typedef void (* bthf_unknown_at_cmd_callback)(char *at_string, bt_bdaddr_t *bd_addr);
/** Callback for keypressed (HSP) event.
*/
-typedef void (* bthf_key_pressed_cmd_callback)();
+typedef void (* bthf_key_pressed_cmd_callback)(bt_bdaddr_t *bd_addr);
/** BT-HF callback structure. */
typedef struct {
@@ -152,6 +165,7 @@
bthf_dial_call_cmd_callback dial_call_cmd_cb;
bthf_dtmf_cmd_callback dtmf_cmd_cb;
bthf_nrec_cmd_callback nrec_cmd_cb;
+ bthf_wbs_callback wbs_cb;
bthf_chld_cmd_callback chld_cmd_cb;
bthf_cnum_cmd_callback cnum_cmd_cb;
bthf_cind_cmd_callback cind_cmd_cb;
@@ -213,7 +227,7 @@
/**
* Register the BtHf callbacks
*/
- bt_status_t (*init)( bthf_callbacks_t* callbacks );
+ bt_status_t (*init)( bthf_callbacks_t* callbacks, int max_hf_clients);
/** connect to headset */
bt_status_t (*connect)( bt_bdaddr_t *bd_addr );
@@ -228,33 +242,33 @@
bt_status_t (*disconnect_audio)( bt_bdaddr_t *bd_addr );
/** start voice recognition */
- bt_status_t (*start_voice_recognition)();
+ bt_status_t (*start_voice_recognition)( bt_bdaddr_t *bd_addr );
/** stop voice recognition */
- bt_status_t (*stop_voice_recognition)();
+ bt_status_t (*stop_voice_recognition)( bt_bdaddr_t *bd_addr );
/** volume control */
- bt_status_t (*volume_control) (bthf_volume_type_t type, int volume);
+ bt_status_t (*volume_control) (bthf_volume_type_t type, int volume, bt_bdaddr_t *bd_addr );
/** Combined device status change notification */
bt_status_t (*device_status_notification)(bthf_network_state_t ntk_state, bthf_service_type_t svc_type, int signal,
int batt_chg);
/** Response for COPS command */
- bt_status_t (*cops_response)(const char *cops);
+ bt_status_t (*cops_response)(const char *cops, bt_bdaddr_t *bd_addr );
/** Response for CIND command */
bt_status_t (*cind_response)(int svc, int num_active, int num_held, bthf_call_state_t call_setup_state,
- int signal, int roam, int batt_chg);
+ int signal, int roam, int batt_chg, bt_bdaddr_t *bd_addr );
/** Pre-formatted AT response, typically in response to unknown AT cmd */
- bt_status_t (*formatted_at_response)(const char *rsp);
+ bt_status_t (*formatted_at_response)(const char *rsp, bt_bdaddr_t *bd_addr );
/** ok/error response
* ERROR (0)
* OK (1)
*/
- bt_status_t (*at_response) (bthf_at_response_t response_code, int error_code);
+ bt_status_t (*at_response) (bthf_at_response_t response_code, int error_code, bt_bdaddr_t *bd_addr );
/** response for CLCC command
* Can be iteratively called for each call index
@@ -263,7 +277,7 @@
bt_status_t (*clcc_response) (int index, bthf_call_direction_t dir,
bthf_call_state_t state, bthf_call_mode_t mode,
bthf_call_mpty_type_t mpty, const char *number,
- bthf_call_addrtype_t type);
+ bthf_call_addrtype_t type, bt_bdaddr_t *bd_addr );
/** notify of a call state change
* Each update notifies
@@ -277,6 +291,9 @@
/** Closes the interface. */
void (*cleanup)( void );
+
+    /** configuration for the SCO codec */
+ bt_status_t (*configure_wbs)( bt_bdaddr_t *bd_addr ,bthf_wbs_config_t config );
} bthf_interface_t;
__END_DECLS
diff --git a/include/hardware/bt_hf_client.h b/include/hardware/bt_hf_client.h
new file mode 100644
index 0000000..8acf1b2
--- /dev/null
+++ b/include/hardware/bt_hf_client.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012-2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_INCLUDE_BT_HF_CLIENT_H
+#define ANDROID_INCLUDE_BT_HF_CLIENT_H
+
+__BEGIN_DECLS
+
+typedef enum {
+ BTHF_CLIENT_CONNECTION_STATE_DISCONNECTED = 0,
+ BTHF_CLIENT_CONNECTION_STATE_CONNECTING,
+ BTHF_CLIENT_CONNECTION_STATE_CONNECTED,
+ BTHF_CLIENT_CONNECTION_STATE_SLC_CONNECTED,
+ BTHF_CLIENT_CONNECTION_STATE_DISCONNECTING
+} bthf_client_connection_state_t;
+
+typedef enum {
+ BTHF_CLIENT_AUDIO_STATE_DISCONNECTED = 0,
+ BTHF_CLIENT_AUDIO_STATE_CONNECTING,
+ BTHF_CLIENT_AUDIO_STATE_CONNECTED,
+ BTHF_CLIENT_AUDIO_STATE_CONNECTED_MSBC,
+} bthf_client_audio_state_t;
+
+typedef enum {
+ BTHF_CLIENT_VR_STATE_STOPPED = 0,
+ BTHF_CLIENT_VR_STATE_STARTED
+} bthf_client_vr_state_t;
+
+typedef enum {
+ BTHF_CLIENT_VOLUME_TYPE_SPK = 0,
+ BTHF_CLIENT_VOLUME_TYPE_MIC
+} bthf_client_volume_type_t;
+
+typedef enum
+{
+ BTHF_CLIENT_NETWORK_STATE_NOT_AVAILABLE = 0,
+ BTHF_CLIENT_NETWORK_STATE_AVAILABLE
+} bthf_client_network_state_t;
+
+typedef enum
+{
+ BTHF_CLIENT_SERVICE_TYPE_HOME = 0,
+ BTHF_CLIENT_SERVICE_TYPE_ROAMING
+} bthf_client_service_type_t;
+
+typedef enum {
+ BTHF_CLIENT_CALL_STATE_ACTIVE = 0,
+ BTHF_CLIENT_CALL_STATE_HELD,
+ BTHF_CLIENT_CALL_STATE_DIALING,
+ BTHF_CLIENT_CALL_STATE_ALERTING,
+ BTHF_CLIENT_CALL_STATE_INCOMING,
+ BTHF_CLIENT_CALL_STATE_WAITING,
+ BTHF_CLIENT_CALL_STATE_HELD_BY_RESP_HOLD,
+} bthf_client_call_state_t;
+
+typedef enum {
+ BTHF_CLIENT_CALL_NO_CALLS_IN_PROGRESS = 0,
+ BTHF_CLIENT_CALL_CALLS_IN_PROGRESS
+} bthf_client_call_t;
+
+typedef enum {
+ BTHF_CLIENT_CALLSETUP_NONE = 0,
+ BTHF_CLIENT_CALLSETUP_INCOMING,
+ BTHF_CLIENT_CALLSETUP_OUTGOING,
+ BTHF_CLIENT_CALLSETUP_ALERTING
+
+} bthf_client_callsetup_t;
+
+typedef enum {
+ BTHF_CLIENT_CALLHELD_NONE = 0,
+ BTHF_CLIENT_CALLHELD_HOLD_AND_ACTIVE,
+ BTHF_CLIENT_CALLHELD_HOLD,
+} bthf_client_callheld_t;
+
+typedef enum {
+ BTHF_CLIENT_RESP_AND_HOLD_HELD = 0,
+ BTRH_CLIENT_RESP_AND_HOLD_ACCEPT,
+ BTRH_CLIENT_RESP_AND_HOLD_REJECT,
+} bthf_client_resp_and_hold_t;
+
+typedef enum {
+ BTHF_CLIENT_CALL_DIRECTION_OUTGOING = 0,
+ BTHF_CLIENT_CALL_DIRECTION_INCOMING
+} bthf_client_call_direction_t;
+
+typedef enum {
+ BTHF_CLIENT_CALL_MPTY_TYPE_SINGLE = 0,
+ BTHF_CLIENT_CALL_MPTY_TYPE_MULTI
+} bthf_client_call_mpty_type_t;
+
+typedef enum {
+ BTHF_CLIENT_CMD_COMPLETE_OK = 0,
+ BTHF_CLIENT_CMD_COMPLETE_ERROR,
+ BTHF_CLIENT_CMD_COMPLETE_ERROR_NO_CARRIER,
+ BTHF_CLIENT_CMD_COMPLETE_ERROR_BUSY,
+ BTHF_CLIENT_CMD_COMPLETE_ERROR_NO_ANSWER,
+ BTHF_CLIENT_CMD_COMPLETE_ERROR_DELAYED,
+ BTHF_CLIENT_CMD_COMPLETE_ERROR_BLACKLISTED,
+ BTHF_CLIENT_CMD_COMPLETE_ERROR_CME
+} bthf_client_cmd_complete_t;
+
+typedef enum {
+ BTHF_CLIENT_CALL_ACTION_CHLD_0 = 0,
+ BTHF_CLIENT_CALL_ACTION_CHLD_1,
+ BTHF_CLIENT_CALL_ACTION_CHLD_2,
+ BTHF_CLIENT_CALL_ACTION_CHLD_3,
+ BTHF_CLIENT_CALL_ACTION_CHLD_4,
+ BTHF_CLIENT_CALL_ACTION_CHLD_1x,
+ BTHF_CLIENT_CALL_ACTION_CHLD_2x,
+ BTHF_CLIENT_CALL_ACTION_ATA,
+ BTHF_CLIENT_CALL_ACTION_CHUP,
+ BTHF_CLIENT_CALL_ACTION_BTRH_0,
+ BTHF_CLIENT_CALL_ACTION_BTRH_1,
+ BTHF_CLIENT_CALL_ACTION_BTRH_2,
+} bthf_client_call_action_t;
+
+typedef enum {
+ BTHF_CLIENT_SERVICE_UNKNOWN = 0,
+ BTHF_CLIENT_SERVICE_VOICE,
+ BTHF_CLIENT_SERVICE_FAX
+} bthf_client_subscriber_service_type_t;
+
+typedef enum {
+ BTHF_CLIENT_IN_BAND_RINGTONE_NOT_PROVIDED = 0,
+ BTHF_CLIENT_IN_BAND_RINGTONE_PROVIDED,
+} bthf_client_in_band_ring_state_t;
+
+/* Peer features masks */
+#define BTHF_CLIENT_PEER_FEAT_3WAY 0x00000001 /* Three-way calling */
+#define BTHF_CLIENT_PEER_FEAT_ECNR 0x00000002 /* Echo cancellation and/or noise reduction */
+#define BTHF_CLIENT_PEER_FEAT_VREC 0x00000004 /* Voice recognition */
+#define BTHF_CLIENT_PEER_FEAT_INBAND 0x00000008 /* In-band ring tone */
+#define BTHF_CLIENT_PEER_FEAT_VTAG 0x00000010 /* Attach a phone number to a voice tag */
+#define BTHF_CLIENT_PEER_FEAT_REJECT 0x00000020 /* Ability to reject incoming call */
+#define BTHF_CLIENT_PEER_FEAT_ECS 0x00000040 /* Enhanced Call Status */
+#define BTHF_CLIENT_PEER_FEAT_ECC 0x00000080 /* Enhanced Call Control */
+#define BTHF_CLIENT_PEER_FEAT_EXTERR 0x00000100 /* Extended error codes */
+#define BTHF_CLIENT_PEER_FEAT_CODEC 0x00000200 /* Codec Negotiation */
+
+/* Peer call handling features masks */
+#define BTHF_CLIENT_CHLD_FEAT_REL 0x00000001 /* 0 Release waiting call or held calls */
+#define BTHF_CLIENT_CHLD_FEAT_REL_ACC 0x00000002 /* 1 Release active calls and accept other
+                                                       (waiting or held) call */
+#define BTHF_CLIENT_CHLD_FEAT_REL_X 0x00000004 /* 1x Release specified active call only */
+#define BTHF_CLIENT_CHLD_FEAT_HOLD_ACC 0x00000008 /* 2 Active calls on hold and accept other
+ (waiting or held) call */
+#define BTHF_CLIENT_CHLD_FEAT_PRIV_X 0x00000010 /* 2x Request private mode with specified
+ call (put the rest on hold) */
+#define BTHF_CLIENT_CHLD_FEAT_MERGE 0x00000020 /* 3 Add held call to multiparty */
+#define BTHF_CLIENT_CHLD_FEAT_MERGE_DETACH 0x00000040 /* 4 Connect two calls and leave
+ (disconnect from) multiparty */
+
+/** Callback for connection state change.
+ * state will have one of the values from BtHfConnectionState
+ * peer/chld_features are valid only for BTHF_CLIENT_CONNECTION_STATE_SLC_CONNECTED state
+ */
+typedef void (* bthf_client_connection_state_callback)(bthf_client_connection_state_t state,
+ unsigned int peer_feat,
+ unsigned int chld_feat,
+ bt_bdaddr_t *bd_addr);
+
+/** Callback for audio connection state change.
+ * state will have one of the values from BtHfAudioState
+ */
+typedef void (* bthf_client_audio_state_callback)(bthf_client_audio_state_t state,
+ bt_bdaddr_t *bd_addr);
+
+/** Callback for VR connection state change.
+ * state will have one of the values from BtHfVRState
+ */
+typedef void (* bthf_client_vr_cmd_callback)(bthf_client_vr_state_t state);
+
+/** Callback for network state change
+ */
+typedef void (* bthf_client_network_state_callback) (bthf_client_network_state_t state);
+
+/** Callback for network roaming status change
+ */
+typedef void (* bthf_client_network_roaming_callback) (bthf_client_service_type_t type);
+
+/** Callback for signal strength indication
+ */
+typedef void (* bthf_client_network_signal_callback) (int signal_strength);
+
+/** Callback for battery level indication
+ */
+typedef void (* bthf_client_battery_level_callback) (int battery_level);
+
+/** Callback for current operator name
+ */
+typedef void (* bthf_client_current_operator_callback) (const char *name);
+
+/** Callback for call indicator
+ */
+typedef void (* bthf_client_call_callback) (bthf_client_call_t call);
+
+/** Callback for callsetup indicator
+ */
+typedef void (* bthf_client_callsetup_callback) (bthf_client_callsetup_t callsetup);
+
+/** Callback for callheld indicator
+ */
+typedef void (* bthf_client_callheld_callback) (bthf_client_callheld_t callheld);
+
+/** Callback for response and hold
+ */
+typedef void (* bthf_client_resp_and_hold_callback) (bthf_client_resp_and_hold_t resp_and_hold);
+
+/** Callback for Calling Line Identification notification
+ * Will be called only when there is an incoming call and number is provided.
+ */
+typedef void (* bthf_client_clip_callback) (const char *number);
+
+/**
+ * Callback for Call Waiting notification
+ */
+typedef void (* bthf_client_call_waiting_callback) (const char *number);
+
+/**
+ * Callback for listing current calls. Can be called multiple time.
+ * If number is unknown NULL is passed.
+ */
+typedef void (*bthf_client_current_calls) (int index, bthf_client_call_direction_t dir,
+ bthf_client_call_state_t state,
+ bthf_client_call_mpty_type_t mpty,
+ const char *number);
+
+/** Callback for audio volume change
+ */
+typedef void (*bthf_client_volume_change_callback) (bthf_client_volume_type_t type, int volume);
+
+/** Callback for command complete event
+ * cme is valid only for BTHF_CLIENT_CMD_COMPLETE_ERROR_CME type
+ */
+typedef void (*bthf_client_cmd_complete_callback) (bthf_client_cmd_complete_t type, int cme);
+
+/** Callback for subscriber information
+ */
+typedef void (* bthf_client_subscriber_info_callback) (const char *name,
+ bthf_client_subscriber_service_type_t type);
+
+/** Callback for in-band ring tone settings
+ */
+typedef void (* bthf_client_in_band_ring_tone_callback) (bthf_client_in_band_ring_state_t state);
+
+/**
+ * Callback for requested number from AG
+ */
+typedef void (* bthf_client_last_voice_tag_number_callback) (const char *number);
+
+/**
+ * Callback for sending ring indication to app
+ */
+typedef void (* bthf_client_ring_indication_callback) (void);
+
+/** BT-HF callback structure. */
+typedef struct {
+ /** set to sizeof(BtHfClientCallbacks) */
+ size_t size;
+ bthf_client_connection_state_callback connection_state_cb;
+ bthf_client_audio_state_callback audio_state_cb;
+ bthf_client_vr_cmd_callback vr_cmd_cb;
+ bthf_client_network_state_callback network_state_cb;
+ bthf_client_network_roaming_callback network_roaming_cb;
+ bthf_client_network_signal_callback network_signal_cb;
+ bthf_client_battery_level_callback battery_level_cb;
+ bthf_client_current_operator_callback current_operator_cb;
+ bthf_client_call_callback call_cb;
+ bthf_client_callsetup_callback callsetup_cb;
+ bthf_client_callheld_callback callheld_cb;
+ bthf_client_resp_and_hold_callback resp_and_hold_cb;
+ bthf_client_clip_callback clip_cb;
+ bthf_client_call_waiting_callback call_waiting_cb;
+ bthf_client_current_calls current_calls_cb;
+ bthf_client_volume_change_callback volume_change_cb;
+ bthf_client_cmd_complete_callback cmd_complete_cb;
+ bthf_client_subscriber_info_callback subscriber_info_cb;
+ bthf_client_in_band_ring_tone_callback in_band_ring_tone_cb;
+ bthf_client_last_voice_tag_number_callback last_voice_tag_number_callback;
+ bthf_client_ring_indication_callback ring_indication_cb;
+} bthf_client_callbacks_t;
+
+/** Represents the standard BT-HF interface. */
+typedef struct {
+
+ /** set to sizeof(BtHfClientInterface) */
+ size_t size;
+ /**
+ * Register the BtHf callbacks
+ */
+ bt_status_t (*init)(bthf_client_callbacks_t* callbacks);
+
+ /** connect to audio gateway */
+ bt_status_t (*connect)(bt_bdaddr_t *bd_addr);
+
+ /** disconnect from audio gateway */
+ bt_status_t (*disconnect)(bt_bdaddr_t *bd_addr);
+
+ /** create an audio connection */
+ bt_status_t (*connect_audio)(bt_bdaddr_t *bd_addr);
+
+ /** close the audio connection */
+ bt_status_t (*disconnect_audio)(bt_bdaddr_t *bd_addr);
+
+ /** start voice recognition */
+ bt_status_t (*start_voice_recognition)(void);
+
+ /** stop voice recognition */
+ bt_status_t (*stop_voice_recognition)(void);
+
+ /** volume control */
+ bt_status_t (*volume_control) (bthf_client_volume_type_t type, int volume);
+
+ /** place a call with number a number
+ * if number is NULL last called number is called (aka re-dial)*/
+ bt_status_t (*dial) (const char *number);
+
+ /** place a call with number specified by location (speed dial) */
+ bt_status_t (*dial_memory) (int location);
+
+ /** perform specified call related action
+ * idx is limited only for enhanced call control related action
+ */
+ bt_status_t (*handle_call_action) (bthf_client_call_action_t action, int idx);
+
+ /** query list of current calls */
+ bt_status_t (*query_current_calls) (void);
+
+ /** query name of current selected operator */
+ bt_status_t (*query_current_operator_name) (void);
+
+ /** Retrieve subscriber information */
+ bt_status_t (*retrieve_subscriber_info) (void);
+
+ /** Send DTMF code*/
+ bt_status_t (*send_dtmf) (char code);
+
+ /** Request a phone number from AG corresponding to last voice tag recorded */
+ bt_status_t (*request_last_voice_tag_number) (void);
+
+ /** Closes the interface. */
+ void (*cleanup)(void);
+
+ /** Send AT Command. */
+ bt_status_t (*send_at_cmd) (int cmd, int val1, int val2, const char *arg);
+} bthf_client_interface_t;
+
+__END_DECLS
+
+#endif /* ANDROID_INCLUDE_BT_HF_CLIENT_H */
diff --git a/include/hardware/bt_mce.h b/include/hardware/bt_mce.h
new file mode 100644
index 0000000..5d159b3
--- /dev/null
+++ b/include/hardware/bt_mce.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_INCLUDE_BT_MCE_H
+#define ANDROID_INCLUDE_BT_MCE_H
+
+__BEGIN_DECLS
+
+/** MAS instance description */
+typedef struct
+{
+ int id;
+ int scn;
+ int msg_types;
+ char *p_name;
+} btmce_mas_instance_t;
+
+/** callback for get_remote_mas_instances */
+typedef void (*btmce_remote_mas_instances_callback)(bt_status_t status, bt_bdaddr_t *bd_addr,
+ int num_instances, btmce_mas_instance_t *instances);
+
+typedef struct {
+ /** set to sizeof(btmce_callbacks_t) */
+ size_t size;
+ btmce_remote_mas_instances_callback remote_mas_instances_cb;
+} btmce_callbacks_t;
+
+typedef struct {
+ /** set to size of this struct */
+ size_t size;
+
+ /** register BT MCE callbacks */
+ bt_status_t (*init)(btmce_callbacks_t *callbacks);
+
+ /** search for MAS instances on remote device */
+ bt_status_t (*get_remote_mas_instances)(bt_bdaddr_t *bd_addr);
+} btmce_interface_t;
+
+__END_DECLS
+
+#endif /* ANDROID_INCLUDE_BT_MCE_H */
diff --git a/include/hardware/bt_rc.h b/include/hardware/bt_rc.h
index d455543..c565c48 100644
--- a/include/hardware/bt_rc.h
+++ b/include/hardware/bt_rc.h
@@ -171,7 +171,7 @@
/** Callback for passthrough commands */
typedef void (* btrc_passthrough_cmd_callback) (int id, int key_state);
-/** BT-RC callback structure. */
+/** BT-RC Target callback structure. */
typedef struct {
/** set to sizeof(BtRcCallbacks) */
size_t size;
@@ -189,7 +189,7 @@
btrc_passthrough_cmd_callback passthrough_cmd_cb;
} btrc_callbacks_t;
-/** Represents the standard BT-RC interface. */
+/** Represents the standard BT-RC AVRCP Target interface. */
typedef struct {
/** set to sizeof(BtRcInterface) */
@@ -261,6 +261,36 @@
void (*cleanup)( void );
} btrc_interface_t;
+
+typedef void (* btrc_passthrough_rsp_callback) (int id, int key_state);
+
+typedef void (* btrc_connection_state_callback) (bool state, bt_bdaddr_t *bd_addr);
+
+/** BT-RC Controller callback structure. */
+typedef struct {
+ /** set to sizeof(BtRcCallbacks) */
+ size_t size;
+ btrc_passthrough_rsp_callback passthrough_rsp_cb;
+ btrc_connection_state_callback connection_state_cb;
+} btrc_ctrl_callbacks_t;
+
+/** Represents the standard BT-RC AVRCP Controller interface. */
+typedef struct {
+
+ /** set to sizeof(BtRcInterface) */
+ size_t size;
+ /**
+ * Register the BtRc callbacks
+ */
+ bt_status_t (*init)( btrc_ctrl_callbacks_t* callbacks );
+
+ /** send pass through command to target */
+ bt_status_t (*send_pass_through_cmd) ( bt_bdaddr_t *bd_addr, uint8_t key_code, uint8_t key_state );
+
+ /** Closes the interface. */
+ void (*cleanup)( void );
+} btrc_ctrl_interface_t;
+
__END_DECLS
#endif /* ANDROID_INCLUDE_BT_RC_H */
diff --git a/include/hardware/camera3.h b/include/hardware/camera3.h
index afc9d9f..b024090 100644
--- a/include/hardware/camera3.h
+++ b/include/hardware/camera3.h
@@ -21,19 +21,25 @@
#include "camera_common.h"
/**
- * Camera device HAL 3.1 [ CAMERA_DEVICE_API_VERSION_3_1 ]
+ * Camera device HAL 3.2 [ CAMERA_DEVICE_API_VERSION_3_2 ]
*
* EXPERIMENTAL.
*
* Supports the android.hardware.Camera API.
*
* Camera devices that support this version of the HAL must return
- * CAMERA_DEVICE_API_VERSION_3_1 in camera_device_t.common.version and in
+ * CAMERA_DEVICE_API_VERSION_3_2 in camera_device_t.common.version and in
* camera_info_t.device_version (from camera_module_t.get_camera_info).
*
- * Camera modules that may contain version 3.1 devices must implement at least
- * version 2.0 of the camera module interface (as defined by
- * camera_module_t.common.module_api_version).
+ * CAMERA_DEVICE_API_VERSION_3_2:
+ * Camera modules that may contain version 3.2 devices must implement at
+ * least version 2.2 of the camera module interface (as defined by
+ * camera_module_t.common.module_api_version).
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ * Camera modules that may contain version 3.1 (or 3.0) devices must
+ * implement at least version 2.0 of the camera module interface
+ * (as defined by camera_module_t.common.module_api_version).
*
* See camera_common.h for more versioning details.
*
@@ -44,6 +50,9 @@
* S4. 3A modes and state machines
* S5. Cropping
* S6. Error management
+ * S7. Key Performance Indicator (KPI) glossary
+ * S8. Sample Use Cases
+ * S9. Notes on Controls and Metadata
*/
/**
@@ -88,6 +97,27 @@
* - configure_streams passes consumer usage flags to the HAL.
*
* - flush call to drop all in-flight requests/buffers as fast as possible.
+ *
+ * 3.2: Minor revision of expanded-capability HAL:
+ *
+ * - Deprecates get_metadata_vendor_tag_ops. Please use get_vendor_tag_ops
+ * in camera_common.h instead.
+ *
+ * - register_stream_buffers deprecated. All gralloc buffers provided
+ * by framework to HAL in process_capture_request may be new at any time.
+ *
+ * - add partial result support. process_capture_result may be called
+ * multiple times with a subset of the available result before the full
+ * result is available.
+ *
+ * - add manual template to camera3_request_template. The applications may
+ * use this template to control the capture settings directly.
+ *
+ * - Rework the bidirectional and input stream specifications.
+ *
+ * - change the input buffer return path. The buffer is returned in
+ * process_capture_result instead of process_capture_request.
+ *
*/
/**
@@ -108,12 +138,19 @@
* 4. The framework calls camera3_device_t->ops->configure_streams() with a list
* of input/output streams to the HAL device.
*
- * 5. The framework allocates gralloc buffers and calls
+ * 5. <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
+ * The framework allocates gralloc buffers and calls
* camera3_device_t->ops->register_stream_buffers() for at least one of the
* output streams listed in configure_streams. The same stream is registered
* only once.
*
- * 5. The framework requests default settings for some number of use cases with
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * camera3_device_t->ops->register_stream_buffers() is not called and must
+ * be NULL.
+ *
+ * 6. The framework requests default settings for some number of use cases with
* calls to camera3_device_t->ops->construct_default_request_settings(). This
* may occur any time after step 3.
*
@@ -124,23 +161,64 @@
* camera3_device_t->ops->process_capture_request(). The HAL must block the
* return of this call until it is ready for the next request to be sent.
*
- * 8. The framework continues to submit requests, and possibly call
- * register_stream_buffers() for not-yet-registered streams, and call
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * The buffer_handle_t provided in the camera3_stream_buffer_t array
+ * in the camera3_capture_request_t may be new and never-before-seen
+ * by the HAL on any given new request.
+ *
+ * 8. The framework continues to submit requests, and call
* construct_default_request_settings to get default settings buffers for
* other use cases.
*
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
+ * The framework may call register_stream_buffers() at this time for
+ * not-yet-registered streams.
+ *
* 9. When the capture of a request begins (sensor starts exposing for the
* capture), the HAL calls camera3_callback_ops_t->notify() with the SHUTTER
* event, including the frame number and the timestamp for start of exposure.
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
* This notify call must be made before the first call to
* process_capture_result() for that frame number.
*
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * The camera3_callback_ops_t->notify() call with the SHUTTER event should
+ * be made as early as possible since the framework will be unable to
+ * deliver gralloc buffers to the application layer (for that frame) until
+ * it has a valid timestamp for the start of exposure.
+ *
+ * Both partial metadata results and the gralloc buffers may be sent to the
+ * framework at any time before or after the SHUTTER event.
+ *
* 10. After some pipeline delay, the HAL begins to return completed captures to
* the framework with camera3_callback_ops_t->process_capture_result(). These
* are returned in the same order as the requests were submitted. Multiple
* requests can be in flight at once, depending on the pipeline depth of the
* camera HAL device.
*
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Once a buffer is returned by process_capture_result as part of the
+ * camera3_stream_buffer_t array, and the fence specified by release_fence
+ * has been signaled (this is a no-op for -1 fences), the ownership of that
+ * buffer is considered to be transferred back to the framework. After that,
+ * the HAL must no longer retain that particular buffer, and the
+ * framework may clean up the memory for it immediately.
+ *
+ * process_capture_result may be called multiple times for a single frame,
+ * each time with a new disjoint piece of metadata and/or set of gralloc
+ * buffers. The framework will accumulate these partial metadata results
+ * into one result.
+ *
+ * In particular, it is legal for a process_capture_result to be called
+ * simultaneously for both a frame N and a frame N+1 as long as the
+ * above rule holds for gralloc buffers (both input and output).
+ *
* 11. After some time, the framework may stop submitting new requests, wait for
* the existing captures to complete (all buffers filled, all results
* returned), and then call configure_streams() again. This resets the camera
@@ -201,30 +279,26 @@
* for processing rate).
*
* - Limited-mode devices do not need to support most of the
- * settings/result/static info metadata. Full-mode devices must support all
- * metadata fields listed in TODO. Specifically, only the following settings
+ * settings/result/static info metadata. Specifically, only the following settings
* are expected to be consumed or produced by a limited-mode HAL device:
*
- * android.control.aeAntibandingMode (controls)
- * android.control.aeExposureCompensation (controls)
- * android.control.aeLock (controls)
- * android.control.aeMode (controls)
- * [OFF means ON_FLASH_TORCH - TODO]
- * android.control.aeRegions (controls)
- * android.control.aeTargetFpsRange (controls)
- * android.control.afMode (controls)
- * [OFF means infinity focus]
- * android.control.afRegions (controls)
- * android.control.awbLock (controls)
- * android.control.awbMode (controls)
- * [OFF not supported]
- * android.control.awbRegions (controls)
- * android.control.captureIntent (controls)
- * android.control.effectMode (controls)
- * android.control.mode (controls)
- * [OFF not supported]
- * android.control.sceneMode (controls)
- * android.control.videoStabilizationMode (controls)
+ * android.control.aeAntibandingMode (controls and dynamic)
+ * android.control.aeExposureCompensation (controls and dynamic)
+ * android.control.aeLock (controls and dynamic)
+ * android.control.aeMode (controls and dynamic)
+ * android.control.aeRegions (controls and dynamic)
+ * android.control.aeTargetFpsRange (controls and dynamic)
+ * android.control.aePrecaptureTrigger (controls and dynamic)
+ * android.control.afMode (controls and dynamic)
+ * android.control.afRegions (controls and dynamic)
+ * android.control.awbLock (controls and dynamic)
+ * android.control.awbMode (controls and dynamic)
+ * android.control.awbRegions (controls and dynamic)
+ * android.control.captureIntent (controls and dynamic)
+ * android.control.effectMode (controls and dynamic)
+ * android.control.mode (controls and dynamic)
+ * android.control.sceneMode (controls and dynamic)
+ * android.control.videoStabilizationMode (controls and dynamic)
* android.control.aeAvailableAntibandingModes (static)
* android.control.aeAvailableModes (static)
* android.control.aeAvailableTargetFpsRanges (static)
@@ -237,67 +311,50 @@
* android.control.awbAvailableModes (static)
* android.control.maxRegions (static)
* android.control.sceneModeOverrides (static)
- * android.control.aeRegions (dynamic)
* android.control.aeState (dynamic)
- * android.control.afMode (dynamic)
- * android.control.afRegions (dynamic)
* android.control.afState (dynamic)
- * android.control.awbMode (dynamic)
- * android.control.awbRegions (dynamic)
* android.control.awbState (dynamic)
- * android.control.mode (dynamic)
*
+ * android.flash.mode (controls and dynamic)
* android.flash.info.available (static)
*
* android.info.supportedHardwareLevel (static)
*
- * android.jpeg.gpsCoordinates (controls)
- * android.jpeg.gpsProcessingMethod (controls)
- * android.jpeg.gpsTimestamp (controls)
- * android.jpeg.orientation (controls)
- * android.jpeg.quality (controls)
- * android.jpeg.thumbnailQuality (controls)
- * android.jpeg.thumbnailSize (controls)
+ * android.jpeg.gpsCoordinates (controls and dynamic)
+ * android.jpeg.gpsProcessingMethod (controls and dynamic)
+ * android.jpeg.gpsTimestamp (controls and dynamic)
+ * android.jpeg.orientation (controls and dynamic)
+ * android.jpeg.quality (controls and dynamic)
+ * android.jpeg.thumbnailQuality (controls and dynamic)
+ * android.jpeg.thumbnailSize (controls and dynamic)
* android.jpeg.availableThumbnailSizes (static)
* android.jpeg.maxSize (static)
- * android.jpeg.gpsCoordinates (dynamic)
- * android.jpeg.gpsProcessingMethod (dynamic)
- * android.jpeg.gpsTimestamp (dynamic)
- * android.jpeg.orientation (dynamic)
- * android.jpeg.quality (dynamic)
- * android.jpeg.size (dynamic)
- * android.jpeg.thumbnailQuality (dynamic)
- * android.jpeg.thumbnailSize (dynamic)
*
* android.lens.info.minimumFocusDistance (static)
*
- * android.request.id (controls)
- * android.request.id (dynamic)
+ * android.request.id (controls and dynamic)
*
- * android.scaler.cropRegion (controls)
- * [ignores (x,y), assumes center-zoom]
- * android.scaler.availableFormats (static)
- * [RAW not supported]
- * android.scaler.availableJpegMinDurations (static)
- * android.scaler.availableJpegSizes (static)
+ * android.scaler.cropRegion (controls and dynamic)
+ * android.scaler.availableStreamConfigurations (static)
+ * android.scaler.availableMinFrameDurations (static)
+ * android.scaler.availableStallDurations (static)
* android.scaler.availableMaxDigitalZoom (static)
- * android.scaler.availableProcessedMinDurations (static)
- * android.scaler.availableProcessedSizes (static)
- * [full resolution not supported]
* android.scaler.maxDigitalZoom (static)
- * android.scaler.cropRegion (dynamic)
+ * android.scaler.croppingType (static)
*
* android.sensor.orientation (static)
* android.sensor.timestamp (dynamic)
*
- * android.statistics.faceDetectMode (controls)
+ * android.statistics.faceDetectMode (controls and dynamic)
* android.statistics.info.availableFaceDetectModes (static)
- * android.statistics.faceDetectMode (dynamic)
* android.statistics.faceIds (dynamic)
* android.statistics.faceLandmarks (dynamic)
* android.statistics.faceRectangles (dynamic)
* android.statistics.faceScores (dynamic)
*
+ * android.sync.frameNumber (dynamic)
+ * android.sync.maxLatency (static)
+ *
* - Captures in limited mode that include high-resolution (> 1080p) output
* buffers may block in process_capture_request() until all the output buffers
* have been filled. A full-mode HAL device must process sequences of
@@ -308,6 +365,14 @@
* that request completes for high-resolution captures for limited-mode
* devices.
*
+ * - Full-mode devices must support below additional capabilities:
+ * - 30fps at maximum resolution is preferred, more than 20fps is required.
+ * - Per frame control (android.sync.maxLatency == PER_FRAME_CONTROL).
+ * - Sensor manual control metadata. See MANUAL_SENSOR defined in
+ * android.request.availableCapabilities.
+ * - Post-processing manual control metadata. See MANUAL_POST_PROCESSING defined
+ * in android.request.availableCapabilities.
+ *
*/
/**
@@ -816,7 +881,12 @@
* view it is receiving based on the crop region, the dimensions of the image
* sensor, and the lens focal length.
*
- * Since the crop region applies to all streams, which may have different aspect
+ * It is assumed that the cropping is applied after the raw to other color
+ * space conversion. Raw streams (RAW16 and RAW_OPAQUE) don't have this conversion stage,
+ * and are not croppable. Therefore, the crop region must be ignored by the HAL
+ * for raw streams.
+ *
+ * Since the crop region applies to all non-raw streams, which may have different aspect
* ratios than the crop region, the exact sensor region used for each stream may
* be smaller than the crop region. Specifically, each stream should maintain
* square pixels and its aspect ratio by minimally further cropping the defined
@@ -963,15 +1033,136 @@
* ERROR_BUFFER for each failed buffer.
*
* In each of these transient failure cases, the HAL must still call
- * process_capture_result, with valid output buffer_handle_t. If the result
- * metadata could not be produced, it should be NULL. If some buffers could not
- * be filled, their sync fences must be set to the error state.
+ * process_capture_result, with valid output and input (if an input buffer was
+ * submitted) buffer_handle_t. If the result metadata could not be produced, it
+ * should be NULL. If some buffers could not be filled, they must be returned with
+ * process_capture_result in the error state, their release fences must be set to
+ * the acquire fences passed by the framework, or -1 if they have been waited on by
+ * the HAL already.
*
* Invalid input arguments result in -EINVAL from the appropriate methods. In
* that case, the framework must act as if that call had never been made.
*
*/
+/**
+ * S7. Key Performance Indicator (KPI) glossary:
+ *
+ * This includes some critical definitions that are used by KPI metrics.
+ *
+ * Pipeline Latency:
+ * For a given capture request, the duration from the framework calling
+ * process_capture_request to the HAL sending capture result and all buffers
+ * back by process_capture_result call. To make the Pipeline Latency measure
+ * independent of frame rate, it is measured by frame count.
+ *
+ * For example, when frame rate is 30 (fps), the frame duration (time interval
+ * between adjacent frame capture time) is 33 (ms).
+ * If it takes 5 frames for framework to get the result and buffers back for
+ * a given request, then the Pipeline Latency is 5 (frames), instead of
+ * 5 x 33 = 165 (ms).
+ *
+ * The Pipeline Latency is determined by android.request.pipelineDepth and
+ * android.request.pipelineMaxDepth, see their definitions for more details.
+ *
+ */
+
+/**
+ * S8. Sample Use Cases:
+ *
+ * This includes some typical use case examples the camera HAL may support.
+ *
+ * S8.1 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_INPUT stream.
+ *
+ * When Zero Shutter Lag (ZSL) is supported by the camera device, the INPUT stream
+ * can be used for application/framework implemented ZSL use case. This kind of stream
+ * will be used by the framework as follows:
+ *
+ * 1. Framework configures an opaque raw format output stream that is used to
+ * produce the ZSL output buffers. The stream pixel format will be
+ * HAL_PIXEL_FORMAT_RAW_OPAQUE.
+ *
+ * 2. Framework configures an opaque raw format input stream that is used to
+ * send the reprocess ZSL buffers to the HAL. The stream pixel format will
+ * also be HAL_PIXEL_FORMAT_RAW_OPAQUE.
+ *
+ * 3. Framework configures a YUV/JPEG output stream that is used to receive the
+ * reprocessed data. The stream pixel format will be YCbCr_420/HAL_PIXEL_FORMAT_BLOB.
+ *
+ * 4. Framework picks a ZSL buffer from the output stream when a ZSL capture is
+ * issued by the application, and sends the data back as an input buffer in a
+ * reprocessing request, then sends to the HAL for reprocessing.
+ *
+ * 5. The HAL sends back the output JPEG result to framework.
+ *
+ * The HAL can select the actual raw buffer format and configure the ISP pipeline
+ * appropriately based on the HAL_PIXEL_FORMAT_RAW_OPAQUE format. See this format
+ * definition for more details.
+ *
+ * S8.2 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_BIDIRECTIONAL stream.
+ *
+ * For this use case, the bidirectional stream will be used by the framework as follows:
+ *
+ * 1. The framework includes a buffer from this stream as output buffer in a
+ * request as normal.
+ *
+ * 2. Once the HAL device returns a filled output buffer to the framework,
+ * the framework may do one of two things with the filled buffer:
+ *
+ * 2. a. The framework uses the filled data, and returns the now-used buffer
+ * to the stream queue for reuse. This behavior exactly matches the
+ * OUTPUT type of stream.
+ *
+ * 2. b. The framework wants to reprocess the filled data, and uses the
+ * buffer as an input buffer for a request. Once the HAL device has
+ * used the reprocessing buffer, it then returns it to the
+ * framework. The framework then returns the now-used buffer to the
+ * stream queue for reuse.
+ *
+ * 3. The HAL device will be given the buffer again as an output buffer for
+ * a request at some future point.
+ *
+ * For ZSL use case, the pixel format for bidirectional stream will be
+ * HAL_PIXEL_FORMAT_RAW_OPAQUE or HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED if it
+ * is listed in android.scaler.availableInputOutputFormatsMap. When
+ * HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED is used, the gralloc
+ * usage flags for the consumer endpoint will be set to GRALLOC_USAGE_HW_CAMERA_ZSL.
+ * A configuration stream list that has a BIDIRECTIONAL stream used as input will
+ * usually also have a distinct OUTPUT stream to get the reprocessing data. For example,
+ * for the ZSL use case, the stream list might be configured with the following:
+ *
+ * - A HAL_PIXEL_FORMAT_RAW_OPAQUE bidirectional stream is used
+ * as input.
+ * - And a HAL_PIXEL_FORMAT_BLOB (JPEG) output stream.
+ *
+ */
+
+/**
+ * S9. Notes on Controls and Metadata
+ *
+ * This section contains notes about the interpretation and usage of various metadata tags.
+ *
+ * S9.1 HIGH_QUALITY and FAST modes.
+ *
+ * Many camera post-processing blocks may be listed as having HIGH_QUALITY,
+ * FAST, and OFF operating modes. These blocks will typically also have an
+ * 'available modes' tag representing which of these operating modes are
+ * available on a given device. The general policy regarding implementing
+ * these modes is as follows:
+ *
+ * 1. Operating mode controls of hardware blocks that cannot be disabled
+ * must not list OFF in their corresponding 'available modes' tags.
+ *
+ * 2. OFF will always be included in their corresponding 'available modes'
+ * tag if it is possible to disable that hardware block.
+ *
+ * 3. FAST must always be included in the 'available modes' tags for all
+ * post-processing blocks supported on the device. If a post-processing
+ * block also has a slower and higher quality operating mode that does
+ * not meet the framerate requirements for FAST mode, HIGH_QUALITY should
+ * be included in the 'available modes' tag to represent this operating
+ * mode.
+ */
__BEGIN_DECLS
struct camera3_device;
@@ -1006,6 +1197,21 @@
* for reading buffers from this stream and sending them through the camera
* processing pipeline, as if the buffer was a newly captured image from the
* imager.
+ *
+ * The pixel format for input stream can be any format reported by
+ * android.scaler.availableInputOutputFormatsMap. The pixel format of the
+ * output stream that is used to produce the reprocessing data may be any
+ * format reported by android.scaler.availableStreamConfigurations. The
+ * supported input/output stream combinations depend on the camera device
+ * capabilities, see android.scaler.availableInputOutputFormatsMap for
+ * stream map details.
+ *
+ * This kind of stream is generally used to reprocess data into higher
+ * quality images (that otherwise would cause a frame rate performance
+ * loss), or to do off-line reprocessing.
+ *
+ * A typical use case is Zero Shutter Lag (ZSL), see S8.1 for more details.
+ *
*/
CAMERA3_STREAM_INPUT = 1,
@@ -1014,29 +1220,9 @@
* used as an output stream, but occasionally one already-filled buffer may
* be sent back to the HAL device for reprocessing.
*
- * This kind of stream is meant generally for zero-shutter-lag features,
- * where copying the captured image from the output buffer to the
- * reprocessing input buffer would be expensive. The stream will be used by
- * the framework as follows:
- *
- * 1. The framework includes a buffer from this stream as output buffer in a
- * request as normal.
- *
- * 2. Once the HAL device returns a filled output buffer to the framework,
- * the framework may do one of two things with the filled buffer:
- *
- * 2. a. The framework uses the filled data, and returns the now-used buffer
- * to the stream queue for reuse. This behavior exactly matches the
- * OUTPUT type of stream.
- *
- * 2. b. The framework wants to reprocess the filled data, and uses the
- * buffer as an input buffer for a request. Once the HAL device has
- * used the reprocessing buffer, it then returns it to the
- * framework. The framework then returns the now-used buffer to the
- * stream queue for reuse.
- *
- * 3. The HAL device will be given the buffer again as an output buffer for
- * a request at some future point.
+ * This kind of stream is meant generally for Zero Shutter Lag (ZSL)
+ * features, where copying the captured image from the output buffer to the
+ * reprocessing input buffer would be expensive. See S8.2 for more details.
*
* Note that the HAL will always be reprocessing data it produced.
*
@@ -1105,9 +1291,17 @@
* gralloc module will select a format based on the usage flags provided by
* the camera device and the other endpoint of the stream.
*
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
* The camera HAL device must inspect the buffers handed to it in the
* subsequent register_stream_buffers() call to obtain the
* implementation-specific format details, if necessary.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * register_stream_buffers() won't be called by the framework, so the HAL
+ * should configure the ISP and sensor pipeline based purely on the sizes,
+ * usage flags, and formats for the configured streams.
*/
int format;
@@ -1257,6 +1451,14 @@
*
* For input buffers, the HAL must not change the acquire_fence field during
* the process_capture_request() call.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * When the HAL returns an input buffer to the framework with
+ * process_capture_result(), the acquire_fence must be set to -1. If the HAL
+ * never waits on input buffer acquire fence due to an error, the sync
+ * fences should be handled similarly to the way they are handled for output
+ * buffers.
*/
int acquire_fence;
@@ -1265,10 +1467,25 @@
* returning buffers to the framework, or write -1 to indicate that no
* waiting is required for this buffer.
*
- * For the input buffer, the release fence must be set by the
- * process_capture_request() call. For the output buffers, the fences must
- * be set in the output_buffers array passed to process_capture_result().
+ * For the output buffers, the fences must be set in the output_buffers
+ * array passed to process_capture_result().
*
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
+ * For the input buffer, the release fence must be set by the
+ * process_capture_request() call.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * For the input buffer, the fences must be set in the input_buffer
+ * passed to process_capture_result().
+ *
+ * After signaling the release_fence for this buffer, the HAL
+ * should not make any further attempts to access this buffer as the
+ * ownership has been fully transferred back to the framework.
+ *
+ * If a fence of -1 was specified then the ownership of this buffer
+ * is transferred back immediately upon the call of process_capture_result.
*/
int release_fence;
@@ -1280,6 +1497,12 @@
* The complete set of gralloc buffers for a stream. This structure is given to
* register_stream_buffers() to allow the camera HAL device to register/map/etc
* newly allocated stream buffers.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Deprecated (and not used). In particular,
+ * register_stream_buffers is also deprecated and will never be invoked.
+ *
*/
typedef struct camera3_stream_buffer_set {
/**
@@ -1309,17 +1532,18 @@
* Transport header for compressed JPEG buffers in output streams.
*
* To capture JPEG images, a stream is created using the pixel format
- * HAL_PIXEL_FORMAT_BLOB, and the static metadata field android.jpeg.maxSize is
- * used as the buffer size. Since compressed JPEG images are of variable size,
- * the HAL needs to include the final size of the compressed image using this
- * structure inside the output stream buffer. The JPEG blob ID field must be set
- * to CAMERA3_JPEG_BLOB_ID.
+ * HAL_PIXEL_FORMAT_BLOB. The buffer size for the stream is calculated by the
+ * framework, based on the static metadata field android.jpeg.maxSize. Since
+ * compressed JPEG images are of variable size, the HAL needs to include the
+ * final size of the compressed image using this structure inside the output
+ * stream buffer. The JPEG blob ID field must be set to CAMERA3_JPEG_BLOB_ID.
*
- * Transport header should be at the end of the JPEG output stream buffer. That
- * means the jpeg_blob_id must start at byte[android.jpeg.maxSize -
- * sizeof(camera3_jpeg_blob)]. Any HAL using this transport header must
- * account for it in android.jpeg.maxSize. The JPEG data itself starts at
- * the beginning of the buffer and should be jpeg_size bytes long.
+ * Transport header should be at the end of the JPEG output stream buffer. That
+ * means the jpeg_blob_id must start at byte[buffer_size -
+ * sizeof(camera3_jpeg_blob)], where the buffer_size is the size of gralloc buffer.
+ * Any HAL using this transport header must account for it in android.jpeg.maxSize.
+ * The JPEG data itself starts at the beginning of the buffer and should be
+ * jpeg_size bytes long.
*/
typedef struct camera3_jpeg_blob {
uint16_t jpeg_blob_id;
@@ -1534,6 +1758,16 @@
*/
CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG = 5,
+ /**
+ * A basic template for direct application control of capture
+ * parameters. All automatic control is disabled (auto-exposure, auto-white
+ * balance, auto-focus), and post-processing parameters are set to preview
+ * quality. The manual capture parameters (exposure, sensitivity, etc.)
+ * are set to reasonable defaults, but should be overridden by the
+ * application depending on the intended use case.
+ */
+ CAMERA3_TEMPLATE_MANUAL = 6,
+
/* Total number of templates */
CAMERA3_TEMPLATE_COUNT,
@@ -1592,8 +1826,15 @@
* The HAL is required to wait on the acquire sync fence of the input buffer
* before accessing it.
*
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
* Any input buffer included here will have been registered with the HAL
* through register_stream_buffers() before its inclusion in a request.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * The buffers will not have been pre-registered with the HAL.
+ * Subsequent requests may reuse buffers, or provide entirely new buffers.
*/
camera3_stream_buffer_t *input_buffer;
@@ -1606,13 +1847,21 @@
/**
* An array of num_output_buffers stream buffers, to be filled with image
* data from this capture/reprocess. The HAL must wait on the acquire fences
- * of each stream buffer before writing to them. All the buffers included
- * here will have been registered with the HAL through
- * register_stream_buffers() before their inclusion in a request.
+ * of each stream buffer before writing to them.
*
* The HAL takes ownership of the actual buffer_handle_t entries in
* output_buffers; the framework does not access them until they are
* returned in a camera3_capture_result_t.
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
+ * All the buffers included here will have been registered with the HAL
+ * through register_stream_buffers() before their inclusion in a request.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Any or all of the buffers included here may be brand new in this
+ * request (having never before been seen by the HAL).
*/
const camera3_stream_buffer_t *output_buffers;
@@ -1625,7 +1874,9 @@
* sent to the framework asynchronously with process_capture_result(), in
* response to a single capture request sent to the HAL with
* process_capture_request(). Multiple process_capture_result() calls may be
- * performed by the HAL for each request. Each call, all with the same frame
+ * performed by the HAL for each request.
+ *
+ * Each call, all with the same frame
* number, may contain some subset of the output buffers, and/or the result
* metadata. The metadata may only be provided once for a given frame number;
* all other calls must set the result metadata to NULL.
@@ -1635,6 +1886,29 @@
* output buffer may come with a release sync fence that the framework will wait
* on before reading, in case the buffer has not yet been filled by the HAL.
*
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * The metadata may be provided multiple times for a single frame number. The
+ * framework will accumulate together the final result set by combining each
+ * partial result together into the total result set.
+ *
+ * If an input buffer is given in a request, the HAL must return it in one of
+ * the process_capture_result calls, and the call may be to just return the input
+ * buffer, without metadata and output buffers; the sync fences must be handled
+ * the same way they are done for output buffers.
+ *
+ *
+ * Performance considerations:
+ *
+ * Applications will also receive these partial results immediately, so sending
+ * partial results is a highly recommended performance optimization to avoid
+ * the total pipeline latency before sending the results for what is known very
+ * early on in the pipeline.
+ *
+ * A typical use case might be calculating the AF state halfway through the
+ * pipeline; by sending the state back to the framework immediately, we get a
+ * 50% performance increase in the perceived responsiveness of the auto-focus.
+ *
*/
typedef struct camera3_capture_result {
/**
@@ -1657,6 +1931,18 @@
*
* If there was an error producing the result metadata, result must be an
* empty metadata buffer, and notify() must be called with ERROR_RESULT.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Multiple calls to process_capture_result() with a given frame_number
+ * may include the result metadata.
+ *
+ * Partial metadata submitted should not include any metadata key returned
+ * in a previous partial result for a given frame. Each new partial result
+ * for that frame must also set a distinct partial_result value.
+ *
+ * If notify has been called with ERROR_RESULT, all further partial
+ * results for that frame are ignored by the framework.
*/
const camera_metadata_t *result;
@@ -1666,7 +1952,8 @@
* less than the buffer count in the capture request, at least one more call
* to process_capture_result with the same frame_number must be made, to
* return the remaining output buffers to the framework. This may only be
- * zero if the structure includes valid result metadata.
+ * zero if the structure includes valid result metadata or an input buffer
+ * is returned in this result.
*/
uint32_t num_output_buffers;
@@ -1690,9 +1977,71 @@
* num_output_buffers is zero, this may be NULL. In that case, at least one
* more process_capture_result call must be made by the HAL to provide the
* output buffers.
+ *
+ * When process_capture_result is called with a new buffer for a frame,
+ * all previous frames' buffers for that corresponding stream must have been
+ * already delivered (the fences need not have yet been signaled).
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Gralloc buffers for a frame may be sent to framework before the
+ * corresponding SHUTTER-notify.
+ *
+ * Performance considerations:
+ *
+ * Buffers delivered to the framework will not be dispatched to the
+ * application layer until a start of exposure timestamp has been received
+ * via a SHUTTER notify() call. It is highly recommended to
+ * dispatch that call as early as possible.
*/
const camera3_stream_buffer_t *output_buffers;
+ /**
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * The handle for the input stream buffer for this capture. It may not
+ * yet be consumed at the time the HAL calls process_capture_result(); the
+ * framework will wait on the release sync fences provided by the HAL before
+ * reusing the buffer.
+ *
+ * The HAL should handle the sync fences the same way they are done for
+ * output_buffers.
+ *
+ * Only one input buffer is allowed to be sent per request. Similarly to
+ * output buffers, the ordering of returned input buffers must be
+ * maintained by the HAL.
+ *
+ * Performance considerations:
+ *
+ * The input buffer should be returned as early as possible. If the HAL
+ * supports sync fences, it can call process_capture_result to hand it back
+ * with sync fences being set appropriately. If the sync fences are not
+ * supported, the buffer can only be returned when it is consumed, which
+ * may take long time; the HAL may choose to copy this input buffer to make
+ * the buffer return sooner.
+ */
+ const camera3_stream_buffer_t *input_buffer;
+
+ /**
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * In order to take advantage of partial results, the HAL must set the
+ * static metadata android.request.partialResultCount to the number of
+ * partial results it will send for each frame.
+ *
+ * Each new capture result with a partial result must set
+ * this field (partial_result) to a distinct inclusive value between
+ * 1 and android.request.partialResultCount.
+ *
+ * HALs not wishing to take advantage of this feature must not
+ * set an android.request.partialResultCount or partial_result to a value
+ * other than 1.
+ *
+ * This value must be set to 0 when a capture result contains buffers only
+ * and no metadata.
+ */
+ uint32_t partial_result;
+
} camera3_capture_result_t;
/**********************************************************************
@@ -1768,6 +2117,13 @@
* message. In this case, individual ERROR_RESULT/ERROR_BUFFER messages
* should not be sent.
*
+ * Performance requirements:
+ *
+ * This is a non-blocking call. The framework will return this call in 5ms.
+ *
+ * The pipeline latency (see S7 for definition) should be less than or equal to
+ * 4 frame intervals, and must be less than or equal to 8 frame intervals.
+ *
*/
void (*process_capture_result)(const struct camera3_callback_ops *,
const camera3_capture_result_t *result);
@@ -1781,11 +2137,25 @@
* with the HAL, and the msg only needs to be valid for the duration of this
* call.
*
+ * Multiple threads may call notify() simultaneously.
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
* The notification for the start of exposure for a given request must be
* sent by the HAL before the first call to process_capture_result() for
* that request is made.
*
- * Multiple threads may call notify() simultaneously.
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Buffers delivered to the framework will not be dispatched to the
+ * application layer until a start of exposure timestamp has been received
+ * via a SHUTTER notify() call. It is highly recommended to
+ * dispatch this call as early as possible.
+ *
+ * ------------------------------------------------------------------------
+ * Performance requirements:
+ *
+ * This is a non-blocking call. The framework will return this call in 5ms.
*/
void (*notify)(const struct camera3_callback_ops *,
const camera3_notify_msg_t *msg);
@@ -1806,6 +2176,11 @@
* the HAL. Will be called once after a successful open() call, before any
* other functions are called on the camera3_device_ops structure.
*
+ * Performance requirements:
+ *
+ * This should be a non-blocking call. The HAL should return from this call
+ * in 5ms, and must return from this call in 10ms.
+ *
* Return values:
*
* 0: On successful initialization
@@ -1823,6 +2198,8 @@
/**
* configure_streams:
*
+ * CAMERA_DEVICE_API_VERSION_3_0 only:
+ *
* Reset the HAL camera device processing pipeline and set up new input and
* output streams. This call replaces any existing stream configuration with
* the streams defined in the stream_list. This method will be called at
@@ -1835,16 +2212,19 @@
* The stream_list may contain streams that are also in the currently-active
* set of streams (from the previous call to configure_stream()). These
* streams will already have valid values for usage, max_buffers, and the
- * private pointer. If such a stream has already had its buffers registered,
+ * private pointer.
+ *
+ * If such a stream has already had its buffers registered,
* register_stream_buffers() will not be called again for the stream, and
* buffers from the stream can be immediately included in input requests.
*
* If the HAL needs to change the stream configuration for an existing
* stream due to the new configuration, it may rewrite the values of usage
- * and/or max_buffers during the configure call. The framework will detect
- * such a change, and will then reallocate the stream buffers, and call
- * register_stream_buffers() again before using buffers from that stream in
- * a request.
+ * and/or max_buffers during the configure call.
+ *
+ * The framework will detect such a change, and will then reallocate the
+ * stream buffers, and call register_stream_buffers() again before using
+ * buffers from that stream in a request.
*
* If a currently-active stream is not included in stream_list, the HAL may
* safely remove any references to that stream. It will not be reused in a
@@ -1873,6 +2253,115 @@
* of (for example) a preview stream, with allocation for other streams
* happening later or concurrently.
*
+ * ------------------------------------------------------------------------
+ * CAMERA_DEVICE_API_VERSION_3_1 only:
+ *
+ * Reset the HAL camera device processing pipeline and set up new input and
+ * output streams. This call replaces any existing stream configuration with
+ * the streams defined in the stream_list. This method will be called at
+ * least once after initialize() before a request is submitted with
+ * process_capture_request().
+ *
+ * The stream_list must contain at least one output-capable stream, and may
+ * not contain more than one input-capable stream.
+ *
+ * The stream_list may contain streams that are also in the currently-active
+ * set of streams (from the previous call to configure_stream()). These
+ * streams will already have valid values for usage, max_buffers, and the
+ * private pointer.
+ *
+ * If such a stream has already had its buffers registered,
+ * register_stream_buffers() will not be called again for the stream, and
+ * buffers from the stream can be immediately included in input requests.
+ *
+ * If the HAL needs to change the stream configuration for an existing
+ * stream due to the new configuration, it may rewrite the values of usage
+ * and/or max_buffers during the configure call.
+ *
+ * The framework will detect such a change, and will then reallocate the
+ * stream buffers, and call register_stream_buffers() again before using
+ * buffers from that stream in a request.
+ *
+ * If a currently-active stream is not included in stream_list, the HAL may
+ * safely remove any references to that stream. It will not be reused in a
+ * later configure() call by the framework, and all the gralloc buffers for
+ * it will be freed after the configure_streams() call returns.
+ *
+ * The stream_list structure is owned by the framework, and may not be
+ * accessed once this call completes. The address of an individual
+ * camera3_stream_t structure will remain valid for access by the HAL until
+ * the end of the first configure_stream() call which no longer includes
+ * that camera3_stream_t in the stream_list argument. The HAL may not change
+ * values in the stream structure outside of the private pointer, except for
+ * the usage and max_buffers members during the configure_streams() call
+ * itself.
+ *
+ * If the stream is new, max_buffer, and private pointer fields of the
+ * stream structure will all be set to 0. The usage will be set to the
+ * consumer usage flags. The HAL device must set these fields before the
+ * configure_streams() call returns. These fields are then used by the
+ * framework and the platform gralloc module to allocate the gralloc
+ * buffers for each stream.
+ *
+ * Before such a new stream can have its buffers included in a capture
+ * request, the framework will call register_stream_buffers() with that
+ * stream. However, the framework is not required to register buffers for
+ * _all_ streams before submitting a request. This allows for quick startup
+ * of (for example) a preview stream, with allocation for other streams
+ * happening later or concurrently.
+ *
+ * ------------------------------------------------------------------------
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Reset the HAL camera device processing pipeline and set up new input and
+ * output streams. This call replaces any existing stream configuration with
+ * the streams defined in the stream_list. This method will be called at
+ * least once after initialize() before a request is submitted with
+ * process_capture_request().
+ *
+ * The stream_list must contain at least one output-capable stream, and may
+ * not contain more than one input-capable stream.
+ *
+ * The stream_list may contain streams that are also in the currently-active
+ * set of streams (from the previous call to configure_stream()). These
+ * streams will already have valid values for usage, max_buffers, and the
+ * private pointer.
+ *
+ * If the HAL needs to change the stream configuration for an existing
+ * stream due to the new configuration, it may rewrite the values of usage
+ * and/or max_buffers during the configure call.
+ *
+ * The framework will detect such a change, and may then reallocate the
+ * stream buffers before using buffers from that stream in a request.
+ *
+ * If a currently-active stream is not included in stream_list, the HAL may
+ * safely remove any references to that stream. It will not be reused in a
+ * later configure() call by the framework, and all the gralloc buffers for
+ * it will be freed after the configure_streams() call returns.
+ *
+ * The stream_list structure is owned by the framework, and may not be
+ * accessed once this call completes. The address of an individual
+ * camera3_stream_t structure will remain valid for access by the HAL until
+ * the end of the first configure_stream() call which no longer includes
+ * that camera3_stream_t in the stream_list argument. The HAL may not change
+ * values in the stream structure outside of the private pointer, except for
+ * the usage and max_buffers members during the configure_streams() call
+ * itself.
+ *
+ * If the stream is new, max_buffer, and private pointer fields of the
+ * stream structure will all be set to 0. The usage will be set to the
+ * consumer usage flags. The HAL device must set these fields before the
+ * configure_streams() call returns. These fields are then used by the
+ * framework and the platform gralloc module to allocate the gralloc
+ * buffers for each stream.
+ *
+ * Newly allocated buffers may be included in a capture request at any time
+ * by the framework. Once a gralloc buffer is returned to the framework
+ * with process_capture_result (and its respective release_fence has been
+ * signaled) the framework may free or reuse it at any time.
+ *
+ * ------------------------------------------------------------------------
+ *
* Preconditions:
*
* The framework will only call this method when no captures are being
@@ -1888,7 +2377,7 @@
* frame rate given the sizes and formats of the output streams, as
* documented in the camera device's static metadata.
*
- * Performance expectations:
+ * Performance requirements:
*
* This call is expected to be heavyweight and possibly take several hundred
* milliseconds to complete, since it may require resetting and
@@ -1898,6 +2387,9 @@
* application operational mode changes (such as switching from still
* capture to video recording).
*
+ * The HAL should return from this call in 500ms, and must return from this
+ * call in 1000ms.
+ *
* Return values:
*
* 0: On successful stream configuration
@@ -1933,6 +2425,12 @@
/**
* register_stream_buffers:
*
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * DEPRECATED. This will not be called and must be set to NULL.
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ *
* Register buffers for a given stream with the HAL device. This method is
* called by the framework after a new stream is defined by
* configure_streams, and before buffers from that stream are included in a
@@ -1955,6 +2453,11 @@
* the camera HAL should inspect the passed-in buffers here to determine any
* platform-private pixel format information.
*
+ * Performance requirements:
+ *
+ * This should be a non-blocking call. The HAL should return from this call
+ * in 1ms, and must return from this call in 5ms.
+ *
* Return values:
*
* 0: On successful registration of the new stream buffers
@@ -1992,6 +2495,11 @@
* buffer may be returned for subsequent calls for the same template, or for
* other templates.
*
+ * Performance requirements:
+ *
+ * This should be a non-blocking call. The HAL should return from this call
+ * in 1ms, and must return from this call in 5ms.
+ *
* Return values:
*
* Valid metadata: On successful creation of a default settings
@@ -2036,6 +2544,22 @@
* framework will wait on the sync fence before refilling and reusing the
* input buffer.
*
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * The input/output buffers provided by the framework in each request
+ * may be brand new (having never before been seen by the HAL).
+ *
+ * ------------------------------------------------------------------------
+ * Performance considerations:
+ *
+ * Handling a new buffer should be extremely lightweight and there should be
+ * no frame rate degradation or frame jitter introduced.
+ *
+ * This call must return fast enough to ensure that the requested frame
+ * rate can be sustained, especially for streaming cases (post-processing
+ * quality settings set to FAST). The HAL should return from this call in 1
+ * frame interval, and must return from this call in 4 frame intervals.
+ *
* Return values:
*
* 0: On a successful start to processing the capture request
@@ -2071,6 +2595,10 @@
* The definition of vendor_tag_query_ops_t can be found in
* system/media/camera/include/system/camera_metadata.h.
*
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ * DEPRECATED. This function has been deprecated and should be set to
+ * NULL by the HAL. Please implement get_vendor_tag_ops in camera_common.h
+ * instead.
*/
void (*get_metadata_vendor_tag_ops)(const struct camera3_device*,
vendor_tag_query_ops_t* ops);
@@ -2084,6 +2612,14 @@
*
* The passed-in file descriptor can be used to write debugging text using
* dprintf() or write(). The text should be in ASCII encoding only.
+ *
+ * Performance requirements:
+ *
+ * This must be a non-blocking call. The HAL should return from this call
+ * in 1ms, and must return from this call in 10ms. This call must avoid
+ * deadlocks, as it may be called at any point during camera operation.
+ * Any synchronization primitives used (such as mutex locks or semaphores)
+ * should be acquired with a timeout.
*/
void (*dump)(const struct camera3_device *, int fd);
@@ -2095,22 +2631,73 @@
* quickly as possible in order to prepare for a configure_streams() call.
*
* No buffers are required to be successfully returned, so every buffer
- * held at the time of flush() (whether sucessfully filled or not) may be
+ * held at the time of flush() (whether successfully filled or not) may be
* returned with CAMERA3_BUFFER_STATUS_ERROR. Note the HAL is still allowed
- * to return valid (STATUS_OK) buffers during this call, provided they are
- * succesfully filled.
+ * to return valid (CAMERA3_BUFFER_STATUS_OK) buffers during this call,
+ * provided they are successfully filled.
*
* All requests currently in the HAL are expected to be returned as soon as
* possible. Not-in-process requests should return errors immediately. Any
* interruptible hardware blocks should be stopped, and any uninterruptible
* blocks should be waited on.
*
+ * More specifically, the HAL must follow the requirements below for the various cases:
+ *
+ * 1. For captures that are too late for the HAL to cancel/stop, and will be
+ * completed normally by the HAL; i.e. the HAL can send shutter/notify and
+ * process_capture_result and buffers as normal.
+ *
+ * 2. For pending requests that have not done any processing, the HAL must call notify() with
+ * CAMERA3_MSG_ERROR_REQUEST, and return all the output buffers with
+ * process_capture_result in the error state (CAMERA3_BUFFER_STATUS_ERROR).
+ * The HAL must not place the release fence into an error state, instead,
+ * the release fences must be set to the acquire fences passed by the framework,
+ * or -1 if they have been waited on by the HAL already. This is also the path
+ * to follow for any captures for which the HAL already called notify() with
+ * CAMERA3_MSG_SHUTTER but won't be producing any metadata/valid buffers for.
+ * After CAMERA3_MSG_ERROR_REQUEST, for a given frame, only process_capture_results with
+ * buffers in CAMERA3_BUFFER_STATUS_ERROR are allowed. No further notify() calls or
+ * process_capture_result with non-null metadata is allowed.
+ *
+ * 3. For partially completed pending requests that will not have all the output
+ * buffers or perhaps missing metadata, the HAL should do the following:
+ *
+ * 3.1. Call notify with CAMERA3_MSG_ERROR_RESULT if some of the expected result
+ * metadata (i.e. one or more partial metadata) won't be available for the capture.
+ *
+ * 3.2. Call notify with CAMERA3_MSG_ERROR_BUFFER for every buffer that won't
+ * be produced for the capture.
+ *
+ * 3.3 Call notify with CAMERA3_MSG_SHUTTER with the capture timestamp before
+ * any buffers/metadata are returned with process_capture_result.
+ *
+ * 3.4 For captures that will produce some results, the HAL must not call
+ * CAMERA3_MSG_ERROR_REQUEST, since that indicates complete failure.
+ *
+ * 3.5. Valid buffers/metadata should be passed to the framework as normal.
+ *
+ * 3.6. Failed buffers should be returned to the framework as described for case 2.
+ * But failed buffers do not have to follow the strict ordering valid buffers do,
+ * and may be out-of-order with respect to valid buffers. For example, if buffers
+ * A, B, C, D, E are sent, D and E are failed, then A, E, B, D, C is an acceptable
+ * return order.
+ *
+ * 3.7. For fully-missing metadata, calling CAMERA3_MSG_ERROR_RESULT is sufficient, no
+ * need to call process_capture_result with NULL metadata or equivalent.
+ *
* flush() should only return when there are no more outstanding buffers or
- * requests left in the HAL. The framework may call configure_streams (as
+ * requests left in the HAL. The framework may call configure_streams (as
* the HAL state is now quiesced) or may issue new requests.
*
- * A flush() call should only take 100ms or less. The maximum time it can
- * take is 1 second.
+ * Note that it's sufficient to only support fully-succeeded and fully-failed result cases.
+ * However, it is highly desirable to support the partial failure cases as well, as it
+ * could help improve the flush call overall performance.
+ *
+ * Performance requirements:
+ *
+ * The HAL should return from this call in 100ms, and must return from this
+ * call in 1000ms. This call must not block for longer than the pipeline
+ * latency (see S7 for definition).
*
* Version information:
*
@@ -2141,6 +2728,13 @@
/**
* common.version must equal CAMERA_DEVICE_API_VERSION_3_0 to identify this
* device as implementing version 3.0 of the camera device HAL.
+ *
+ * Performance requirements:
+ *
+ * Camera open (common.module->common.methods->open) should return in 200ms, and must return
+ * in 500ms.
+ * Camera close (common.close) should return in 200ms, and must return in 500ms.
+ *
*/
hw_device_t common;
camera3_device_ops_t *ops;
diff --git a/include/hardware/camera_common.h b/include/hardware/camera_common.h
index 3a1233f..322ed93 100644
--- a/include/hardware/camera_common.h
+++ b/include/hardware/camera_common.h
@@ -24,6 +24,7 @@
#include <sys/types.h>
#include <cutils/native_handle.h>
#include <system/camera.h>
+#include <system/camera_vendor_tags.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
@@ -74,6 +75,16 @@
* This camera module version adds vendor tag support from the module, and
* deprecates the old vendor_tag_query_ops that were previously only
* accessible with a device open.
+ *
+ *******************************************************************************
+ * Version: 2.3 [CAMERA_MODULE_API_VERSION_2_3]
+ *
+ * This camera module version adds open legacy camera HAL device support.
+ * The framework can use it to open the camera device as a lower device HAL
+ * version device if the same device can support multiple device API versions.
+ * The standard hardware module open call (common.methods->open) continues
+ * to open the camera device with the latest supported version, which is
+ * also the version listed in camera_info_t.device_version.
*/
/**
@@ -88,8 +99,9 @@
#define CAMERA_MODULE_API_VERSION_2_0 HARDWARE_MODULE_API_VERSION(2, 0)
#define CAMERA_MODULE_API_VERSION_2_1 HARDWARE_MODULE_API_VERSION(2, 1)
#define CAMERA_MODULE_API_VERSION_2_2 HARDWARE_MODULE_API_VERSION(2, 2)
+#define CAMERA_MODULE_API_VERSION_2_3 HARDWARE_MODULE_API_VERSION(2, 3)
-#define CAMERA_MODULE_API_VERSION_CURRENT CAMERA_MODULE_API_VERSION_2_2
+#define CAMERA_MODULE_API_VERSION_CURRENT CAMERA_MODULE_API_VERSION_2_3
/**
* All device versions <= HARDWARE_DEVICE_API_VERSION(1, 0xFF) must be treated
@@ -100,8 +112,9 @@
#define CAMERA_DEVICE_API_VERSION_2_1 HARDWARE_DEVICE_API_VERSION(2, 1)
#define CAMERA_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0)
#define CAMERA_DEVICE_API_VERSION_3_1 HARDWARE_DEVICE_API_VERSION(3, 1)
+#define CAMERA_DEVICE_API_VERSION_3_2 HARDWARE_DEVICE_API_VERSION(3, 2)
-// Device version 2.x is outdated; device version 3.0 is experimental
+// Device version 2.x is outdated; device version 3.x is experimental
#define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_1_0
/**
@@ -251,66 +264,34 @@
} camera_module_callbacks_t;
-/**
- * Set up vendor-specific tag query methods. These are needed to properly query
- * entries with vendor-specified tags, potentially returned by get_camera_info.
- *
- * This should be used in place of vendor_tag_query_ops, which are deprecated.
- */
-typedef struct vendor_tag_ops vendor_tag_ops_t;
-struct vendor_tag_ops {
- /**
- * Get the number of vendor tags supported on this platform. Used to
- * calculate the size of buffer needed for holding the array of all tags
- * returned by get_all_tags().
- */
- int (*get_tag_count)(const vendor_tag_ops_t *v);
-
- /**
- * Fill an array with all the supported vendor tags on this platform.
- * get_tag_count() returns the number of tags supported, and
- * tag_array will be allocated with enough space to hold all of the tags.
- */
- void (*get_all_tags)(const vendor_tag_ops_t *v, uint32_t *tag_array);
-
- /**
- * Get vendor section name for a vendor-specified entry tag. Only called for
- * vendor-defined tags. The section name must start with the name of the
- * vendor in the Java package style. For example, CameraZoom Inc. must
- * prefix their sections with "com.camerazoom." Must return NULL if the tag
- * is outside the bounds of vendor-defined sections.
- *
- * There may be different vendor-defined tag sections, for example the
- * phone maker, the chipset maker, and the camera module maker may each
- * have their own "com.vendor."-prefixed section.
- *
- * The memory pointed to by the return value must remain valid for the
- * lifetime that the module is loaded, and is owned by the module.
- */
- const char *(*get_section_name)(const vendor_tag_ops_t *v, uint32_t tag);
-
- /**
- * Get tag name for a vendor-specified entry tag. Only called for
- * vendor-defined tags. Must return NULL if the it is not a vendor-defined
- * tag.
- *
- * The memory pointed to by the return value must remain valid for the
- * lifetime that the module is loaded, and is owned by the module.
- */
- const char *(*get_tag_name)(const vendor_tag_ops_t *v, uint32_t tag);
-
- /**
- * Get tag type for a vendor-specified entry tag. Only called for tags >=
- * 0x80000000. Must return -1 if the tag is outside the bounds of
- * vendor-defined sections.
- */
- int (*get_tag_type)(const vendor_tag_ops_t *v, uint32_t tag);
-
- /* reserved for future use */
- void* reserved[8];
-};
-
typedef struct camera_module {
+ /**
+ * Common methods of the camera module. This *must* be the first member of
+ * camera_module as users of this structure will cast a hw_module_t to
+ * camera_module pointer in contexts where it's known the hw_module_t
+ * references a camera_module.
+ *
+ * The return values for common.methods->open for camera_module are:
+ *
+ * 0: On a successful open of the camera device.
+ *
+ * -ENODEV: The camera device cannot be opened due to an internal
+ * error.
+ *
+ * -EINVAL: The input arguments are invalid, i.e. the id is invalid,
+ * and/or the module is invalid.
+ *
+ * -EBUSY: The camera device was already opened for this camera id
+ * (by using this method or open_legacy),
+ * regardless of the device HAL version it was opened as.
+ *
+ * -EUSERS: The maximal number of camera devices that can be
+ * opened concurrently were opened already, either by
+ * this method or the open_legacy method.
+ *
+ * All other return values from common.methods->open will be treated as
+ * -ENODEV.
+ */
hw_module_t common;
/**
@@ -333,6 +314,15 @@
* Return the static camera information for a given camera device. This
* information may not change for a camera device.
*
+ * Return values:
+ *
+ * 0: On a successful operation
+ *
+ * -ENODEV: The information cannot be provided due to an internal
+ * error.
+ *
+ * -EINVAL: The input arguments are invalid, i.e. the id is invalid,
+ * and/or the module is invalid.
*/
int (*get_camera_info)(int camera_id, struct camera_info *info);
@@ -355,6 +345,15 @@
*
* Valid to be called by the framework.
*
+ * Return values:
+ *
+ * 0: On a successful operation
+ *
+ * -ENODEV: The operation cannot be completed due to an internal
+ * error.
+ *
+ * -EINVAL: The input arguments are invalid, i.e. the callbacks are
+ * null
*/
int (*set_callbacks)(const camera_module_callbacks_t *callbacks);
@@ -365,6 +364,9 @@
* HAL should fill in all the vendor tag operation methods, or leave ops
* unchanged if no vendor tags are defined.
*
+ * The vendor_tag_ops structure used here is defined in:
+ * system/media/camera/include/system/camera_vendor_tags.h
+ *
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_x/2_0/2_1:
@@ -375,8 +377,55 @@
*/
void (*get_vendor_tag_ops)(vendor_tag_ops_t* ops);
+ /**
+ * open_legacy:
+ *
+ * Open a specific legacy camera HAL device if multiple device HAL API
+ * versions are supported by this camera HAL module. For example, if the
+ * camera module supports both CAMERA_DEVICE_API_VERSION_1_0 and
+ * CAMERA_DEVICE_API_VERSION_3_2 device API for the same camera id,
+ * framework can call this function to open the camera device as
+ * CAMERA_DEVICE_API_VERSION_1_0 device.
+ *
+ * This is an optional method. A Camera HAL module does not need to support
+ * more than one device HAL version per device, and such modules may return
+ * -ENOSYS for all calls to this method. For all older HAL device API
+ * versions that are not supported, it may return -EOPNOTSUPP. When the above
+ * cases occur, the normal open() method (common.methods->open) will be
+ * used by the framework instead.
+ *
+ * Version information (based on camera_module_t.common.module_api_version):
+ *
+ * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2:
+ * Not provided by HAL module. Framework will not call this function.
+ *
+ * CAMERA_MODULE_API_VERSION_2_3:
+ * Valid to be called by the framework.
+ *
+ * Return values:
+ *
+ * 0: On a successful open of the camera device.
+ *
+ * -ENOSYS: This method is not supported.
+ *
+ * -EOPNOTSUPP: The requested HAL version is not supported by this method.
+ *
+ * -EINVAL: The input arguments are invalid, i.e. the id is invalid,
+ * and/or the module is invalid.
+ *
+ * -EBUSY: The camera device was already opened for this camera id
+ * (by using this method or common.methods->open method),
+ * regardless of the device HAL version it was opened as.
+ *
+ * -EUSERS: The maximal number of camera devices that can be
+ * opened concurrently were opened already, either by
+ * this method or common.methods->open method.
+ */
+ int (*open_legacy)(const struct hw_module_t* module, const char* id,
+ uint32_t halVersion, struct hw_device_t** device);
+
/* reserved for future use */
- void* reserved[8];
+ void* reserved[7];
} camera_module_t;
__END_DECLS
diff --git a/include/hardware/consumerir.h b/include/hardware/consumerir.h
index 5adf6be..15334c1 100644
--- a/include/hardware/consumerir.h
+++ b/include/hardware/consumerir.h
@@ -32,10 +32,22 @@
} consumerir_freq_range_t;
typedef struct consumerir_module {
+ /**
+ * Common methods of the consumer IR module. This *must* be the first member of
+ * consumerir_module as users of this structure will cast a hw_module_t to
+ * consumerir_module pointer in contexts where it's known the hw_module_t references a
+ * consumerir_module.
+ */
struct hw_module_t common;
} consumerir_module_t;
typedef struct consumerir_device {
+ /**
+ * Common methods of the consumer IR device. This *must* be the first member of
+ * consumerir_device as users of this structure will cast a hw_device_t to
+ * consumerir_device pointer in contexts where it's known the hw_device_t references a
+ * consumerir_device.
+ */
struct hw_device_t common;
/*
diff --git a/include/hardware/fb.h b/include/hardware/fb.h
index 135e4aa..9df9416 100644
--- a/include/hardware/fb.h
+++ b/include/hardware/fb.h
@@ -36,6 +36,12 @@
/*****************************************************************************/
typedef struct framebuffer_device_t {
+ /**
+ * Common methods of the framebuffer device. This *must* be the first member of
+ * framebuffer_device_t as users of this structure will cast a hw_device_t to
+ * framebuffer_device_t pointer in contexts where it's known the hw_device_t references a
+ * framebuffer_device_t.
+ */
struct hw_device_t common;
/* flags describing some attributes of the framebuffer */
diff --git a/include/hardware/fingerprint.h b/include/hardware/fingerprint.h
new file mode 100644
index 0000000..458ca2d
--- /dev/null
+++ b/include/hardware/fingerprint.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H
+#define ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H
+
+#define FINGERPRINT_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0)
+#define FINGERPRINT_HARDWARE_MODULE_ID "fingerprint"
+
+typedef enum fingerprint_msg_type {
+ FINGERPRINT_ERROR = -1,
+ FINGERPRINT_ACQUIRED = 1,
+ FINGERPRINT_PROCESSED = 2,
+ FINGERPRINT_TEMPLATE_ENROLLING = 3,
+ FINGERPRINT_TEMPLATE_REMOVED = 4
+} fingerprint_msg_type_t;
+
+typedef enum fingerprint_error {
+ FINGERPRINT_ERROR_HW_UNAVAILABLE = 1,
+ FINGERPRINT_ERROR_UNABLE_TO_PROCESS = 2,
+ FINGERPRINT_ERROR_TIMEOUT = 3,
+ FINGERPRINT_ERROR_NO_SPACE = 4 /* No space available to store a template */
+} fingerprint_error_t;
+
+typedef enum fingerprint_acquired_info {
+ FINGERPRINT_ACQUIRED_GOOD = 0,
+ FINGERPRINT_ACQUIRED_PARTIAL = 1,
+ FINGERPRINT_ACQUIRED_INSUFFICIENT = 2,
+ FINGERPRINT_ACQUIRED_IMAGER_DIRTY = 4,
+ FINGERPRINT_ACQUIRED_TOO_SLOW = 8,
+ FINGERPRINT_ACQUIRED_TOO_FAST = 16
+} fingerprint_acquired_info_t;
+
+typedef struct fingerprint_enroll {
+ uint32_t id;
+ /* samples_remaining goes from N (no data collected, but N scans needed)
+ * to 0 (no more data is needed to build a template).
+ * The progress indication may be augmented by a bitmap encoded indication
+ * of finger area that needs to be presented by the user.
+ * Bit numbers mapped to physical location:
+ *
+ * distal
+ * +-+-+-+
+ * |2|1|0|
+ * |5|4|3|
+ * medial |8|7|6| lateral
+ * |b|a|9|
+ * |e|d|c|
+ * +-+-+-+
+ * proximal
+ *
+ */
+ uint16_t data_collected_bmp;
+ uint16_t samples_remaining;
+} fingerprint_enroll_t;
+
+typedef struct fingerprint_removed {
+ uint32_t id;
+} fingerprint_removed_t;
+
+typedef struct fingerprint_acquired {
+ fingerprint_acquired_info_t acquired_info; /* information about the image */
+} fingerprint_acquired_t;
+
+typedef struct fingerprint_processed {
+ uint32_t id; /* 0 is a special id and means no match */
+} fingerprint_processed_t;
+
+typedef struct fingerprint_msg {
+ fingerprint_msg_type_t type;
+ union {
+ uint64_t raw;
+ fingerprint_error_t error;
+ fingerprint_enroll_t enroll;
+ fingerprint_removed_t removed;
+ fingerprint_acquired_t acquired;
+ fingerprint_processed_t processed;
+ } data;
+} fingerprint_msg_t;
+
+/* Callback function type */
+typedef void (*fingerprint_notify_t)(fingerprint_msg_t msg);
+
+/* Synchronous operation */
+typedef struct fingerprint_device {
+ /**
+ * Common methods of the fingerprint device. This *must* be the first member
+ * of fingerprint_device as users of this structure will cast a hw_device_t
+ * to fingerprint_device pointer in contexts where it's known
+ * the hw_device_t references a fingerprint_device.
+ */
+ struct hw_device_t common;
+
+ /*
+ * Fingerprint enroll request:
+ * Switches the HAL state machine to collect and store a new fingerprint
+ * template. Switches back as soon as enroll is complete
+ * (fingerprint_msg.type == FINGERPRINT_TEMPLATE_ENROLLING &&
+ * fingerprint_msg.data.enroll.samples_remaining == 0)
+ * or after timeout_sec seconds.
+ *
+ * Function return: 0 if enrollment process can be successfully started
+ * -1 otherwise. A notify() function may be called
+ * indicating the error condition.
+ */
+ int (*enroll)(struct fingerprint_device *dev, uint32_t timeout_sec);
+
+ /*
+ * Cancel fingerprint enroll request:
+ * Switches the HAL state machine back to accept a fingerprint scan mode.
+ * (fingerprint_msg.type == FINGERPRINT_TEMPLATE_ENROLLING &&
+ * fingerprint_msg.data.enroll.samples_remaining == 0)
+ * will indicate switch back to the scan mode.
+ *
+ * Function return: 0 if cancel request is accepted
+ * -1 otherwise.
+ */
+ int (*enroll_cancel)(struct fingerprint_device *dev);
+
+ /*
+ * Fingerprint remove request:
+ * deletes a fingerprint template.
+ * If the fingerprint id is 0 the entire template database will be removed.
+ * notify() will be called for each template deleted with
+ * fingerprint_msg.type == FINGERPRINT_TEMPLATE_REMOVED and
+ * fingerprint_msg.data.removed.id indicating each template id removed.
+ *
+ * Function return: 0 if fingerprint template(s) can be successfully deleted
+ * -1 otherwise.
+ */
+ int (*remove)(struct fingerprint_device *dev, uint32_t fingerprint_id);
+
+ /*
+ * Set notification callback:
+ * Registers a user function that would receive notifications from the HAL
+ * The call will block if the HAL state machine is in busy state until HAL
+ * leaves the busy state.
+ *
+ * Function return: 0 if callback function is successfully registered
+ * -1 otherwise.
+ */
+ int (*set_notify)(struct fingerprint_device *dev,
+ fingerprint_notify_t notify);
+
+ /*
+ * Client provided callback function to receive notifications.
+ * Do not set by hand, use the function above instead.
+ */
+ fingerprint_notify_t notify;
+
+ /* Reserved for future use. Must be NULL. */
+ void* reserved[8 - 4];
+} fingerprint_device_t;
+
+typedef struct fingerprint_module {
+ /**
+ * Common methods of the fingerprint module. This *must* be the first member
+ * of fingerprint_module as users of this structure will cast a hw_module_t
+ * to fingerprint_module pointer in contexts where it's known
+ * the hw_module_t references a fingerprint_module.
+ */
+ struct hw_module_t common;
+} fingerprint_module_t;
+
+#endif /* ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H */
diff --git a/include/hardware/gps.h b/include/hardware/gps.h
index 458b5b4..eb44d5c 100644
--- a/include/hardware/gps.h
+++ b/include/hardware/gps.h
@@ -21,6 +21,8 @@
#include <sys/cdefs.h>
#include <sys/types.h>
#include <pthread.h>
+#include <sys/socket.h>
+#include <stdbool.h>
#include <hardware/hardware.h>
@@ -38,6 +40,9 @@
/** Maximum number of SVs for gps_sv_status_callback(). */
#define GPS_MAX_SVS 32
+/** Maximum number of Measurements in gps_measurement_callback(). */
+#define GPS_MAX_MEASUREMENT 32
+
/** Requested operational mode for GPS operation. */
typedef uint32_t GpsPositionMode;
// IMPORTANT: Note that the following values must match
@@ -105,6 +110,10 @@
#define GPS_CAPABILITY_ON_DEMAND_TIME 0x0000010
/** GPS supports Geofencing */
#define GPS_CAPABILITY_GEOFENCING 0x0000020
+/** GPS supports Measurements */
+#define GPS_CAPABILITY_MEASUREMENTS 0x0000040
+/** GPS supports Navigation Messages */
+#define GPS_CAPABILITY_NAV_MESSAGES 0x0000080
/** Flags used to specify which aiding data to delete
when calling delete_aiding_data(). */
@@ -135,6 +144,12 @@
#define AGPS_SETID_TYPE_IMSI 1
#define AGPS_SETID_TYPE_MSISDN 2
+typedef uint16_t ApnIpType;
+#define APN_IP_INVALID 0
+#define APN_IP_IPV4 1
+#define APN_IP_IPV6 2
+#define APN_IP_IPV4V6 3
+
/**
* String length constants
*/
@@ -206,6 +221,137 @@
#define AGPS_RIL_NETWORK_TTYPE_WIMAX 6
/**
+ * Flags to indicate what fields in GpsClock are valid.
+ */
+typedef uint16_t GpsClockFlags;
+/** A valid 'leap second' is stored in the data structure. */
+#define GPS_CLOCK_HAS_LEAP_SECOND (1<<0)
+/** A valid 'time uncertainty' is stored in the data structure. */
+#define GPS_CLOCK_HAS_TIME_UNCERTAINTY (1<<1)
+/** A valid 'full bias' is stored in the data structure. */
+#define GPS_CLOCK_HAS_FULL_BIAS (1<<2)
+/** A valid 'bias' is stored in the data structure. */
+#define GPS_CLOCK_HAS_BIAS (1<<3)
+/** A valid 'bias uncertainty' is stored in the data structure. */
+#define GPS_CLOCK_HAS_BIAS_UNCERTAINTY (1<<4)
+/** A valid 'drift' is stored in the data structure. */
+#define GPS_CLOCK_HAS_DRIFT (1<<5)
+/** A valid 'drift uncertainty' is stored in the data structure. */
+#define GPS_CLOCK_HAS_DRIFT_UNCERTAINTY (1<<6)
+
+/**
+ * Enumeration of the available values for the GPS Clock type.
+ */
+typedef uint8_t GpsClockType;
+/** The type is not available or it is unknown. */
+#define GPS_CLOCK_TYPE_UNKNOWN 0
+/** The source of the time value reported by GPS clock is the local hardware clock. */
+#define GPS_CLOCK_TYPE_LOCAL_HW_TIME 1
+/**
+ * The source of the time value reported by GPS clock is the GPS time derived from satellites
+ * (epoch = Jan 6, 1980)
+ */
+#define GPS_CLOCK_TYPE_GPS_TIME 2
+
+/**
+ * Flags to indicate what fields in GpsMeasurement are valid.
+ */
+typedef uint32_t GpsMeasurementFlags;
+/** A valid 'snr' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_SNR (1<<0)
+/** A valid 'elevation' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_ELEVATION (1<<1)
+/** A valid 'elevation uncertainty' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_ELEVATION_UNCERTAINTY (1<<2)
+/** A valid 'azimuth' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_AZIMUTH (1<<3)
+/** A valid 'azimuth uncertainty' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_AZIMUTH_UNCERTAINTY (1<<4)
+/** A valid 'pseudorange' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_PSEUDORANGE (1<<5)
+/** A valid 'pseudorange uncertainty' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_PSEUDORANGE_UNCERTAINTY (1<<6)
+/** A valid 'code phase' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_CODE_PHASE (1<<7)
+/** A valid 'code phase uncertainty' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_CODE_PHASE_UNCERTAINTY (1<<8)
+/** A valid 'carrier frequency' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_CARRIER_FREQUENCY (1<<9)
+/** A valid 'carrier cycles' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_CARRIER_CYCLES (1<<10)
+/** A valid 'carrier phase' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_CARRIER_PHASE (1<<11)
+/** A valid 'carrier phase uncertainty' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_CARRIER_PHASE_UNCERTAINTY (1<<12)
+/** A valid 'bit number' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_BIT_NUMBER (1<<13)
+/** A valid 'time from last bit' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_TIME_FROM_LAST_BIT (1<<14)
+/** A valid 'doppler shift' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_DOPPLER_SHIFT (1<<15)
+/** A valid 'doppler shift uncertainty' is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_DOPPLER_SHIFT_UNCERTAINTY (1<<16)
+/** A valid 'used in fix' flag is stored in the data structure. */
+#define GPS_MEASUREMENT_HAS_USED_IN_FIX (1<<17)
+
+/**
+ * Enumeration of the available values for the GPS Measurement's loss of lock.
+ */
+typedef uint8_t GpsLossOfLock;
+/** The indicator is not available or it is unknown. */
+#define GPS_LOSS_OF_LOCK_UNKNOWN 0
+/** The measurement does not present any indication of loss of lock. */
+#define GPS_LOSS_OF_LOCK_OK 1
+/** Loss of lock between previous and current observation: cycle slip possible. */
+#define GPS_LOSS_OF_LOCK_CYCLE_SLIP 2
+
+/**
+ * Enumeration of available values for the GPS Measurement's multipath indicator.
+ */
+typedef uint8_t GpsMultipathIndicator;
+/** The indicator is not available or unknown. */
+#define GPS_MULTIPATH_INDICATOR_UNKNOWN 0
+/** The measurement has been indicated to use multipath. */
+#define GPS_MULTIPATH_INDICATOR_DETECTED 1
+/** The measurement has been indicated Not to use multipath. */
+#define GPS_MULTIPATH_INDICATOR_NOT_USED 2
+
+/**
+ * Flags indicating the GPS measurement state.
+ */
+typedef uint16_t GpsMeasurementState;
+#define GPS_MEASUREMENT_STATE_UNKNOWN 0
+#define GPS_MEASUREMENT_STATE_CODE_LOCK (1<<0)
+#define GPS_MEASUREMENT_STATE_BIT_SYNC (1<<1)
+#define GPS_MEASUREMENT_STATE_SUBFRAME_SYNC (1<<2)
+#define GPS_MEASUREMENT_STATE_TOW_DECODED (1<<3)
+
+/**
+ * Flags indicating the Accumulated Delta Range's states.
+ */
+typedef uint16_t GpsAccumulatedDeltaRangeState;
+#define GPS_ADR_STATE_UNKNOWN 0
+#define GPS_ADR_STATE_VALID (1<<0)
+#define GPS_ADR_STATE_RESET (1<<1)
+#define GPS_ADR_STATE_CYCLE_SLIP (1<<2)
+
+/**
+ * Enumeration of available values to indicate the available GPS Navigation message types.
+ */
+typedef uint8_t GpsNavigationMessageType;
+/** The message type is unknown. */
+#define GPS_NAVIGATION_MESSAGE_TYPE_UNKNOWN 0
+/** L1 C/A message contained in the structure. */
+#define GPS_NAVIGATION_MESSAGE_TYPE_L1CA 1
+/** L2-CNAV message contained in the structure. */
+#define GPS_NAVIGATION_MESSAGE_TYPE_L2CNAV 2
+/** L5-CNAV message contained in the structure. */
+#define GPS_NAVIGATION_MESSAGE_TYPE_L5CNAV 3
+/** CNAV-2 message contained in the structure. */
+#define GPS_NAVIGATION_MESSAGE_TYPE_CNAV2 4
+
+
+/**
* Name for the GPS XTRA interface.
*/
#define GPS_XTRA_INTERFACE "gps-xtra"
@@ -221,6 +367,11 @@
#define AGPS_INTERFACE "agps"
/**
+ * Name of the Supl Certificate interface.
+ */
+#define SUPL_CERTIFICATE_INTERFACE "supl-certificate"
+
+/**
* Name for NI interface
*/
#define GPS_NI_INTERFACE "gps-ni"
@@ -235,6 +386,16 @@
*/
#define GPS_GEOFENCING_INTERFACE "gps_geofencing"
+/**
+ * Name of the GPS Measurements interface.
+ */
+#define GPS_MEASUREMENT_INTERFACE "gps_measurement"
+
+/**
+ * Name of the GPS navigation message interface.
+ */
+#define GPS_NAVIGATION_MESSAGE_INTERFACE "gps_navigation_message"
+
/** Represents a location. */
typedef struct {
@@ -308,6 +469,7 @@
uint32_t used_in_fix_mask;
} GpsSvStatus;
+
/* 2G and 3G */
/* In 3G lac is discarded */
typedef struct {
@@ -341,8 +503,9 @@
*/
typedef void (* gps_status_callback)(GpsStatus* status);
-/** Callback with SV status information.
- * Can only be called from a thread created by create_thread_cb.
+/**
+ * Callback with SV status information.
+ * Can only be called from a thread created by create_thread_cb.
*/
typedef void (* gps_sv_status_callback)(GpsSvStatus* sv_info);
@@ -394,7 +557,7 @@
size_t size;
/**
* Opens the interface and provides the callback routines
- * to the implemenation of this interface.
+ * to the implementation of this interface.
*/
int (*init)( GpsCallbacks* callbacks );
@@ -455,7 +618,7 @@
size_t size;
/**
* Opens the XTRA interface and provides the callback routines
- * to the implemenation of this interface.
+ * to the implementation of this interface.
*/
int (*init)( GpsXtraCallbacks* callbacks );
/** Injects XTRA data into the GPS. */
@@ -476,13 +639,45 @@
/** Represents the status of AGPS. */
typedef struct {
- /** set to sizeof(AGpsStatus) */
+ /** set to sizeof(AGpsStatus_v1) */
+ size_t size;
+
+ AGpsType type;
+ AGpsStatusValue status;
+} AGpsStatus_v1;
+
+/** Represents the status of AGPS augmented with a IPv4 address field. */
+typedef struct {
+ /** set to sizeof(AGpsStatus_v2) */
size_t size;
AGpsType type;
AGpsStatusValue status;
uint32_t ipaddr;
-} AGpsStatus;
+} AGpsStatus_v2;
+
+/* Represents the status of AGPS augmented to support IPv4 and IPv6. */
+typedef struct {
+ /** set to sizeof(AGpsStatus_v3) */
+ size_t size;
+
+ AGpsType type;
+ AGpsStatusValue status;
+
+ /**
+ * Must be set to a valid IPv4 address if the field 'addr' contains an IPv4
+ * address, or set to INADDR_NONE otherwise.
+ */
+ uint32_t ipaddr;
+
+ /**
+ * Must contain the IPv4 (AF_INET) or IPv6 (AF_INET6) address to report.
+ * Any other value of addr.ss_family will be rejected.
+ */
+ struct sockaddr_storage addr;
+} AGpsStatus_v3;
+
+typedef AGpsStatus_v3 AGpsStatus;
/** Callback with AGPS status information.
* Can only be called from a thread created by create_thread_cb.
@@ -498,16 +693,16 @@
/** Extended interface for AGPS support. */
typedef struct {
- /** set to sizeof(AGpsInterface) */
+ /** set to sizeof(AGpsInterface_v1) */
size_t size;
/**
* Opens the AGPS interface and provides the callback routines
- * to the implemenation of this interface.
+ * to the implementation of this interface.
*/
void (*init)( AGpsCallbacks* callbacks );
/**
- * Notifies that a data connection is available and sets
+ * Notifies that a data connection is available and sets
* the name of the APN to be used for SUPL.
*/
int (*data_conn_open)( const char* apn );
@@ -516,15 +711,124 @@
*/
int (*data_conn_closed)();
/**
- * Notifies that a data connection is not available for AGPS.
+ * Notifies that a data connection is not available for AGPS.
*/
int (*data_conn_failed)();
/**
* Sets the hostname and port for the AGPS server.
*/
int (*set_server)( AGpsType type, const char* hostname, int port );
-} AGpsInterface;
+} AGpsInterface_v1;
+/**
+ * Extended interface for AGPS support, it is augmented to enable to pass
+ * extra APN data.
+ */
+typedef struct {
+ /** set to sizeof(AGpsInterface_v2) */
+ size_t size;
+
+ /**
+ * Opens the AGPS interface and provides the callback routines to the
+ * implementation of this interface.
+ */
+ void (*init)(AGpsCallbacks* callbacks);
+ /**
+ * Deprecated.
+ * If the HAL supports AGpsInterface_v2 this API will not be used, see
+ * data_conn_open_with_apn_ip_type for more information.
+ */
+ int (*data_conn_open)(const char* apn);
+ /**
+ * Notifies that the AGPS data connection has been closed.
+ */
+ int (*data_conn_closed)();
+ /**
+ * Notifies that a data connection is not available for AGPS.
+ */
+ int (*data_conn_failed)();
+ /**
+ * Sets the hostname and port for the AGPS server.
+ */
+ int (*set_server)(AGpsType type, const char* hostname, int port);
+
+ /**
+ * Notifies that a data connection is available and sets the name of the
+ * APN, and its IP type, to be used for SUPL connections.
+ */
+ int (*data_conn_open_with_apn_ip_type)(
+ const char* apn,
+ ApnIpType apnIpType);
+} AGpsInterface_v2;
+
+typedef AGpsInterface_v2 AGpsInterface;
+
+/** Error codes associated with certificate operations */
+#define AGPS_CERTIFICATE_OPERATION_SUCCESS 0
+#define AGPS_CERTIFICATE_ERROR_GENERIC -100
+#define AGPS_CERTIFICATE_ERROR_TOO_MANY_CERTIFICATES -101
+
+/** A data structure that represents an X.509 certificate using DER encoding */
+typedef struct {
+ size_t length;
+ u_char* data;
+} DerEncodedCertificate;
+
+/**
+ * A type definition for SHA1 Fingerprints used to identify X.509 Certificates
+ * The Fingerprint is a digest of the DER Certificate that uniquely identifies it.
+ */
+typedef struct {
+ u_char data[20];
+} Sha1CertificateFingerprint;
+
+/** AGPS Interface to handle SUPL certificate operations */
+typedef struct {
+ /** set to sizeof(SuplCertificateInterface) */
+ size_t size;
+
+ /**
+ * Installs a set of Certificates used for SUPL connections to the AGPS server.
+ * If needed the HAL should find out internally any certificates that need to be removed to
+ * accommodate the certificates to install.
+ * The certificates installed represent a full set of valid certificates needed to connect to
+ * AGPS SUPL servers.
+ * The list of certificates is required, and all must be available at the same time, when trying
+ * to establish a connection with the AGPS Server.
+ *
+ * Parameters:
+ * certificates - A pointer to an array of DER encoded certificates that need to be
+ * installed in the HAL.
+ * length - The number of certificates to install.
+ * Returns:
+ * AGPS_CERTIFICATE_OPERATION_SUCCESS if the operation is completed successfully
+ * AGPS_CERTIFICATE_ERROR_TOO_MANY_CERTIFICATES if the HAL cannot store the number of
+ * certificates attempted to be installed, the state of the certificates stored should
+ * remain the same as before on this error case.
+ *
+ * IMPORTANT:
+ * If needed the HAL should find out internally the set of certificates that need to be
+ * removed to accommodate the certificates to install.
+ */
+ int (*install_certificates) ( const DerEncodedCertificate* certificates, size_t length );
+
+ /**
+ * Notifies the HAL that a list of certificates used for SUPL connections are revoked. It is
+ * expected that the given set of certificates is removed from the internal store of the HAL.
+ *
+ * Parameters:
+ * fingerprints - A pointer to an array of SHA1 Fingerprints to identify the set of
+ * certificates to revoke.
+ * length - The number of fingerprints provided.
+ * Returns:
+ * AGPS_CERTIFICATE_OPERATION_SUCCESS if the operation is completed successfully.
+ *
+ * IMPORTANT:
+ * If any of the certificates provided (through its fingerprint) is not known by the HAL,
+ * it should be ignored and continue revoking/deleting the rest of them.
+ */
+ int (*revoke_certificates) ( const Sha1CertificateFingerprint* fingerprints, size_t length );
+} SuplCertificateInterface;
/** Represents an NI request */
typedef struct {
@@ -650,7 +954,7 @@
size_t size;
/**
* Opens the AGPS interface and provides the callback routines
- * to the implemenation of this interface.
+ * to the implementation of this interface.
*/
void (*init)( AGpsRilCallbacks* callbacks );
@@ -789,7 +1093,7 @@
int32_t transition, GpsUtcTime timestamp);
/**
- * The callback associated with the availablity of the GPS system for geofencing
+ * The callback associated with the availability of the GPS system for geofencing
* monitoring. If the GPS system determines that it cannot monitor geofences
* because of lack of reliability or unavailability of the GPS signals, it will
* call this callback with GPS_GEOFENCE_UNAVAILABLE parameter.
@@ -867,7 +1171,7 @@
/**
* Opens the geofence interface and provides the callback routines
- * to the implemenation of this interface.
+ * to the implementation of this interface.
*/
void (*init)( GpsGeofenceCallbacks* callbacks );
@@ -897,14 +1201,12 @@
* sampling the GPS for power-saving reasons; thus the rate of
* sampling may be faster or slower than this.
* unknown_timer_ms - The time limit after which the UNCERTAIN transition
- * should be triggered. This paramter is defined in milliseconds.
+ * should be triggered. This parameter is defined in milliseconds.
* See above for a detailed explanation.
*/
- void (*add_geofence_area) (int32_t geofence_id, double latitude,
- double longitude, double radius_meters,
- int last_transition, int monitor_transitions,
- int notification_responsiveness_ms,
- int unknown_timer_ms);
+ void (*add_geofence_area) (int32_t geofence_id, double latitude, double longitude,
+ double radius_meters, int last_transition, int monitor_transitions,
+ int notification_responsiveness_ms, int unknown_timer_ms);
/**
* Pause monitoring a particular geofence.
@@ -933,6 +1235,531 @@
*/
void (*remove_geofence_area) (int32_t geofence_id);
} GpsGeofencingInterface;
+
+
+/**
+ * Represents an estimate of the GPS clock time.
+ */
+typedef struct {
+ /** set to sizeof(GpsClock) */
+ size_t size;
+
+ /** A set of flags indicating the validity of the fields in this data structure. */
+ GpsClockFlags flags;
+
+ /**
+ * Leap second data.
+ * The sign of the value is defined by the following equation:
+ * utc_time_ns = time_ns + (full_bias_ns + bias_ns) - leap_second * 1,000,000,000
+ *
+ * If the data is available 'flags' must contain GPS_CLOCK_HAS_LEAP_SECOND.
+ */
+ int16_t leap_second;
+
+ /**
+ * Indicates the type of time reported by the 'time_ns' field.
+ * This is a Mandatory field.
+ */
+ GpsClockType type;
+
+ /**
+ * The GPS receiver internal clock value. This can be either the local hardware clock value
+ * (GPS_CLOCK_TYPE_LOCAL_HW_TIME), or the current GPS time derived inside GPS receiver
+ * (GPS_CLOCK_TYPE_GPS_TIME). The field 'type' defines the time reported.
+ *
+ * For local hardware clock, this value is expected to be monotonically increasing during
+ * the reporting session. The real GPS time can be derived by compensating the 'full bias'
+ * (when it is available) from this value.
+ *
+ * For GPS time, this value is expected to be the best estimation of current GPS time that GPS
+ * receiver can achieve. Set the 'time uncertainty' appropriately when GPS time is specified.
+ *
+ * Sub-nanosecond accuracy can be provided by means of the 'bias' field.
+ * The value contains the 'time uncertainty' in it.
+ *
+ * This is a Mandatory field.
+ */
+ int64_t time_ns;
+
+ /**
+ * 1-Sigma uncertainty associated with the clock's time in nanoseconds.
+ * The uncertainty is represented as an absolute (single sided) value.
+ *
+ * This value should be set if GPS_CLOCK_TYPE_GPS_TIME is set.
+ * If the data is available 'flags' must contain GPS_CLOCK_HAS_TIME_UNCERTAINTY.
+ */
+ double time_uncertainty_ns;
+
+ /**
+ * The difference between hardware clock ('time' field) inside GPS receiver and the true GPS
+ * time since 0000Z, January 6, 1980, in nanoseconds.
+ * This value is used if and only if GPS_CLOCK_TYPE_LOCAL_HW_TIME is set, and GPS receiver
+ * has solved the clock for GPS time.
+ * The caller is responsible for using the 'bias uncertainty' field for quality check.
+ *
+ * The sign of the value is defined by the following equation:
+ * true time (GPS time) = time_ns + (full_bias_ns + bias_ns)
+ *
+ * This value contains the 'bias uncertainty' in it.
+ * If the data is available 'flags' must contain GPS_CLOCK_HAS_FULL_BIAS.
+ *
+ */
+ int64_t full_bias_ns;
+
+ /**
+ * Sub-nanosecond bias.
+ * The value contains the 'bias uncertainty' in it.
+ *
+ * If the data is available 'flags' must contain GPS_CLOCK_HAS_BIAS.
+ */
+ double bias_ns;
+
+ /**
+ * 1-Sigma uncertainty associated with the clock's bias in nanoseconds.
+ * The uncertainty is represented as an absolute (single sided) value.
+ *
+ * If the data is available 'flags' must contain GPS_CLOCK_HAS_BIAS_UNCERTAINTY.
+ */
+ double bias_uncertainty_ns;
+
+ /**
+ * The clock's drift in nanoseconds (per second).
+ * A positive value means that the frequency is higher than the nominal frequency.
+ *
+ * The value contains the 'drift uncertainty' in it.
+ * If the data is available 'flags' must contain GPS_CLOCK_HAS_DRIFT.
+ */
+ double drift_nsps;
+
+ /**
+ * 1-Sigma uncertainty associated with the clock's drift in nanoseconds (per second).
+ * The uncertainty is represented as an absolute (single sided) value.
+ *
+ * If the data is available 'flags' must contain GPS_CLOCK_HAS_DRIFT_UNCERTAINTY.
+ */
+ double drift_uncertainty_nsps;
+} GpsClock;
+
+/**
+ * Represents a GPS Measurement, it contains raw and computed information.
+ */
+typedef struct {
+ /** set to sizeof(GpsMeasurement) */
+ size_t size;
+
+ /** A set of flags indicating the validity of the fields in this data structure. */
+ GpsMeasurementFlags flags;
+
+ /**
+ * Pseudo-random number in the range of [1, 32]
+ * This is a Mandatory value.
+ */
+ int8_t prn;
+
+ /**
+ * Time offset at which the measurement was taken in nanoseconds.
+ * The reference receiver's time is specified by GpsData::clock::time_ns and should be
+ * interpreted in the same way as indicated by GpsClock::type.
+ *
+ * The sign of time_offset_ns is given by the following equation:
+ * measurement time = GpsClock::time_ns + time_offset_ns
+ *
+ * It provides an individual time-stamp for the measurement, and allows sub-nanosecond accuracy.
+ * This is a Mandatory value.
+ */
+ double time_offset_ns;
+
+ /**
+ * Per satellite sync state. It represents the current sync state for the associated satellite.
+ * Based on the sync state, the 'received GPS tow' field should be interpreted accordingly.
+ *
+ * This is a Mandatory value.
+ */
+ GpsMeasurementState state;
+
+ /**
+ * Received GPS Time-of-Week at the measurement time, in nanoseconds.
+ * The value is relative to the beginning of the current GPS week.
+ *
+ * Given the sync state of GPS receiver, per each satellite, valid range for this field can be:
+ * Searching : [ 0 ] : GPS_MEASUREMENT_STATE_UNKNOWN
+ * Ranging code lock : [ 0 1ms ] : GPS_MEASUREMENT_STATE_CODE_LOCK is set
+ * Bit sync : [ 0 20ms ] : GPS_MEASUREMENT_STATE_BIT_SYNC is set
+ * Subframe sync : [ 0 6ms ] : GPS_MEASUREMENT_STATE_SUBFRAME_SYNC is set
+ * TOW decoded : [ 0 1week ] : GPS_MEASUREMENT_STATE_TOW_DECODED is set
+ */
+ int64_t received_gps_tow_ns;
+
+ /**
+ * 1-Sigma uncertainty of the Received GPS Time-of-Week in nanoseconds.
+ */
+ int64_t received_gps_tow_uncertainty_ns;
+
+ /**
+ * Carrier-to-noise density in dB-Hz, in the range [0, 63].
+ * It contains the measured C/N0 value for the signal at the antenna input.
+ *
+ * This is a Mandatory value.
+ */
+ double c_n0_dbhz;
+
+ /**
+ * Pseudorange rate at the timestamp in m/s.
+ * The value also includes the effects of the receiver clock frequency and satellite clock
+ * frequency errors.
+ *
+ * The value includes the 'pseudorange rate uncertainty' in it.
+ * A positive value indicates that the pseudorange is getting larger.
+ *
+ * This is a Mandatory value.
+ */
+ double pseudorange_rate_mps;
+
+ /**
+ * 1-Sigma uncertainty of the pseudorange rate in m/s.
+ * The uncertainty is represented as an absolute (single sided) value.
+ *
+ * This is a Mandatory value.
+ */
+ double pseudorange_rate_uncertainty_mps;
+
+ /**
+ * Accumulated delta range's state. It indicates whether ADR is reset or there is a cycle slip
+ * (indicating loss of lock).
+ *
+ * This is a Mandatory value.
+ */
+ GpsAccumulatedDeltaRangeState accumulated_delta_range_state;
+
+ /**
+ * Accumulated delta range since the last channel reset in meters.
+ * The data is available if 'accumulated delta range state' != GPS_ADR_STATE_UNKNOWN.
+ */
+ double accumulated_delta_range_m;
+
+ /**
+ * 1-Sigma uncertainty of the accumulated delta range in meters.
+ * The data is available if 'accumulated delta range state' != GPS_ADR_STATE_UNKNOWN.
+ */
+ double accumulated_delta_range_uncertainty_m;
+
+ /**
+ * Best derived Pseudorange by the chip-set, in meters.
+ * The value contains the 'pseudorange uncertainty' in it.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_PSEUDORANGE.
+ */
+ double pseudorange_m;
+
+ /**
+ * 1-Sigma uncertainty of the pseudorange in meters.
+ * The value contains the 'pseudorange' and 'clock' uncertainty in it.
+ * The uncertainty is represented as an absolute (single sided) value.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_PSEUDORANGE_UNCERTAINTY.
+ */
+ double pseudorange_uncertainty_m;
+
+ /**
+ * A fraction of the current C/A code cycle, in the range [0.0, 1023.0]
+ * This value contains the time (in Chip units) since the last C/A code cycle (GPS Msec epoch).
+ *
+ * The reference frequency is given by the field 'carrier_frequency_hz'.
+ * The value contains the 'code-phase uncertainty' in it.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_CODE_PHASE.
+ */
+ double code_phase_chips;
+
+ /**
+ * 1-Sigma uncertainty of the code-phase, in a fraction of chips.
+ * The uncertainty is represented as an absolute (single sided) value.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_CODE_PHASE_UNCERTAINTY.
+ */
+ double code_phase_uncertainty_chips;
+
+ /**
+ * Carrier frequency at which codes and messages are modulated, it can be L1 or L2.
+ * If the field is not set, the carrier frequency is assumed to be L1.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_CARRIER_FREQUENCY.
+ */
+ float carrier_frequency_hz;
+
+ /**
+ * The number of full carrier cycles between the satellite and the receiver.
+ * The reference frequency is given by the field 'carrier_frequency_hz'.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_CARRIER_CYCLES.
+ */
+ int64_t carrier_cycles;
+
+ /**
+ * The RF phase detected by the receiver, in the range [0.0, 1.0].
+ * This is usually the fractional part of the complete carrier phase measurement.
+ *
+ * The reference frequency is given by the field 'carrier_frequency_hz'.
+ * The value contains the 'carrier-phase uncertainty' in it.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_CARRIER_PHASE.
+ */
+ double carrier_phase;
+
+ /**
+ * 1-Sigma uncertainty of the carrier-phase.
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_CARRIER_PHASE_UNCERTAINTY.
+ */
+ double carrier_phase_uncertainty;
+
+ /**
+ * An enumeration that indicates the 'loss of lock' state of the event.
+ */
+ GpsLossOfLock loss_of_lock;
+
+ /**
+ * The number of GPS bits transmitted since Sat-Sun midnight (GPS week).
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_BIT_NUMBER.
+ */
+ int16_t bit_number;
+
+ /**
+ * The elapsed time since the last received bit in milliseconds, in the range [0, 20]
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_TIME_FROM_LAST_BIT.
+ */
+ int16_t time_from_last_bit_ms;
+
+ /**
+ * Doppler shift in Hz.
+ * A positive value indicates that the SV is moving toward the receiver.
+ *
+ * The reference frequency is given by the field 'carrier_frequency_hz'.
+ * The value contains the 'doppler shift uncertainty' in it.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_DOPPLER_SHIFT.
+ */
+ double doppler_shift_hz;
+
+ /**
+ * 1-Sigma uncertainty of the doppler shift in Hz.
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_DOPPLER_SHIFT_UNCERTAINTY.
+ */
+ double doppler_shift_uncertainty_hz;
+
+ /**
+ * An enumeration that indicates the 'multipath' state of the event.
+ */
+ GpsMultipathIndicator multipath_indicator;
+
+ /**
+ * Signal-to-noise ratio in dB.
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_SNR.
+ */
+ double snr_db;
+
+ /**
+ * Elevation in degrees, the valid range is [-90, 90].
+ * The value contains the 'elevation uncertainty' in it.
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_ELEVATION.
+ */
+ double elevation_deg;
+
+ /**
+ * 1-Sigma uncertainty of the elevation in degrees, the valid range is [0, 90].
+ * The uncertainty is represented as the absolute (single sided) value.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_ELEVATION_UNCERTAINTY.
+ */
+ double elevation_uncertainty_deg;
+
+ /**
+ * Azimuth in degrees, in the range [0, 360).
+ * The value contains the 'azimuth uncertainty' in it.
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_AZIMUTH.
+ */
+ double azimuth_deg;
+
+ /**
+ * 1-Sigma uncertainty of the azimuth in degrees, the valid range is [0, 180].
+ * The uncertainty is represented as an absolute (single sided) value.
+ *
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_AZIMUTH_UNCERTAINTY.
+ */
+ double azimuth_uncertainty_deg;
+
+ /**
+ * Whether the GPS represented by the measurement was used for computing the most recent fix.
+ * If the data is available, 'flags' must contain GPS_MEASUREMENT_HAS_USED_IN_FIX.
+ */
+ bool used_in_fix;
+} GpsMeasurement;
+
+/** Represents a reading of GPS measurements. */
+typedef struct {
+ /** set to sizeof(GpsData) */
+ size_t size;
+
+ /** Number of measurements. */
+ size_t measurement_count;
+
+ /** The array of measurements. */
+ GpsMeasurement measurements[GPS_MAX_MEASUREMENT];
+
+ /** The GPS clock time reading. */
+ GpsClock clock;
+} GpsData;
+
+/**
+ * The callback to report measurements from the HAL.
+ *
+ * Parameters:
+ * data - A data structure containing the measurements.
+ */
+typedef void (*gps_measurement_callback) (GpsData* data);
+
+typedef struct {
+ /** set to sizeof(GpsMeasurementCallbacks) */
+ size_t size;
+ gps_measurement_callback measurement_callback;
+} GpsMeasurementCallbacks;
+
+#define GPS_MEASUREMENT_OPERATION_SUCCESS 0
+#define GPS_MEASUREMENT_ERROR_ALREADY_INIT -100
+#define GPS_MEASUREMENT_ERROR_GENERIC -101
+
+/**
+ * Extended interface for GPS Measurements support.
+ */
+typedef struct {
+ /** Set to sizeof(GpsMeasurementInterface) */
+ size_t size;
+
+ /**
+ * Initializes the interface and registers the callback routines with the HAL.
+ * After a successful call to 'init' the HAL must begin to provide updates at its own pace.
+ *
+ * Status:
+ * GPS_MEASUREMENT_OPERATION_SUCCESS
+ * GPS_MEASUREMENT_ERROR_ALREADY_INIT - if a callback has already been registered without a
+ * corresponding call to 'close'
+ * GPS_MEASUREMENT_ERROR_GENERIC - if any other error occurred, it is expected that the HAL
+ * will not generate any updates upon returning this error code.
+ */
+ int (*init) (GpsMeasurementCallbacks* callbacks);
+
+ /**
+ * Stops updates from the HAL, and unregisters the callback routines.
+ * After a call to stop, the previously registered callbacks must be considered invalid by the
+ * HAL.
+ * If 'close' is invoked without a previous 'init', this function should perform no work.
+ */
+ void (*close) ();
+
+} GpsMeasurementInterface;
+
+
+/** Represents a GPS navigation message (or a fragment of it). */
+typedef struct {
+ /** set to sizeof(GpsNavigationMessage) */
+ size_t size;
+
+ /**
+ * Pseudo-random number in the range of [1, 32]
+ * This is a Mandatory value.
+ */
+ int8_t prn;
+
+ /**
+ * The type of message contained in the structure.
+ * This is a Mandatory value.
+ */
+ GpsNavigationMessageType type;
+
+ /**
+ * Message identifier.
+ * It provides an index so the complete Navigation Message can be assembled. i.e. for L1 C/A
+ * subframe 4 and 5, this value corresponds to the 'frame id' of the navigation message.
+ * Subframes 1, 2, and 3 do not contain a 'frame id' and this value can be set to -1.
+ */
+ int16_t message_id;
+
+ /**
+ * Sub-message identifier.
+ * If required by the message 'type', this value contains a sub-index within the current
+ * message (or frame) that is being transmitted.
+ * i.e. for L1 C/A the submessage id corresponds to the sub-frame id of the navigation message.
+ */
+ int16_t submessage_id;
+
+ /**
+ * The length of the data (in bytes) contained in the current message.
+ * If this value is different from zero, 'data' must point to an array of the same size.
+ * e.g. for L1 C/A the size of the sub-frame will be 40 bytes (10 words, 30 bits/word).
+ *
+ * This is a Mandatory value.
+ */
+ size_t data_length;
+
+ /**
+ * The data of the reported GPS message.
+ * The bytes (or words) specified using big endian format (MSB first).
+ *
+ * For L1 C/A, each subframe contains 10 30-bit GPS words. Each GPS word (30 bits) should be
+ * fitted into the last 30 bits in a 4-byte word (skip B31 and B32), with MSB first.
+ */
+ uint8_t* data;
+
+} GpsNavigationMessage;
+
+/**
+ * The callback to report an available fragment of a GPS navigation message from the HAL.
+ *
+ * Parameters:
+ * message - The GPS navigation submessage/subframe representation.
+ */
+typedef void (*gps_navigation_message_callback) (GpsNavigationMessage* message);
+
+typedef struct {
+ /** set to sizeof(GpsNavigationMessageCallbacks) */
+ size_t size;
+ gps_navigation_message_callback navigation_message_callback;
+} GpsNavigationMessageCallbacks;
+
+#define GPS_NAVIGATION_MESSAGE_OPERATION_SUCCESS 0
+#define GPS_NAVIGATION_MESSAGE_ERROR_ALREADY_INIT -100
+#define GPS_NAVIGATION_MESSAGE_ERROR_GENERIC -101
+
+/**
+ * Extended interface for GPS navigation message reporting support.
+ */
+typedef struct {
+ /** Set to sizeof(GpsNavigationMessageInterface) */
+ size_t size;
+
+ /**
+ * Initializes the interface and registers the callback routines with the HAL.
+ * After a successful call to 'init' the HAL must begin to provide updates as they become
+ * available.
+ *
+ * Status:
+ * GPS_NAVIGATION_MESSAGE_OPERATION_SUCCESS
+ * GPS_NAVIGATION_MESSAGE_ERROR_ALREADY_INIT - if a callback has already been registered
+ * without a corresponding call to 'close'.
+ * GPS_NAVIGATION_MESSAGE_ERROR_GENERIC - if any other error occurred, it is expected that
+ * the HAL will not generate any updates upon returning this error code.
+ */
+ int (*init) (GpsNavigationMessageCallbacks* callbacks);
+
+ /**
+ * Stops updates from the HAL, and unregisters the callback routines.
+ * After a call to stop, the previously registered callbacks must be considered invalid by the
+ * HAL.
+ * If 'close' is invoked without a previous 'init', this function should perform no work.
+ */
+ void (*close) ();
+
+} GpsNavigationMessageInterface;
+
__END_DECLS
#endif /* ANDROID_INCLUDE_HARDWARE_GPS_H */
diff --git a/include/hardware/gralloc.h b/include/hardware/gralloc.h
index e7d0103..91e2f69 100644
--- a/include/hardware/gralloc.h
+++ b/include/hardware/gralloc.h
@@ -124,6 +124,9 @@
*/
GRALLOC_USAGE_PROTECTED = 0x00004000,
+ /* buffer may be used as a cursor */
+ GRALLOC_USAGE_CURSOR = 0x00008000,
+
/* implementation-specific private usage flags */
GRALLOC_USAGE_PRIVATE_0 = 0x10000000,
GRALLOC_USAGE_PRIVATE_1 = 0x20000000,
diff --git a/include/hardware/hardware.h b/include/hardware/hardware.h
index 416ae39..74f57aa 100644
--- a/include/hardware/hardware.h
+++ b/include/hardware/hardware.h
@@ -144,8 +144,12 @@
/** module's dso */
void* dso;
+#ifdef __LP64__
+ uint64_t reserved[32-7];
+#else
/** padding to 128 bytes, reserved for future use */
uint32_t reserved[32-7];
+#endif
} hw_module_t;
@@ -186,7 +190,11 @@
struct hw_module_t* module;
/** padding reserved for future use */
+#ifdef __LP64__
+ uint64_t reserved[12];
+#else
uint32_t reserved[12];
+#endif
/** Close this device */
int (*close)(struct hw_device_t* device);
diff --git a/include/hardware/hdmi_cec.h b/include/hardware/hdmi_cec.h
index f049952..95c0c4e 100644
--- a/include/hardware/hdmi_cec.h
+++ b/include/hardware/hdmi_cec.h
@@ -91,7 +91,7 @@
CEC_MESSAGE_TIMER_CLEARED_STATUS = 0x043,
CEC_MESSAGE_USER_CONTROL_PRESSED = 0x44,
CEC_MESSAGE_USER_CONTROL_RELEASED = 0x45,
- CEC_MESSAGE_GET_OSD_NAME = 0x46,
+ CEC_MESSAGE_GIVE_OSD_NAME = 0x46,
CEC_MESSAGE_SET_OSD_NAME = 0x47,
CEC_MESSAGE_SET_OSD_STRING = 0x64,
CEC_MESSAGE_SET_TIMER_PROGRAM_TITLE = 0x67,
@@ -129,6 +129,12 @@
CEC_MESSAGE_VENDOR_COMMAND_WITH_ID = 0xA0,
CEC_MESSAGE_CLEAR_EXTERNAL_TIMER = 0xA1,
CEC_MESSAGE_SET_EXTERNAL_TIMER = 0xA2,
+ CEC_MESSAGE_INITIATE_ARC = 0xC0,
+ CEC_MESSAGE_REPORT_ARC_INITIATED = 0xC1,
+ CEC_MESSAGE_REPORT_ARC_TERMINATED = 0xC2,
+ CEC_MESSAGE_REQUEST_ARC_INITIATION = 0xC3,
+ CEC_MESSAGE_REQUEST_ARC_TERMINATION = 0xC4,
+ CEC_MESSAGE_TERMINATE_ARC = 0xC5,
CEC_MESSAGE_ABORT = 0xFF
};
@@ -149,7 +155,7 @@
*/
enum {
HDMI_EVENT_CEC_MESSAGE = 1,
- HDMI_EVENT_HOT_PLUG = 2
+ HDMI_EVENT_HOT_PLUG = 2,
};
/*
@@ -162,25 +168,66 @@
};
/*
+ * error code used for send_message.
+ */
+enum {
+ HDMI_RESULT_SUCCESS = 0,
+ HDMI_RESULT_NACK = 1, /* not acknowledged */
+ HDMI_RESULT_BUSY = 2, /* bus is busy */
+ HDMI_RESULT_FAIL = 3,
+};
+
+/*
+ * HDMI port type.
+ */
+typedef enum hdmi_port_type {
+ HDMI_INPUT = 0,
+ HDMI_OUTPUT = 1
+} hdmi_port_type_t;
+
+/*
+ * Flags used for set_option()
+ */
+enum {
+ /* When set to false, HAL does not wake up the system upon receiving
+ * <Image View On> or <Text View On>. Used when user changes the TV
+ * settings to disable the auto TV on functionality.
+ * True by default.
+ */
+ HDMI_OPTION_WAKEUP = 1,
+
+ /* When set to false, all the CEC commands are discarded. Used when
+ * user changes the TV settings to disable CEC functionality.
+ * True by default.
+ */
+ HDMI_OPTION_ENABLE_CEC = 2,
+
+ /* Setting this flag to false means Android system will stop handling
+ * CEC service and yield the control over to the microprocessor that is
+ * powered on through the standby mode. When set to true, the system
+ * will gain the control over, hence telling the microprocessor to stop
+ * handling the cec commands. This is called when system goes
+ * in and out of standby mode to notify the microprocessor that it should
+ * start/stop handling CEC commands on behalf of the system.
+ * False by default.
+ */
+ HDMI_OPTION_SYSTEM_CEC_CONTROL = 3,
+};
+
+/*
* Maximum length in bytes of cec message body (exclude header block),
* should not exceed 16 (spec CEC 6 Frame Description)
*/
#define CEC_MESSAGE_BODY_MAX_LENGTH 16
typedef struct cec_message {
- /*
- * logical address of sender
- */
+ /* logical address of sender */
cec_logical_address_t initiator;
- /*
- * logical address of receiver
- */
+ /* logical address of receiver */
cec_logical_address_t destination;
- /*
- * length in bytes of body, range [0, CEC_MESSAGE_BODY_MAX_LENGTH]
- */
+ /* Length in bytes of body, range [0, CEC_MESSAGE_BODY_MAX_LENGTH] */
size_t length;
unsigned char body[CEC_MESSAGE_BODY_MAX_LENGTH];
} cec_message_t;
@@ -190,8 +237,14 @@
* true if the cable is connected; otherwise false.
*/
int connected;
+ int port_id;
} hotplug_event_t;
+typedef struct tx_status_event {
+ int status;
+ int opcode; /* CEC opcode */
+} tx_status_event_t;
+
/*
* HDMI event generated from HAL.
*/
@@ -205,12 +258,29 @@
} hdmi_event_t;
/*
+ * HDMI port descriptor
+ */
+typedef struct hdmi_port_info {
+ hdmi_port_type_t type;
+ // Port ID should start from 1 which corresponds to HDMI "port 1".
+ int port_id;
+ int cec_supported;
+ int arc_supported;
+ uint16_t physical_address;
+} hdmi_port_info_t;
+
+/*
* Callback function type that will be called by HAL implementation.
* Services can not close/open the device in the callback.
*/
typedef void (*event_callback_t)(const hdmi_event_t* event, void* arg);
typedef struct hdmi_cec_module {
+ /**
+ * Common methods of the HDMI CEC module. This *must* be the first member of
+ * hdmi_cec_module as users of this structure will cast a hw_module_t to hdmi_cec_module
+ * pointer in contexts where it's known the hw_module_t references a hdmi_cec_module.
+ */
struct hw_module_t common;
} hdmi_module_t;
@@ -218,15 +288,22 @@
* HDMI-CEC HAL interface definition.
*/
typedef struct hdmi_cec_device {
+ /**
+ * Common methods of the HDMI CEC device. This *must* be the first member of
+ * hdmi_cec_device as users of this structure will cast a hw_device_t to hdmi_cec_device
+ * pointer in contexts where it's known the hw_device_t references a hdmi_cec_device.
+ */
struct hw_device_t common;
/*
- * (*add_logical_address)() passes the logical address that will be used in this system.
+ * (*add_logical_address)() passes the logical address that will be used
+ * in this system.
*
* HAL may use it to configure the hardware so that the CEC commands addressed
- * the given logical address can be filtered in. This method can be called as many times
- * as necessary in order to support multiple logical devices. addr should be in the range
- * of valid logical addresses for the call to succeed.
+ * the given logical address can be filtered in. This method can be called
+ * as many times as necessary in order to support multiple logical devices.
+ * addr should be in the range of valid logical addresses for the call
+ * to succeed.
*
* Returns 0 on success or -errno on error.
*/
@@ -235,8 +312,9 @@
/*
* (*clear_logical_address)() tells HAL to reset all the logical addresses.
*
- * It is used when the system doesn't need to process CEC command any more, hence to tell
- * HAL to stop receiving commands from the CEC bus, and change the state back to the beginning.
+ * It is used when the system doesn't need to process CEC command any more,
+ * hence to tell HAL to stop receiving commands from the CEC bus, and change
+ * the state back to the beginning.
*/
void (*clear_logical_address)(const struct hdmi_cec_device* dev);
@@ -254,12 +332,17 @@
int (*get_physical_address)(const struct hdmi_cec_device* dev, uint16_t* addr);
/*
- * (*send_message)() transmits HDMI-CEC message to other HDMI device. The method should be
- * designed to return in a certain amount of time not hanging forever, which can happen
- * if CEC signal line is pulled low for some reason. HAL implementation should take
- * the situation into account so as not to wait forever for the message to get sent out.
+ * (*send_message)() transmits HDMI-CEC message to other HDMI device.
*
- * Returns 0 on success or -errno on error.
+ * The method should be designed to return in a certain amount of time not
+ * hanging forever, which can happen if CEC signal line is pulled low for
+ * some reason. HAL implementation should take the situation into account
+ * so as not to wait forever for the message to get sent out.
+ *
+ * It should try retransmission at least once as specified in the standard.
+ *
+ * Returns error code. See HDMI_RESULT_SUCCESS, HDMI_RESULT_NACK, and
+ * HDMI_RESULT_BUSY.
*/
int (*send_message)(const struct hdmi_cec_device* dev, const cec_message_t*);
@@ -285,8 +368,39 @@
*/
void (*get_vendor_id)(const struct hdmi_cec_device* dev, uint32_t* vendor_id);
+ /*
+ * (*get_port_info)() returns the hdmi port information of underlying hardware.
+ * info is the list of HDMI port information, and 'total' is the number of
+ * HDMI ports in the system.
+ */
+ void (*get_port_info)(const struct hdmi_cec_device* dev,
+ struct hdmi_port_info* list[], int* total);
+
+ /*
+ * (*set_option)() passes flags controlling the way HDMI-CEC service works down
+ * to HAL implementation. Those flags will be used in case the feature needs
+ * update in HAL itself, firmware or microcontroller.
+ */
+ void (*set_option)(const struct hdmi_cec_device* dev, int flag, int value);
+
+ /*
+ * (*set_audio_return_channel)() configures ARC circuit in the hardware logic
+ * to start or stop the feature. Flag can be either 1 to start the feature
+ * or 0 to stop it.
+ *
+ * Note: this function is declared void and cannot actually return a status.
+ */
+ void (*set_audio_return_channel)(const struct hdmi_cec_device* dev, int flag);
+
+ /*
+ * (*is_connected)() returns the connection status of the specified port.
+ * Returns HDMI_CONNECTED if a device is connected, otherwise HDMI_NOT_CONNECTED.
+ * The HAL should watch for +5V power signal to determine the status.
+ */
+ int (*is_connected)(const struct hdmi_cec_device* dev, int port_id);
+
/* Reserved for future use to maximum 16 functions. Must be NULL. */
- void* reserved[16 - 7];
+ void* reserved[16 - 11];
} hdmi_cec_device_t;
/** convenience API for opening and closing a device */
diff --git a/include/hardware/hwcomposer.h b/include/hardware/hwcomposer.h
index 86479d3..3dfb4fd 100644
--- a/include/hardware/hwcomposer.h
+++ b/include/hardware/hwcomposer.h
@@ -121,6 +121,33 @@
* that the layer will be handled by the HWC (ie: it must not be
* composited with OpenGL ES).
*
+ *
+ * HWC_SIDEBAND
+ * Set by the caller before calling (*prepare)(), this value indicates
+ * the contents of this layer come from a sideband video stream.
+ *
+ * The h/w composer is responsible for receiving new image buffers from
+ * the stream at the appropriate time (e.g. synchronized to a separate
+ * audio stream), compositing them with the current contents of other
+ * layers, and displaying the resulting image. This happens
+ * independently of the normal prepare/set cycle. The prepare/set calls
+ * only happen when other layers change, or when properties of the
+ * sideband layer such as position or size change.
+ *
+ * If the h/w composer can't handle the layer as a sideband stream for
+ * some reason (e.g. unsupported scaling/blending/rotation, or too many
+ * sideband layers) it can set compositionType to HWC_FRAMEBUFFER in
+ * (*prepare)(). However, doing so will result in the layer being shown
+ * as a solid color since the platform is not currently able to composite
+ * sideband layers with the GPU. This may be improved in future
+ * versions of the platform.
+ *
+ *
+ * HWC_CURSOR_OVERLAY
+ * Set by the HWC implementation during (*prepare)(), this value
+ * indicates the layer's composition will now be handled by the HWC.
+ * Additionally, the client can now asynchronously update the on-screen
+ * position of this layer using the setCursorPositionAsync() api.
*/
int32_t compositionType;
@@ -141,13 +168,21 @@
hwc_color_t backgroundColor;
struct {
- /* handle of buffer to compose. This handle is guaranteed to have been
- * allocated from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag. If
- * the layer's handle is unchanged across two consecutive prepare calls and
- * the HWC_GEOMETRY_CHANGED flag is not set for the second call then the
- * HWComposer implementation may assume that the contents of the buffer have
- * not changed. */
- buffer_handle_t handle;
+ union {
+ /* When compositionType is HWC_FRAMEBUFFER, HWC_OVERLAY,
+ * HWC_FRAMEBUFFER_TARGET, this is the handle of the buffer to
+ * compose. This handle is guaranteed to have been allocated
+ * from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag.
+ * If the layer's handle is unchanged across two consecutive
+ * prepare calls and the HWC_GEOMETRY_CHANGED flag is not set
+ * for the second call then the HWComposer implementation may
+ * assume that the contents of the buffer have not changed. */
+ buffer_handle_t handle;
+
+ /* When compositionType is HWC_SIDEBAND, this is the handle
+ * of the sideband video stream to compose. */
+ const native_handle_t* sidebandStream;
+ };
/* transformation to apply to the buffer during composition */
uint32_t transform;
@@ -191,6 +226,10 @@
* reads from them are complete before the framebuffer is ready for
* display.
*
+ * HWC_SIDEBAND layers will never have an acquire fence, since
+ * synchronization is handled through implementation-defined
+ * sideband mechanisms.
+ *
* The HWC takes ownership of the acquireFenceFd and is responsible
* for closing it when no longer needed.
*/
@@ -214,6 +253,10 @@
* produce a release fence for them. The releaseFenceFd will be -1
* for these layers when set() is called.
*
+ * Since HWC_SIDEBAND buffers don't pass through the HWC client,
+ * the HWC shouldn't produce a release fence for them. The
+ * releaseFenceFd will be -1 for these layers when set() is called.
+ *
* The HWC client taks ownership of the releaseFenceFd and is
* responsible for closing it when no longer needed.
*/
@@ -261,10 +304,19 @@
};
};
- /* Allow for expansion w/o breaking binary compatibility.
- * Pad layer to 96 bytes, assuming 32-bit pointers.
+#ifdef __LP64__
+ /*
+ * For 64-bit mode, this struct is 120 bytes (and 8-byte aligned), and needs
+ * to be padded as such to maintain binary compatibility.
*/
- int32_t reserved[24 - 19];
+ uint8_t reserved[120 - 96];
+#else
+ /*
+ * For 32-bit mode, this struct is 96 bytes, and needs to be padded as such
+ * to maintain binary compatibility.
+ */
+ uint8_t reserved[96 - 76];
+#endif
} hwc_layer_1_t;
@@ -435,10 +487,22 @@
/*****************************************************************************/
typedef struct hwc_module {
+ /**
+ * Common methods of the hardware composer module. This *must* be the first member of
+ * hwc_module as users of this structure will cast a hw_module_t to
+ * hwc_module pointer in contexts where it's known the hw_module_t references a
+ * hwc_module.
+ */
struct hw_module_t common;
} hwc_module_t;
typedef struct hwc_composer_device_1 {
+ /**
+ * Common methods of the hardware composer device. This *must* be the first member of
+ * hwc_composer_device_1 as users of this structure will cast a hw_device_t to
+ * hwc_composer_device_1 pointer in contexts where it's known the hw_device_t references a
+ * hwc_composer_device_1.
+ */
struct hw_device_t common;
/*
@@ -448,11 +512,12 @@
* (*prepare)() can be called more than once, the last call prevails.
*
* The HWC responds by setting the compositionType field in each layer to
- * either HWC_FRAMEBUFFER or HWC_OVERLAY. In the former case, the
- * composition for the layer is handled by SurfaceFlinger with OpenGL ES,
- * in the later case, the HWC will have to handle the layer's composition.
- * compositionType and hints are preserved between (*prepare)() calles
- * unless the HWC_GEOMETRY_CHANGED flag is set.
+ * either HWC_FRAMEBUFFER, HWC_OVERLAY, or HWC_CURSOR_OVERLAY. For the
+ * HWC_FRAMEBUFFER type, composition for the layer is handled by
+ * SurfaceFlinger with OpenGL ES. For the latter two overlay types,
+ * the HWC will have to handle the layer's composition. compositionType
+ * and hints are preserved between (*prepare)() calls unless the
+ * HWC_GEOMETRY_CHANGED flag is set.
*
* (*prepare)() is called with HWC_GEOMETRY_CHANGED to indicate that the
* list's geometry has changed, that is, when more than just the buffer's
@@ -538,18 +603,49 @@
int (*eventControl)(struct hwc_composer_device_1* dev, int disp,
int event, int enabled);
- /*
- * blank(..., blank)
- * Blanks or unblanks a display's screen.
- *
- * Turns the screen off when blank is nonzero, on when blank is zero.
- * Multiple sequential calls with the same blank value must be supported.
- * The screen state transition must be be complete when the function
- * returns.
- *
- * returns 0 on success, negative on error.
- */
- int (*blank)(struct hwc_composer_device_1* dev, int disp, int blank);
+ union {
+ /*
+ * For HWC 1.3 and earlier, the blank() interface is used.
+ *
+ * blank(..., blank)
+ * Blanks or unblanks a display's screen.
+ *
+ * Turns the screen off when blank is nonzero, on when blank is zero.
+ * Multiple sequential calls with the same blank value must be
+ * supported.
+ * The screen state transition must be complete when the function
+ * returns.
+ *
+ * returns 0 on success, negative on error.
+ */
+ int (*blank)(struct hwc_composer_device_1* dev, int disp, int blank);
+
+ /*
+ * For HWC 1.4 and above, setPowerMode() will be used in place of
+ * blank().
+ *
+ * setPowerMode(..., mode)
+ * Sets the display screen's power state.
+ *
+ * Refer to the documentation of the HWC_POWER_MODE_* constants
+ * for information about each power mode.
+ *
+ * The functionality is similar to the blank() command in previous
+ * versions of HWC, but with support for more power states.
+ *
+ * The display driver is expected to retain and restore the low power
+ * state of the display while entering and exiting from suspend.
+ *
+ * Multiple sequential calls with the same mode value must be supported.
+ *
+ * The screen state transition must be complete when the function
+ * returns.
+ *
+ * returns 0 on success, negative on error.
+ */
+ int (*setPowerMode)(struct hwc_composer_device_1* dev, int disp,
+ int mode);
+ };
/*
* Used to retrieve information about the h/w composer
@@ -586,16 +682,24 @@
* total number of configurations available for the display is returned in
* *numConfigs. If *numConfigs is zero on entry, then configs may be NULL.
*
- * HWC_DEVICE_API_VERSION_1_1 does not provide a way to choose a config.
- * For displays that support multiple configurations, the h/w composer
- * implementation should choose one and report it as the first config in
- * the list. Reporting the not-chosen configs is not required.
+ * Hardware composers implementing HWC_DEVICE_API_VERSION_1_3 or prior
+ * shall choose one configuration to activate and report it as the first
+ * entry in the returned list. Reporting the inactive configurations is not
+ * required.
*
- * Returns 0 on success or -errno on error. If disp is a hotpluggable
- * display type and no display is connected, an error should be returned.
+ * HWC_DEVICE_API_VERSION_1_4 and later provide configuration management
+ * through SurfaceFlinger, and hardware composers implementing these APIs
+ * must also provide getActiveConfig and setActiveConfig. Hardware composers
+ * implementing these API versions may choose not to activate any
+ * configuration, leaving configuration selection to higher levels of the
+ * framework.
+ *
+ * Returns 0 on success or a negative error code on error. If disp is a
+ * hotpluggable display type and no display is connected, an error shall be
+ * returned.
*
* This field is REQUIRED for HWC_DEVICE_API_VERSION_1_1 and later.
- * It should be NULL for previous versions.
+ * It shall be NULL for previous versions.
*/
int (*getDisplayConfigs)(struct hwc_composer_device_1* dev, int disp,
uint32_t* configs, size_t* numConfigs);
@@ -612,19 +716,80 @@
* array will have one less value than the attributes array.
*
* This field is REQUIRED for HWC_DEVICE_API_VERSION_1_1 and later.
- * It should be NULL for previous versions.
+ * It shall be NULL for previous versions.
*
* If disp is a hotpluggable display type and no display is connected,
* or if config is not a valid configuration for the display, a negative
- * value should be returned.
+ * error code shall be returned.
*/
int (*getDisplayAttributes)(struct hwc_composer_device_1* dev, int disp,
uint32_t config, const uint32_t* attributes, int32_t* values);
/*
+ * (*getActiveConfig)() returns the index of the configuration that is
+ * currently active on the connected display. The index is relative to
+ * the list of configuration handles returned by getDisplayConfigs. If there
+ * is no active configuration, -1 shall be returned.
+ *
+ * Returns the configuration index on success or -1 on error.
+ *
+ * This field is REQUIRED for HWC_DEVICE_API_VERSION_1_4 and later.
+ * It shall be NULL for previous versions.
+ */
+ int (*getActiveConfig)(struct hwc_composer_device_1* dev, int disp);
+
+ /*
+ * (*setActiveConfig)() instructs the hardware composer to switch to the
+ * display configuration at the given index in the list of configuration
+ * handles returned by getDisplayConfigs.
+ *
+ * If this function returns without error, any subsequent calls to
+ * getActiveConfig shall return the index set by this function until one
+ * of the following occurs:
+ * 1) Another successful call of this function
+ * 2) The display is disconnected
+ *
+ * Returns 0 on success or a negative error code on error. If disp is a
+ * hotpluggable display type and no display is connected, or if index is
+ * outside of the range of hardware configurations returned by
+ * getDisplayConfigs, an error shall be returned.
+ *
+ * This field is REQUIRED for HWC_DEVICE_API_VERSION_1_4 and later.
+ * It shall be NULL for previous versions.
+ */
+ int (*setActiveConfig)(struct hwc_composer_device_1* dev, int disp,
+ int index);
+ /*
+ * Asynchronously update the location of the cursor layer.
+ *
+ * Within the standard prepare()/set() composition loop, the client
+ * (surfaceflinger) can request that a given layer uses dedicated cursor
+ * composition hardware by specifying the HWC_IS_CURSOR_LAYER flag. Only
+ * one layer per display can have this flag set. If the layer is suitable
+ * for the platform's cursor hardware, hwcomposer will return from prepare()
+ * a composition type of HWC_CURSOR_OVERLAY for that layer. This indicates
+ * not only that the client is not responsible for compositing that layer,
+ * but also that the client can continue to update the position of that layer
+ * after a call to set(). This can reduce the visible latency of mouse
+ * movement to visible, on-screen cursor updates. Calls to
+ * setCursorPositionAsync() may be made from a different thread doing the
+ * prepare()/set() composition loop, but care must be taken to not interleave
+ * calls of setCursorPositionAsync() between calls of set()/prepare().
+ *
+ * Notes:
+ * - Only one layer per display can be specified as a cursor layer with
+ * HWC_IS_CURSOR_LAYER.
+ * - hwcomposer will only return one layer per display as HWC_CURSOR_OVERLAY
+ * - This returns 0 on success or -errno on error.
+ * - This field is optional for HWC_DEVICE_API_VERSION_1_4 and later. It
+ * should be null for previous versions.
+ */
+ int (*setCursorPositionAsync)(struct hwc_composer_device_1 *dev, int disp, int x_pos, int y_pos);
+
+ /*
* Reserved for future use. Must be NULL.
*/
- void* reserved_proc[4];
+ void* reserved_proc[1];
} hwc_composer_device_1_t;
diff --git a/include/hardware/hwcomposer_defs.h b/include/hardware/hwcomposer_defs.h
index c69a4bc..9a52436 100644
--- a/include/hardware/hwcomposer_defs.h
+++ b/include/hardware/hwcomposer_defs.h
@@ -36,6 +36,7 @@
#define HWC_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, HWC_HEADER_VERSION)
#define HWC_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, HWC_HEADER_VERSION)
#define HWC_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, HWC_HEADER_VERSION)
+#define HWC_DEVICE_API_VERSION_1_4 HARDWARE_DEVICE_API_VERSION_2(1, 4, HWC_HEADER_VERSION)
enum {
/* hwc_composer_device_t::set failed in EGL */
@@ -76,6 +77,16 @@
* by SurfaceFlinger (just as if compositionType was set to HWC_OVERLAY).
*/
HWC_SKIP_LAYER = 0x00000001,
+
+ /*
+ * HWC_IS_CURSOR_LAYER is set by surfaceflinger to indicate that this
+ * layer is being used as a cursor on this particular display, and that
+ * surfaceflinger can potentially perform asynchronous position updates for
+ * this layer. If a call to prepare() returns HWC_CURSOR_OVERLAY for the
+ * composition type of this layer, then the hwcomposer will allow async
+ * position updates to this layer via setCursorPositionAsync().
+ */
+ HWC_IS_CURSOR_LAYER = 0x00000002
};
/*
@@ -95,8 +106,17 @@
/* this layer holds the result of compositing the HWC_FRAMEBUFFER layers.
* Added in HWC_DEVICE_API_VERSION_1_1. */
HWC_FRAMEBUFFER_TARGET = 3,
-};
+ /* this layer's contents are taken from a sideband buffer stream.
+ * Added in HWC_DEVICE_API_VERSION_1_4. */
+ HWC_SIDEBAND = 4,
+
+ /* this layer's composition will be handled by hwcomposer by dedicated
+ cursor overlay hardware. hwcomposer will also allow async position updates
+ of this layer outside of the normal prepare()/set() loop. Added in
+ HWC_DEVICE_API_VERSION_1_4. */
+ HWC_CURSOR_OVERLAY = 5
+ };
/*
* hwc_layer_t::blending values
*/
@@ -194,6 +214,32 @@
HWC_DISPLAY_VIRTUAL_BIT = 1 << HWC_DISPLAY_VIRTUAL,
};
+/* Display power modes */
+enum {
+ /* The display is turned off (blanked). */
+ HWC_POWER_MODE_OFF = 0,
+ /* The display is turned on and configured in a low power state
+ * that is suitable for presenting ambient information to the user,
+ * possibly with lower fidelity than normal but greater efficiency. */
+ HWC_POWER_MODE_DOZE = 1,
+ /* The display is turned on normally. */
+ HWC_POWER_MODE_NORMAL = 2,
+ /* The display is configured as in HWC_POWER_MODE_DOZE but may
+ * stop applying frame buffer updates from the graphics subsystem.
+ * This power mode is effectively a hint from the doze dream to
+ * tell the hardware that it is done drawing to the display for the
+ * time being and that the display should remain on in a low power
+ * state and continue showing its current contents indefinitely
+ * until the mode changes.
+ *
+ * This mode may also be used as a signal to enable hardware-based doze
+ * functionality. In this case, the doze dream is effectively
+ * indicating that the hardware is free to take over the display
+ * and manage it autonomously to implement low power always-on display
+ * functionality. */
+ HWC_POWER_MODE_DOZE_SUSPEND = 3,
+};
+
/*****************************************************************************/
__END_DECLS
diff --git a/include/hardware/keymaster.h b/include/hardware/keymaster.h
index 12158bf..8c5ff14 100644
--- a/include/hardware/keymaster.h
+++ b/include/hardware/keymaster.h
@@ -83,6 +83,12 @@
};
struct keystore_module {
+ /**
+ * Common methods of the keystore module. This *must* be the first member of
+ * keystore_module as users of this structure will cast a hw_module_t to
+ * keystore_module pointer in contexts where it's known the hw_module_t references a
+ * keystore_module.
+ */
hw_module_t common;
};
@@ -166,6 +172,12 @@
* The parameters that can be set for a given keymaster implementation.
*/
struct keymaster_device {
+ /**
+ * Common methods of the keymaster device. This *must* be the first member of
+ * keymaster_device as users of this structure will cast a hw_device_t to
+ * keymaster_device pointer in contexts where it's known the hw_device_t references a
+ * keymaster_device.
+ */
struct hw_device_t common;
/**
@@ -282,4 +294,3 @@
__END_DECLS
#endif // ANDROID_HARDWARE_KEYMASTER_H
-
diff --git a/include/hardware/local_time_hal.h b/include/hardware/local_time_hal.h
index 6b6a317..946e799 100644
--- a/include/hardware/local_time_hal.h
+++ b/include/hardware/local_time_hal.h
@@ -55,6 +55,12 @@
};
struct local_time_hw_device {
+ /**
+ * Common methods of the local time hardware device. This *must* be the first member of
+ * local_time_hw_device as users of this structure will cast a hw_device_t to
+ * local_time_hw_device pointer in contexts where it's known the hw_device_t references a
+ * local_time_hw_device.
+ */
struct hw_device_t common;
/**
diff --git a/include/hardware/mcu.h b/include/hardware/mcu.h
deleted file mode 100644
index 6fe2cfe..0000000
--- a/include/hardware/mcu.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_INCLUDE_HARDWARE_MCU_H
-#define ANDROID_INCLUDE_HARDWARE_MCU_H
-
-#include <stdint.h>
-#include <sys/cdefs.h>
-#include <sys/types.h>
-
-#include <hardware/hardware.h>
-
-__BEGIN_DECLS
-
-#define MCU_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
-
-/*
- * The id of this module
- */
-#define MCU_HARDWARE_MODULE_ID "mcu"
-
-/*
- * MCU message keys passed to (*sendMessage)
- */
-#define MCU_PARAMETER_MSG_ENABLE_MCU "enable_mcu"
-
-/*
- * MCU message values passed to (*sendMessage)
- */
-#define MCU_PARAMETER_ARG_ON "on"
-#define MCU_PARAMETER_ARG_OFF "off"
-
-/*
- * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
- * and the fields of this data structure must begin with hw_module_t
- * followed by module specific information.
- */
-typedef struct mcu_module {
- struct hw_module_t common;
-
- /*
- * (*init)() performs MCU module setup actions at runtime startup, such
- * as to initialize an external MCU. This is called only by the MCU HAL
- * instance loaded by PowerManagerService.
- *
- * Returns 0 on success or -errno on error.
- */
- int (*init)(struct mcu_module *module);
-
- /*
- * (*sendMessage)() passes a message/argument pair to the MCU to execute
- * a function. msg is NULL-terminated. If arg is text, then arg_len must
- * reflect the string length. result is a heap-allocated buffer that the
- * caller must free. If there is no result, then *result will be NULL and
- * *result_len will be 0.
- *
- * Returns 0 on success or -errno in case of error (for example, if the
- * MCU does not support the specified message.)
- *
- */
- int (*sendMessage)(struct mcu_module *module, const char *msg,
- const void *arg, size_t arg_len, void **result,
- size_t *result_len);
-
-} mcu_module_t;
-
-__END_DECLS
-
-#endif // ANDROID_INCLUDE_HARDWARE_MCU_H
diff --git a/include/hardware/nfc.h b/include/hardware/nfc.h
index 09523b3..58d33d9 100644
--- a/include/hardware/nfc.h
+++ b/include/hardware/nfc.h
@@ -53,12 +53,19 @@
* 9) Core NCI stack calls close()
*/
#define NFC_NCI_HARDWARE_MODULE_ID "nfc_nci"
+#define NFC_NCI_BCM2079X_HARDWARE_MODULE_ID "nfc_nci.bcm2079x"
#define NFC_NCI_CONTROLLER "nci"
/*
* nfc_nci_module_t should contain module-specific parameters
*/
typedef struct nfc_nci_module_t {
+ /**
+ * Common methods of the NFC NCI module. This *must* be the first member of
+ * nfc_nci_module_t as users of this structure will cast a hw_module_t to
+ * nfc_nci_module_t pointer in contexts where it's known the hw_module_t references a
+ * nfc_nci_module_t.
+ */
struct hw_module_t common;
} nfc_nci_module_t;
@@ -108,6 +115,12 @@
* All methods in the NCI HAL are asynchronous.
*/
typedef struct nfc_nci_device {
+ /**
+ * Common methods of the NFC NCI device. This *must* be the first member of
+ * nfc_nci_device_t as users of this structure will cast a hw_device_t to
+ * nfc_nci_device_t pointer in contexts where it's known the hw_device_t references a
+ * nfc_nci_device_t.
+ */
struct hw_device_t common;
/*
* (*open)() Opens the NFC controller device and performs initialization.
@@ -210,6 +223,12 @@
#define NFC_PN544_CONTROLLER "pn544"
typedef struct nfc_module_t {
+ /**
+ * Common methods of the NFC NXP PN544 module. This *must* be the first member of
+ * nfc_module_t as users of this structure will cast a hw_module_t to
+ * nfc_module_t pointer in contexts where it's known the hw_module_t references a
+ * nfc_module_t.
+ */
struct hw_module_t common;
} nfc_module_t;
@@ -227,6 +246,12 @@
} nfc_pn544_linktype;
typedef struct {
+ /**
+ * Common methods of the NFC NXP PN544 device. This *must* be the first member of
+ * nfc_pn544_device_t as users of this structure will cast a hw_device_t to
+ * nfc_pn544_device_t pointer in contexts where it's known the hw_device_t references a
+ * nfc_pn544_device_t.
+ */
struct hw_device_t common;
/* The number of EEPROM registers to write */
diff --git a/include/hardware/nfc_tag.h b/include/hardware/nfc_tag.h
new file mode 100644
index 0000000..040a07d
--- /dev/null
+++ b/include/hardware/nfc_tag.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_NFC_TAG_HAL_INTERFACE_H
+#define ANDROID_NFC_TAG_HAL_INTERFACE_H
+
+#include <stdint.h>
+
+#include <hardware/hardware.h>
+
+__BEGIN_DECLS
+
+/*
+ * HAL for programmable NFC tags.
+ *
+ */
+
+#define NFC_TAG_HARDWARE_MODULE_ID "nfc_tag"
+#define NFC_TAG_ID "tag"
+
+typedef struct nfc_tag_module_t {
+ /**
+ * Common methods of the NFC tag module. This *must* be the first member of
+ * nfc_tag_module_t as users of this structure will cast a hw_module_t to
+ * nfc_tag_module_t pointer in contexts where it's known the hw_module_t references a
+ * nfc_tag_module_t.
+ */
+ struct hw_module_t common;
+} nfc_tag_module_t;
+
+typedef struct nfc_tag_device {
+ /**
+ * Common methods of the NFC tag device. This *must* be the first member of
+ * nfc_tag_device_t as users of this structure will cast a hw_device_t to
+ * nfc_tag_device_t pointer in contexts where it's known the hw_device_t references a
+ * nfc_tag_device_t.
+ */
+ struct hw_device_t common;
+
+ /**
+ * Initialize the NFC tag.
+ *
+ * The driver must:
+ * * Set the static lock bytes to read only
+ * Configure the Capability Container to disable write access
+ * eg: 0xE1 0x10 <size> 0x0F
+ *
+ * This function is called once before any calls to setContent().
+ *
+ * Return 0 on success or -errno on error.
+ */
+ int (*init)(const struct nfc_tag_device *dev);
+
+ /**
+ * Set the NFC tag content.
+ *
+ * The driver must write <data> in the data area of the tag starting at
+ * byte 0 of block 4 and zero the rest of the data area.
+ *
+ * Returns 0 on success or -errno on error.
+ */
+ int (*setContent)(const struct nfc_tag_device *dev, const uint8_t *data, size_t len);
+
+ /**
+ * Returns the memory size of the data area.
+ */
+ int (*getMemorySize)(const struct nfc_tag_device *dev);
+} nfc_tag_device_t;
+
+static inline int nfc_tag_open(const struct hw_module_t* module,
+ nfc_tag_device_t** dev) {
+ return module->methods->open(module, NFC_TAG_ID,
+ (struct hw_device_t**)dev);
+}
+
+static inline int nfc_tag_close(nfc_tag_device_t* dev) {
+ return dev->common.close(&dev->common);
+}
+
+__END_DECLS
+
+#endif // ANDROID_NFC_TAG_HAL_INTERFACE_H
diff --git a/include/hardware/power.h b/include/hardware/power.h
index 89d57ed..dc33705 100644
--- a/include/hardware/power.h
+++ b/include/hardware/power.h
@@ -44,7 +44,8 @@
* KLP.
*/
POWER_HINT_VIDEO_ENCODE = 0x00000003,
- POWER_HINT_VIDEO_DECODE = 0x00000004
+ POWER_HINT_VIDEO_DECODE = 0x00000004,
+ POWER_HINT_LOW_POWER = 0x00000005
} power_hint_t;
/**
@@ -112,6 +113,13 @@
* and it may be appropriate to raise speeds of CPU, memory bus,
* etc. The data parameter is unused.
*
+ * POWER_HINT_LOW_POWER
+ *
+ * Low power mode is activated or deactivated. Low power mode
+ * is intended to save battery at the cost of performance. The data
+ * parameter is non-zero when low power mode is activated, and zero
+ * when deactivated.
+ *
* A particular platform may choose to ignore any hint.
*
* availability: version 0.2
diff --git a/include/hardware/sensors.h b/include/hardware/sensors.h
index cc08091..a44c155 100644
--- a/include/hardware/sensors.h
+++ b/include/hardware/sensors.h
@@ -34,6 +34,13 @@
#define SENSORS_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION_2(1, 0, SENSORS_HEADER_VERSION)
#define SENSORS_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, SENSORS_HEADER_VERSION)
#define SENSORS_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, SENSORS_HEADER_VERSION)
+#define SENSORS_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, SENSORS_HEADER_VERSION)
+
+/**
+ * Please see the Sensors section of source.android.com for an
+ * introduction to and detailed descriptions of Android sensor types:
+ * http://source.android.com/devices/sensors/index.html
+ */
/**
* The id of this module
@@ -57,9 +64,12 @@
/*
+ * **** Deprecated *****
* flags for (*batch)()
* Availability: SENSORS_DEVICE_API_VERSION_1_0
- * see (*batch)() documentation for details
+ * see (*batch)() documentation for details.
+ * Deprecated as of SENSORS_DEVICE_API_VERSION_1_3.
+ * WAKE_UP_* sensors replace WAKE_UPON_FIFO_FULL concept.
*/
enum {
SENSORS_BATCH_DRY_RUN = 0x00000001,
@@ -82,74 +92,40 @@
*/
#define SENSOR_PERMISSION_BODY_SENSORS "android.permission.BODY_SENSORS"
-/**
- * Definition of the axis used by the sensor HAL API
- *
- * This API is relative to the screen of the device in its default orientation,
- * that is, if the device can be used in portrait or landscape, this API
- * is only relative to the NATURAL orientation of the screen. In other words,
- * the axis are not swapped when the device's screen orientation changes.
- * Higher level services /may/ perform this transformation.
- *
- * x<0 x>0
- * ^
- * |
- * +-----------+--> y>0
- * | |
- * | |
- * | |
- * | | / z<0
- * | | /
- * | | /
- * O-----------+/
- * |[] [ ] []/
- * +----------/+ y<0
- * /
- * /
- * |/ z>0 (toward the sky)
- *
- * O: Origin (x=0,y=0,z=0)
- *
+/*
+ * Availability: SENSORS_DEVICE_API_VERSION_1_3
+ * Sensor flags used in sensor_t.flags.
*/
+enum {
+ /*
+ * Whether this sensor wakes up the AP from suspend mode when data is available.
+ */
+ SENSOR_FLAG_WAKE_UP = 1U << 0,
+ /*
+ * Reporting modes for various sensors. Each sensor will have exactly one of these modes set.
+ * The least significant 2nd, 3rd and 4th bits are used to represent four possible reporting
+ * modes.
+ */
+ SENSOR_FLAG_CONTINUOUS_MODE = 0, // 0000
+ SENSOR_FLAG_ON_CHANGE_MODE = 0x2, // 0010
+ SENSOR_FLAG_ONE_SHOT_MODE = 0x4, // 0100
+ SENSOR_FLAG_SPECIAL_REPORTING_MODE = 0x6 // 0110
+};
/*
- * Interaction with suspend mode
- *
- * Unless otherwise noted, an enabled sensor shall not prevent the
- * SoC to go into suspend mode. It is the responsibility of applications
- * to keep a partial wake-lock should they wish to receive sensor
- * events while the screen is off. While in suspend mode, and unless
- * otherwise noted (batch mode, sensor particularities, ...), enabled sensors'
- * events are lost.
- *
- * Note that conceptually, the sensor itself is not de-activated while in
- * suspend mode -- it's just that the data it returns are lost. As soon as
- * the SoC gets out of suspend mode, operations resume as usual. Of course,
- * in practice sensors shall be disabled while in suspend mode to
- * save power, unless batch mode is active, in which case they must
- * continue fill their internal FIFO (see the documentation of batch() to
- * learn how suspend interacts with batch mode).
- *
- * In batch mode, and only when the flag SENSORS_BATCH_WAKE_UPON_FIFO_FULL is
- * set and supported, the specified sensor must be able to wake-up the SoC and
- * be able to buffer at least 10 seconds worth of the requested sensor events.
- *
- * There are notable exceptions to this behavior, which are sensor-dependent
- * (see sensor types definitions below)
- *
- *
- * The sensor type documentation below specifies the wake-up behavior of
- * each sensor:
- * wake-up: yes this sensor must wake-up the SoC to deliver events
- * wake-up: no this sensor shall not wake-up the SoC, events are dropped
- *
+ * Mask and shift for reporting mode sensor flags defined above.
*/
+#define REPORTING_MODE_MASK (0xE)
+#define REPORTING_MODE_SHIFT (1)
/*
* Sensor type
*
* Each sensor has a type which defines what this sensor measures and how
- * measures are reported. All types are defined below.
+ * measures are reported. See the Base sensors and Composite sensors lists
+ * for complete descriptions:
+ * http://source.android.com/devices/sensors/base_triggers.html
+ * http://source.android.com/devices/sensors/composite_sensors.html
*
* Device manufacturers (OEMs) can define their own sensor types, for
* their private use by applications or services provided by them. Such
@@ -184,7 +160,7 @@
* SENSOR_TYPE_ON_HEAD_DETECTOR and STRING_SENSOR_TYPE_ON_HEAD_DETECTOR,
* those types should replace the Glass-team-specific types in all future
* launches.
- * - When launching glass on the L release, Google should now use the official
+ * - When launching Glass on the L release, Google should now use the official
* type (SENSOR_TYPE_ON_HEAD_DETECTOR) and stringType.
* - This way, all applications can now use this sensor.
*/
@@ -196,49 +172,8 @@
#define SENSOR_TYPE_DEVICE_PRIVATE_BASE 0x10000
/*
- * Sensor fusion and virtual sensors
- *
- * Many sensor types are or can be implemented as virtual sensors from
- * physical sensors on the device. For instance the rotation vector sensor,
- * orientation sensor, step-detector, step-counter, etc...
- *
- * From the point of view of this API these virtual sensors MUST appear as
- * real, individual sensors. It is the responsibility of the driver and HAL
- * to make sure this is the case.
- *
- * In particular, all sensors must be able to function concurrently.
- * For example, if defining both an accelerometer and a step counter,
- * then both must be able to work concurrently.
- */
-
-/*
- * Trigger modes
- *
- * Sensors can report events in different ways called trigger modes,
- * each sensor type has one and only one trigger mode associated to it.
- * Currently there are four trigger modes defined:
- *
- * continuous: events are reported at a constant rate defined by setDelay().
- * eg: accelerometers, gyroscopes.
- * on-change: events are reported only if the sensor's value has changed.
- * setDelay() is used to set a lower limit to the reporting
- * period (minimum time between two events).
- * The HAL must return an event immediately when an on-change
- * sensor is activated.
- * eg: proximity, light sensors
- * one-shot: upon detection of an event, the sensor deactivates itself and
- * then sends a single event. Order matters to avoid race
- * conditions. No other event is sent until the sensor get
- * reactivated. setDelay() is ignored.
- * eg: significant motion sensor
- * special: see details in the sensor type specification below
- *
- */
-
-
-/*
* SENSOR_TYPE_META_DATA
- * trigger-mode: n/a
+ * reporting-mode: n/a
* wake-up sensor: n/a
*
* NO SENSOR OF THAT TYPE MUST BE RETURNED (*get_sensors_list)()
@@ -270,55 +205,52 @@
#define SENSOR_TYPE_META_DATA (0)
/*
+ * Wake up sensors.
+ * Each sensor may have either or both a wake-up and a non-wake variant.
+ * When registered in batch mode, wake-up sensors will wake up the AP when
+ * their FIFOs are full or when the batch timeout expires. A separate FIFO has
+ * to be maintained for wake up sensors and non wake up sensors. The non wake-up
+ * sensors need to overwrite their FIFOs when they are full till the AP wakes up
+ * and the wake-up sensors will wake-up the AP when their FIFOs are full or when
+ * the batch timeout expires without losing events. Wake-up and non wake-up variants
+ * of each sensor can be activated at different rates independently of each other.
+ *
+ * Note: Proximity sensor and significant motion sensor which were defined in previous
+ * releases are also wake-up sensors and should be treated as such. Wake-up one-shot
+ * sensors like SIGNIFICANT_MOTION cannot be batched, hence the text about batch above
+ * doesn't apply to them. See the definitions of SENSOR_TYPE_PROXIMITY and
+ * SENSOR_TYPE_SIGNIFICANT_MOTION for more info.
+ *
+ * Set SENSOR_FLAG_WAKE_UP flag for all wake-up sensors.
+ *
+ * For example, a device can have two sensors both of SENSOR_TYPE_ACCELEROMETER and
+ * one of them can be a wake_up sensor (with SENSOR_FLAG_WAKE_UP flag set) and the other
+ * can be a regular non wake_up sensor. Both of these sensors must be activated/deactivated
+ * independently of the other.
+ */
+
+/*
* SENSOR_TYPE_ACCELEROMETER
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* All values are in SI units (m/s^2) and measure the acceleration of the
* device minus the force of gravity.
*
- * Acceleration sensors return sensor events for all 3 axes at a constant
- * rate defined by setDelay().
- *
- * x: Acceleration on the x-axis
- * y: Acceleration on the y-axis
- * z: Acceleration on the z-axis
- *
- * Note that the readings from the accelerometer include the acceleration
- * due to gravity (which is opposite to the direction of the gravity vector).
- *
- * Examples:
- * The norm of <x, y, z> should be close to 0 when in free fall.
- *
- * When the device lies flat on a table and is pushed on its left side
- * toward the right, the x acceleration value is positive.
- *
- * When the device lies flat on a table, the acceleration value is +9.81,
- * which correspond to the acceleration of the device (0 m/s^2) minus the
- * force of gravity (-9.81 m/s^2).
- *
- * When the device lies flat on a table and is pushed toward the sky, the
- * acceleration value is greater than +9.81, which correspond to the
- * acceleration of the device (+A m/s^2) minus the force of
- * gravity (-9.81 m/s^2).
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_ACCELEROMETER (1)
#define SENSOR_STRING_TYPE_ACCELEROMETER "android.sensor.accelerometer"
/*
* SENSOR_TYPE_GEOMAGNETIC_FIELD
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* All values are in micro-Tesla (uT) and measure the geomagnetic
* field in the X, Y and Z axis.
*
- * Returned values include calibration mechanisms such that the vector is
- * aligned with the magnetic declination and heading of the earth's
- * geomagnetic field.
- *
- * Magnetic Field sensors return sensor events for all 3 axes at a constant
- * rate defined by setDelay().
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_GEOMAGNETIC_FIELD (2)
#define SENSOR_TYPE_MAGNETIC_FIELD SENSOR_TYPE_GEOMAGNETIC_FIELD
@@ -326,82 +258,51 @@
/*
* SENSOR_TYPE_ORIENTATION
- * trigger-mode: continuous
- * wake-up sensor: no
- *
+ * reporting-mode: continuous
+ *
* All values are angles in degrees.
- *
+ *
* Orientation sensors return sensor events for all 3 axes at a constant
* rate defined by setDelay().
*
- * azimuth: angle between the magnetic north direction and the Y axis, around
- * the Z axis (0<=azimuth<360).
- * 0=North, 90=East, 180=South, 270=West
- *
- * pitch: Rotation around X axis (-180<=pitch<=180), with positive values when
- * the z-axis moves toward the y-axis.
- *
- * roll: Rotation around Y axis (-90<=roll<=90), with positive values when
- * the x-axis moves towards the z-axis.
- *
- * Note: For historical reasons the roll angle is positive in the clockwise
- * direction (mathematically speaking, it should be positive in the
- * counter-clockwise direction):
- *
- * Z
- * ^
- * (+roll) .--> |
- * / |
- * | | roll: rotation around Y axis
- * X <-------(.)
- * Y
- * note that +Y == -roll
- *
- *
- *
- * Note: This definition is different from yaw, pitch and roll used in aviation
- * where the X axis is along the long side of the plane (tail to nose).
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_ORIENTATION (3)
#define SENSOR_STRING_TYPE_ORIENTATION "android.sensor.orientation"
/*
* SENSOR_TYPE_GYROSCOPE
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* All values are in radians/second and measure the rate of rotation
- * around the X, Y and Z axis. The coordinate system is the same as is
- * used for the acceleration sensor. Rotation is positive in the
- * counter-clockwise direction (right-hand rule). That is, an observer
- * looking from some positive location on the x, y or z axis at a device
- * positioned on the origin would report positive rotation if the device
- * appeared to be rotating counter clockwise. Note that this is the
- * standard mathematical definition of positive rotation and does not agree
- * with the definition of roll given earlier.
- * The range should at least be 17.45 rad/s (ie: ~1000 deg/s).
+ * around the X, Y and Z axis.
*
- * automatic gyro-drift compensation is allowed but not required.
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_GYROSCOPE (4)
#define SENSOR_STRING_TYPE_GYROSCOPE "android.sensor.gyroscope"
/*
* SENSOR_TYPE_LIGHT
- * trigger-mode: on-change
- * wake-up sensor: no
+ * reporting-mode: on-change
*
* The light sensor value is returned in SI lux units.
+ *
+ * Both wake-up and non wake-up versions are useful.
*/
#define SENSOR_TYPE_LIGHT (5)
#define SENSOR_STRING_TYPE_LIGHT "android.sensor.light"
/*
* SENSOR_TYPE_PRESSURE
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* The pressure sensor return the athmospheric pressure in hectopascal (hPa)
+ *
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_PRESSURE (6)
#define SENSOR_STRING_TYPE_PRESSURE "android.sensor.pressure"
@@ -412,45 +313,39 @@
/*
* SENSOR_TYPE_PROXIMITY
- * trigger-mode: on-change
- * wake-up sensor: yes
+ * reporting-mode: on-change
*
- * The distance value is measured in centimeters. Note that some proximity
- * sensors only support a binary "close" or "far" measurement. In this case,
- * the sensor should report its maxRange value in the "far" state and a value
- * less than maxRange in the "near" state.
+ * The proximity sensor which turns the screen off and back on during calls is the
+ * wake-up proximity sensor. Implement wake-up proximity sensor before implementing
+ * a non wake-up proximity sensor. For the wake-up proximity sensor set the flag
+ * SENSOR_FLAG_WAKE_UP.
+ * The value corresponds to the distance to the nearest object in centimeters.
*/
#define SENSOR_TYPE_PROXIMITY (8)
#define SENSOR_STRING_TYPE_PROXIMITY "android.sensor.proximity"
/*
* SENSOR_TYPE_GRAVITY
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* A gravity output indicates the direction of and magnitude of gravity in
- * the devices's coordinates. On Earth, the magnitude is 9.8 m/s^2.
- * Units are m/s^2. The coordinate system is the same as is used for the
- * acceleration sensor. When the device is at rest, the output of the
- * gravity sensor should be identical to that of the accelerometer.
+ * the device's coordinates.
+ *
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_GRAVITY (9)
#define SENSOR_STRING_TYPE_GRAVITY "android.sensor.gravity"
/*
* SENSOR_TYPE_LINEAR_ACCELERATION
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* Indicates the linear acceleration of the device in device coordinates,
* not including gravity.
*
- * The output is conceptually:
- * output of TYPE_ACCELERATION - output of TYPE_GRAVITY
- *
- * Readings on all axes should be close to 0 when device lies on a table.
- * Units are m/s^2.
- * The coordinate system is the same as is used for the acceleration sensor.
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_LINEAR_ACCELERATION (10)
#define SENSOR_STRING_TYPE_LINEAR_ACCELERATION "android.sensor.linear_acceleration"
@@ -458,232 +353,89 @@
/*
* SENSOR_TYPE_ROTATION_VECTOR
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* The rotation vector symbolizes the orientation of the device relative to the
- * East-North-Up coordinates frame. It is usually obtained by integration of
- * accelerometer, gyroscope and magnetometer readings.
+ * East-North-Up coordinates frame.
*
- * The East-North-Up coordinate system is defined as a direct orthonormal basis
- * where:
- * - X points east and is tangential to the ground.
- * - Y points north and is tangential to the ground.
- * - Z points towards the sky and is perpendicular to the ground.
- *
- * The orientation of the phone is represented by the rotation necessary to
- * align the East-North-Up coordinates with the phone's coordinates. That is,
- * applying the rotation to the world frame (X,Y,Z) would align them with the
- * phone coordinates (x,y,z).
- *
- * The rotation can be seen as rotating the phone by an angle theta around
- * an axis rot_axis to go from the reference (East-North-Up aligned) device
- * orientation to the current device orientation.
- *
- * The rotation is encoded as the 4 (reordered) components of a unit quaternion:
- * sensors_event_t.data[0] = rot_axis.x*sin(theta/2)
- * sensors_event_t.data[1] = rot_axis.y*sin(theta/2)
- * sensors_event_t.data[2] = rot_axis.z*sin(theta/2)
- * sensors_event_t.data[3] = cos(theta/2)
- * where
- * - rot_axis.x,y,z are the North-East-Up coordinates of a unit length vector
- * representing the rotation axis
- * - theta is the rotation angle
- *
- * The quaternion must be of norm 1 (it is a unit quaternion). Failure to ensure
- * this will cause erratic client behaviour.
- *
- * In addition, this sensor reports an estimated heading accuracy.
- * sensors_event_t.data[4] = estimated_accuracy (in radians)
- * The heading error must be less than estimated_accuracy 95% of the time
- *
- * This sensor must use a gyroscope and an accelerometer as main orientation
- * change input.
- *
- * This sensor can also include magnetometer input to make up for gyro drift,
- * but it cannot be implemented using only a magnetometer.
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_ROTATION_VECTOR (11)
#define SENSOR_STRING_TYPE_ROTATION_VECTOR "android.sensor.rotation_vector"
/*
* SENSOR_TYPE_RELATIVE_HUMIDITY
- * trigger-mode: on-change
- * wake-up sensor: no
+ * reporting-mode: on-change
*
* A relative humidity sensor measures relative ambient air humidity and
* returns a value in percent.
+ *
+ * Both wake-up and non wake-up versions are useful.
*/
#define SENSOR_TYPE_RELATIVE_HUMIDITY (12)
#define SENSOR_STRING_TYPE_RELATIVE_HUMIDITY "android.sensor.relative_humidity"
/*
* SENSOR_TYPE_AMBIENT_TEMPERATURE
- * trigger-mode: on-change
- * wake-up sensor: no
+ * reporting-mode: on-change
*
* The ambient (room) temperature in degree Celsius.
+ *
+ * Both wake-up and non wake-up versions are useful.
*/
#define SENSOR_TYPE_AMBIENT_TEMPERATURE (13)
#define SENSOR_STRING_TYPE_AMBIENT_TEMPERATURE "android.sensor.ambient_temperature"
/*
* SENSOR_TYPE_MAGNETIC_FIELD_UNCALIBRATED
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* Similar to SENSOR_TYPE_MAGNETIC_FIELD, but the hard iron calibration is
* reported separately instead of being included in the measurement.
- * Factory calibration and temperature compensation should still be applied to
- * the "uncalibrated" measurement.
- * Separating away the hard iron calibration estimation allows the system to
- * better recover from bad hard iron estimation.
*
- * All values are in micro-Tesla (uT) and measure the ambient magnetic
- * field in the X, Y and Z axis. Assumptions that the the magnetic field
- * is due to the Earth's poles should be avoided.
- *
- * The uncalibrated_magnetic event contains
- * - 3 fields for uncalibrated measurement: x_uncalib, y_uncalib, z_uncalib.
- * Each is a component of the measured magnetic field, with soft iron
- * and temperature compensation applied, but not hard iron calibration.
- * These values should be continuous (no re-calibration should cause a jump).
- * - 3 fields for hard iron bias estimates: x_bias, y_bias, z_bias.
- * Each field is a component of the estimated hard iron calibration.
- * They represent the offsets to apply to the calibrated readings to obtain
- * uncalibrated readings (x_uncalib ~= x_calibrated + x_bias)
- * These values are expected to jump as soon as the estimate of the hard iron
- * changes, and they should be stable the rest of the time.
- *
- * If this sensor is present, then the corresponding
- * SENSOR_TYPE_MAGNETIC_FIELD must be present and both must return the
- * same sensor_t::name and sensor_t::vendor.
- *
- * Minimum filtering should be applied to this sensor. In particular, low pass
- * filters should be avoided.
- *
- * See SENSOR_TYPE_MAGNETIC_FIELD for more information
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_MAGNETIC_FIELD_UNCALIBRATED (14)
#define SENSOR_STRING_TYPE_MAGNETIC_FIELD_UNCALIBRATED "android.sensor.magnetic_field_uncalibrated"
/*
* SENSOR_TYPE_GAME_ROTATION_VECTOR
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* Similar to SENSOR_TYPE_ROTATION_VECTOR, but not using the geomagnetic
- * field. Therefore the Y axis doesn't point north, but instead to some other
- * reference. That reference is allowed to drift by the same order of
- * magnitude than the gyroscope drift around the Z axis.
+ * field.
*
- * This sensor does not report an estimated heading accuracy:
- * sensors_event_t.data[4] is reserved and should be set to 0
- *
- * In the ideal case, a phone rotated and returning to the same real-world
- * orientation should report the same game rotation vector
- * (without using the earth's geomagnetic field).
- *
- * This sensor must be based on a gyroscope. It cannot be implemented using
- * a magnetometer.
- *
- * see SENSOR_TYPE_ROTATION_VECTOR for more details
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_GAME_ROTATION_VECTOR (15)
#define SENSOR_STRING_TYPE_GAME_ROTATION_VECTOR "android.sensor.game_rotation_vector"
/*
* SENSOR_TYPE_GYROSCOPE_UNCALIBRATED
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* All values are in radians/second and measure the rate of rotation
- * around the X, Y and Z axis. An estimation of the drift on each axis is
- * reported as well.
+ * around the X, Y and Z axis.
*
- * No gyro-drift compensation shall be performed.
- * Factory calibration and temperature compensation should still be applied
- * to the rate of rotation (angular speeds).
- *
- * The coordinate system is the same as is
- * used for the acceleration sensor. Rotation is positive in the
- * counter-clockwise direction (right-hand rule). That is, an observer
- * looking from some positive location on the x, y or z axis at a device
- * positioned on the origin would report positive rotation if the device
- * appeared to be rotating counter clockwise. Note that this is the
- * standard mathematical definition of positive rotation and does not agree
- * with the definition of roll given earlier.
- * The range should at least be 17.45 rad/s (ie: ~1000 deg/s).
- *
- * Content of an uncalibrated_gyro event: (units are rad/sec)
- * x_uncalib : angular speed (w/o drift compensation) around the X axis
- * y_uncalib : angular speed (w/o drift compensation) around the Y axis
- * z_uncalib : angular speed (w/o drift compensation) around the Z axis
- * x_bias : estimated drift around X axis in rad/s
- * y_bias : estimated drift around Y axis in rad/s
- * z_bias : estimated drift around Z axis in rad/s
- *
- * IMPLEMENTATION NOTES:
- *
- * If the implementation is not able to estimate the drift, then this
- * sensor MUST NOT be reported by this HAL. Instead, the regular
- * SENSOR_TYPE_GYROSCOPE is used without drift compensation.
- *
- * If this sensor is present, then the corresponding
- * SENSOR_TYPE_GYROSCOPE must be present and both must return the
- * same sensor_t::name and sensor_t::vendor.
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_GYROSCOPE_UNCALIBRATED (16)
#define SENSOR_STRING_TYPE_GYROSCOPE_UNCALIBRATED "android.sensor.gyroscope_uncalibrated"
-
/*
* SENSOR_TYPE_SIGNIFICANT_MOTION
- * trigger-mode: one-shot
- * wake-up sensor: yes
+ * reporting-mode: one-shot
*
* A sensor of this type triggers an event each time significant motion
* is detected and automatically disables itself.
+ * For Significant Motion sensor to be useful, it must be defined as a
+ * wake-up sensor. (set SENSOR_FLAG_WAKE_UP). Implement the wake-up significant motion
+ * sensor. A non wake-up version is not useful.
* The only allowed value to return is 1.0.
- *
- * A significant motion is a motion that might lead to a change in the user
- * location.
- * Examples of such motions are:
- * walking, biking, sitting in a moving car, coach or train.
- * Examples of situations that should not trigger significant motion:
- * - phone in pocket and person is not moving
- * - phone is on a table, even if the table shakes a bit due to nearby traffic
- * or washing machine
- *
- * A note on false positive / false negative / power consumption tradeoff
- * - The goal of this sensor is to save power.
- * - Triggering an event when the user is not moving (false positive) is costly
- * in terms of power, so it should be avoided.
- * - Not triggering an event when the user is moving (false negative) is
- * acceptable as long as it is not done repeatedly. If the user has been
- * walking for 10 seconds, not triggering an event within those 10 seconds
- * is not acceptable.
- *
- * IMPORTANT NOTE: this sensor type is very different from other types
- * in that it must work when the screen is off without the need of
- * holding a partial wake-lock and MUST allow the SoC to go into suspend.
- * When significant motion is detected, the sensor must awaken the SoC and
- * the event be reported.
- *
- * If a particular hardware cannot support this mode of operation then this
- * sensor type MUST NOT be reported by the HAL. ie: it is not acceptable
- * to "emulate" this sensor in the HAL.
- *
- * The whole point of this sensor type is to save power by keeping the
- * SoC in suspend mode when the device is at rest.
- *
- * When the sensor is not activated, it must also be deactivated in the
- * hardware: it must not wake up the SoC anymore, even in case of
- * significant motion.
- *
- * setDelay() has no effect and is ignored.
- * Once a "significant motion" event is returned, a sensor of this type
- * must disables itself automatically, as if activate(..., 0) had been called.
*/
#define SENSOR_TYPE_SIGNIFICANT_MOTION (17)
@@ -691,25 +443,13 @@
/*
* SENSOR_TYPE_STEP_DETECTOR
- * trigger-mode: special
- * wake-up sensor: no
+ * reporting-mode: special
*
* A sensor of this type triggers an event each time a step is taken
- * by the user. The only allowed value to return is 1.0 and an event is
- * generated for each step. Like with any other event, the timestamp
- * indicates when the event (here the step) occurred, this corresponds to when
- * the foot hit the ground, generating a high variation in acceleration.
+ * by the user. The only allowed value to return is 1.0 and an event
+ * is generated for each step.
*
- * While this sensor operates, it shall not disrupt any other sensors, in
- * particular, but not limited to, the accelerometer; which might very well
- * be in use as well.
- *
- * This sensor must be low power. That is, if the step detection cannot be
- * done in hardware, this sensor should not be defined. Also, when the
- * step detector is activated and the accelerometer is not, only steps should
- * trigger interrupts (not accelerometer data).
- *
- * setDelay() has no impact on this sensor type
+ * Both wake-up and non wake-up versions are useful.
*/
#define SENSOR_TYPE_STEP_DETECTOR (18)
@@ -718,52 +458,14 @@
/*
* SENSOR_TYPE_STEP_COUNTER
- * trigger-mode: on-change
- * wake-up sensor: no
+ * reporting-mode: on-change
*
* A sensor of this type returns the number of steps taken by the user since
* the last reboot while activated. The value is returned as a uint64_t and is
* reset to zero only on a system / android reboot.
*
- * The timestamp of the event is set to the time when the first step
- * for that event was taken.
- * See SENSOR_TYPE_STEP_DETECTOR for the signification of the time of a step.
- *
- * The minimum size of the hardware's internal counter shall be 16 bits
- * (this restriction is here to avoid too frequent wake-ups when the
- * delay is very large).
- *
- * IMPORTANT NOTE: this sensor type is different from other types
- * in that it must work when the screen is off without the need of
- * holding a partial wake-lock and MUST allow the SoC to go into suspend.
- * Unlike other sensors, while in suspend mode this sensor must stay active,
- * no events are reported during that time but, steps continue to be
- * accounted for; an event will be reported as soon as the SoC resumes if
- * the timeout has expired.
- *
- * In other words, when the screen is off and the device allowed to
- * go into suspend mode, we don't want to be woken up, regardless of the
- * setDelay() value, but the steps shall continue to be counted.
- *
- * The driver must however ensure that the internal step count never
- * overflows. It is allowed in this situation to wake the SoC up so the
- * driver can do the counter maintenance.
- *
- * While this sensor operates, it shall not disrupt any other sensors, in
- * particular, but not limited to, the accelerometer; which might very well
- * be in use as well.
- *
- * If a particular hardware cannot support these modes of operation then this
- * sensor type MUST NOT be reported by the HAL. ie: it is not acceptable
- * to "emulate" this sensor in the HAL.
- *
- * This sensor must be low power. That is, if the step detection cannot be
- * done in hardware, this sensor should not be defined. Also, when the
- * step counter is activated and the accelerometer is not, only steps should
- * trigger interrupts (not accelerometer data).
- *
- * The whole point of this sensor type is to save power by keeping the
- * SoC in suspend mode when the device is at rest.
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_STEP_COUNTER (19)
@@ -771,31 +473,20 @@
/*
* SENSOR_TYPE_GEOMAGNETIC_ROTATION_VECTOR
- * trigger-mode: continuous
- * wake-up sensor: no
+ * reporting-mode: continuous
*
* Similar to SENSOR_TYPE_ROTATION_VECTOR, but using a magnetometer instead
* of using a gyroscope.
*
- * This sensor must be based on a magnetometer. It cannot be implemented using
- * a gyroscope, and gyroscope input cannot be used by this sensor, as the
- * goal of this sensor is to be low power.
- * The accelerometer can be (and usually is) used.
- *
- * Just like SENSOR_TYPE_ROTATION_VECTOR, this sensor reports an estimated
- * heading accuracy:
- * sensors_event_t.data[4] = estimated_accuracy (in radians)
- * The heading error must be less than estimated_accuracy 95% of the time
- *
- * see SENSOR_TYPE_ROTATION_VECTOR for more details
+ * Implement the non-wake-up version of this sensor and implement the wake-up
+ * version if the system possesses a wake up fifo.
*/
#define SENSOR_TYPE_GEOMAGNETIC_ROTATION_VECTOR (20)
#define SENSOR_STRING_TYPE_GEOMAGNETIC_ROTATION_VECTOR "android.sensor.geomagnetic_rotation_vector"
/*
* SENSOR_TYPE_HEART_RATE
- * trigger-mode: on-change
- * wake-up sensor: no
+ * reporting-mode: on-change
*
* A sensor of this type returns the current heart rate.
* The events contain the current heart rate in beats per minute (BPM) and the
@@ -808,13 +499,104 @@
* to not be on the body, the status field of the first event must be set to
* SENSOR_STATUS_UNRELIABLE. The event should be generated no faster than every
* period_ns passed to setDelay() or to batch().
- * See the definition of the on-change trigger mode for more information.
+ * See the definition of the on-change reporting mode for more information.
*
* sensor_t.requiredPermission must be set to SENSOR_PERMISSION_BODY_SENSORS.
+ *
+ * Both wake-up and non wake-up versions are useful.
*/
#define SENSOR_TYPE_HEART_RATE (21)
#define SENSOR_STRING_TYPE_HEART_RATE "android.sensor.heart_rate"
+/*
+ * SENSOR_TYPE_TILT_DETECTOR
+ * reporting-mode: special (setDelay has no impact)
+ *
+ * A sensor of this type generates an event each time a tilt event is detected. A tilt event
+ * should be generated if the direction of the 2-seconds window average gravity changed by at least
+ * 35 degrees since the activation or the last trigger of the sensor.
+ * reference_estimated_gravity = average of accelerometer measurements over the first
+ * 1 second after activation or the estimated gravity at the last
+ * trigger.
+ * current_estimated_gravity = average of accelerometer measurements over the last 2 seconds.
+ * trigger when angle (reference_estimated_gravity, current_estimated_gravity) > 35 degrees
+ *
+ * Large accelerations without a change in phone orientation should not trigger a tilt event.
+ * For example, a sharp turn or strong acceleration while driving a car should not trigger a tilt
+ * event, even though the angle of the average acceleration might vary by more than 35 degrees.
+ *
+ * Typically, this sensor is implemented with the help of only an accelerometer. Other sensors can
+ * be used as well if they do not increase the power consumption significantly. This is a low power
+ * sensor that should allow the AP to go into suspend mode. Do not emulate this sensor in the HAL.
+ * Like other wake up sensors, the driver is expected to hold a wake_lock with a timeout of 200 ms
+ * while reporting this event. The only allowed return value is 1.0.
+ *
+ * Implement only the wake-up version of this sensor.
+ */
+#define SENSOR_TYPE_TILT_DETECTOR (22)
+#define SENSOR_STRING_TYPE_TILT_DETECTOR "android.sensor.tilt_detector"
+
+/*
+ * SENSOR_TYPE_WAKE_GESTURE
+ * reporting-mode: one-shot
+ *
+ * A sensor enabling waking up the device based on a device specific motion.
+ *
+ * When this sensor triggers, the device behaves as if the power button was
+ * pressed, turning the screen on. This behavior (turning on the screen when
+ * this sensor triggers) might be deactivated by the user in the device
+ * settings. Changes in settings do not impact the behavior of the sensor:
+ * only whether the framework turns the screen on when it triggers.
+ *
+ * The actual gesture to be detected is not specified, and can be chosen by
+ * the manufacturer of the device.
+ * This sensor must be low power, as it is likely to be activated 24/7.
+ * The only allowed value to return is 1.0.
+ *
+ * Implement only the wake-up version of this sensor.
+ */
+#define SENSOR_TYPE_WAKE_GESTURE (23)
+#define SENSOR_STRING_TYPE_WAKE_GESTURE "android.sensor.wake_gesture"
+
+/*
+ * SENSOR_TYPE_GLANCE_GESTURE
+ * reporting-mode: one-shot
+ *
+ * A sensor enabling briefly turning the screen on to enable the user to
+ * glance content on screen based on a specific motion. The device should
+ * turn the screen off after a few moments.
+ *
+ * When this sensor triggers, the device turns the screen on momentarily
+ * to allow the user to glance notifications or other content while the
+ * device remains locked in a non-interactive state (dozing). This behavior
+ * (briefly turning on the screen when this sensor triggers) might be deactivated
+ * by the user in the device settings. Changes in settings do not impact the
+ * behavior of the sensor: only whether the framework briefly turns the screen on
+ * when it triggers.
+ *
+ * The actual gesture to be detected is not specified, and can be chosen by
+ * the manufacturer of the device.
+ * This sensor must be low power, as it is likely to be activated 24/7.
+ * The only allowed value to return is 1.0.
+ *
+ * Implement only the wake-up version of this sensor.
+ */
+#define SENSOR_TYPE_GLANCE_GESTURE (24)
+#define SENSOR_STRING_TYPE_GLANCE_GESTURE "android.sensor.glance_gesture"
+
+/**
+ * SENSOR_TYPE_PICK_UP_GESTURE
+ * reporting-mode: one-shot
+ *
+ * A sensor of this type triggers when the device is picked up regardless of wherever it was
+ * before (desk, pocket, bag). The only allowed return value is 1.0.
+ * This sensor de-activates itself immediately after it triggers.
+ *
+ * Implement only the wake-up version of this sensor.
+ */
+#define SENSOR_TYPE_PICK_UP_GESTURE (25)
+#define SENSOR_STRING_TYPE_PICK_UP_GESTURE "android.sensor.pick_up_gesture"
+
/**
* Values returned by the accelerometer in various locations in the universe.
* all values are in SI units (m/s^2)
@@ -880,6 +662,9 @@
};
} uncalibrated_event_t;
+/**
+ * Meta data event data
+ */
typedef struct meta_data_event {
int32_t what;
int32_t sensor;
@@ -893,6 +678,9 @@
// Set to 0 when status is SENSOR_STATUS_UNRELIABLE or ..._NO_CONTACT
float bpm;
// Status of the sensor for this reading. Set to one SENSOR_STATUS_...
+ // Note that this value should only be set for sensors that explicitly define
+ // the meaning of this field. This field is not piped through the framework
+ // for other sensors.
int8_t status;
} heart_rate_event_t;
@@ -970,7 +758,11 @@
uint64_t step_counter;
} u64;
};
- uint32_t reserved1[4];
+
+ /* Reserved flags for internal use. Set to zero. */
+ uint32_t flags;
+
+ uint32_t reserved1[3];
} sensors_event_t;
@@ -1010,7 +802,7 @@
* must increase when the driver is updated in a way that changes the
* output of this sensor. This is important for fused sensors when the
* fusion algorithm is updated.
- */
+ */
int version;
/* handle that identifies this sensors. This handle is used to reference
@@ -1030,7 +822,7 @@
/* rough estimate of this sensor's power consumption in mA */
float power;
- /* this value depends on the trigger mode:
+ /* this value depends on the reporting mode:
*
* continuous: minimum sample period allowed in microseconds
* on-change : 0
@@ -1071,28 +863,59 @@
*/
const char* requiredPermission;
+ /* This value is defined only for continuous mode sensors. It is the delay between two
+ * sensor events corresponding to the lowest frequency that this sensor supports. When
+ * lower frequencies are requested through batch()/setDelay() the events will be generated
+ * at this frequency instead. It can be used by the framework or applications to estimate
+ * when the batch FIFO may be full.
+ *
+ * NOTE: 1) period_ns is in nanoseconds whereas maxDelay/minDelay are in microseconds.
+ * continuous: maximum sampling period allowed in microseconds.
+ * on-change, one-shot, special : 0
+ * 2) maxDelay should always fit within a 32 bit signed integer. It is declared as 64 bit
+ * on 64 bit architectures only for binary compatibility reasons.
+ * Availability: SENSORS_DEVICE_API_VERSION_1_3
+ */
+ #ifdef __LP64__
+ int64_t maxDelay;
+ #else
+ int32_t maxDelay;
+ #endif
+
+ /* Flags for sensor. See SENSOR_FLAG_* above. Only the least significant 32 bits are used here.
+ * It is declared as 64 bit on 64 bit architectures only for binary compatibility reasons.
+ * Availability: SENSORS_DEVICE_API_VERSION_1_3
+ */
+ #ifdef __LP64__
+ uint64_t flags;
+ #else
+ uint32_t flags;
+ #endif
+
/* reserved fields, must be zero */
- void* reserved[4];
+ void* reserved[2];
};
/*
* sensors_poll_device_t is used with SENSORS_DEVICE_API_VERSION_0_1
* and is present for backward binary and source compatibility.
- * (see documentation of the hooks in struct sensors_poll_device_1 below)
+ * See the Sensors HAL interface section for complete descriptions of the
+ * following functions:
+ * http://source.android.com/devices/sensors/index.html#hal
*/
struct sensors_poll_device_t {
struct hw_device_t common;
int (*activate)(struct sensors_poll_device_t *dev,
- int handle, int enabled);
+ int sensor_handle, int enabled);
int (*setDelay)(struct sensors_poll_device_t *dev,
- int handle, int64_t ns);
+ int sensor_handle, int64_t sampling_period_ns);
int (*poll)(struct sensors_poll_device_t *dev,
sensors_event_t* data, int count);
};
/*
- * struct sensors_poll_device_1 is used with SENSORS_DEVICE_API_VERSION_1_0
+ * struct sensors_poll_device_1 is used in HAL versions >= SENSORS_DEVICE_API_VERSION_1_0
*/
typedef struct sensors_poll_device_1 {
union {
@@ -1104,70 +927,27 @@
struct {
struct hw_device_t common;
- /* Activate/de-activate one sensor.
+ /* Activate/de-activate one sensor.
*
- * handle is the handle of the sensor to change.
+ * sensor_handle is the handle of the sensor to change.
* enabled set to 1 to enable, or 0 to disable the sensor.
*
- * if enabled is set to 1, the sensor is activated even if
- * setDelay() wasn't called before. In this case, a default rate
- * should be used.
- *
- * unless otherwise noted in the sensor types definitions, an
- * activated sensor never prevents the SoC to go into suspend
- * mode; that is, the HAL shall not hold a partial wake-lock on
- * behalf of applications.
- *
- * one-shot sensors de-activate themselves automatically upon
- * receiving an event and they must still accept to be deactivated
- * through a call to activate(..., ..., 0).
- *
- * if "enabled" is 1 and the sensor is already activated, this
- * function is a no-op and succeeds.
- *
- * if "enabled" is 0 and the sensor is already de-activated,
- * this function is a no-op and succeeds.
- *
- * return 0 on success, negative errno code otherwise
+ * Return 0 on success, negative errno code otherwise.
*/
int (*activate)(struct sensors_poll_device_t *dev,
- int handle, int enabled);
+ int sensor_handle, int enabled);
/**
* Set the events's period in nanoseconds for a given sensor.
- *
- * What the period_ns parameter means depends on the specified
- * sensor's trigger mode:
- *
- * continuous: setDelay() sets the sampling rate.
- * on-change: setDelay() limits the delivery rate of events
- * one-shot: setDelay() is ignored. it has no effect.
- * special: see specific sensor type definitions
- *
- * For continuous and on-change sensors, if the requested value is
- * less than sensor_t::minDelay, then it's silently clamped to
- * sensor_t::minDelay unless sensor_t::minDelay is 0, in which
- * case it is clamped to >= 1ms.
- *
- * setDelay will not be called when the sensor is in batching mode.
- * In this case, batch() will be called with the new period.
- *
- * @return 0 if successful, < 0 on error
+ * If sampling_period_ns > max_delay it will be truncated to
+ * max_delay and if sampling_period_ns < min_delay it will be
+ * replaced by min_delay.
*/
int (*setDelay)(struct sensors_poll_device_t *dev,
- int handle, int64_t period_ns);
+ int sensor_handle, int64_t sampling_period_ns);
/**
* Returns an array of sensor data.
- * This function must block until events are available.
- *
- * return the number of events read on success, or -errno in case
- * of an error.
- *
- * The number of events returned in data must be less or equal
- * to the "count" argument.
- *
- * This function shall never return 0 (no event).
*/
int (*poll)(struct sensors_poll_device_t *dev,
sensors_event_t* data, int count);
@@ -1176,239 +956,38 @@
/*
- * Enables batch mode for the given sensor and sets the delay between events
- *
- * A timeout value of zero disables batch mode for the given sensor.
- *
- * The period_ns parameter is equivalent to calling setDelay() -- this
- * function both enables or disables the batch mode AND sets the events's
- * period in nanosecond. See setDelay() above for a detailed explanation of
- * the period_ns parameter.
- *
- * BATCH MODE:
- * -----------
- * In non-batch mode, all sensor events must be reported as soon as they
- * are detected. For example, an accelerometer activated at 50Hz will
- * trigger interrupts 50 times per second.
- * While in batch mode, sensor events do not need to be reported as soon
- * as they are detected. They can be temporarily stored in batches and
- * reported in batches, as long as no event is delayed by more than
- * "timeout" nanoseconds. That is, all events since the previous batch
- * are recorded and returned all at once. This allows to reduce the amount
- * of interrupts sent to the SoC, and allow the SoC to switch to a lower
- * power state (Idle) while the sensor is capturing and batching data.
- *
- * setDelay() is not affected and it behaves as usual.
- *
- * Each event has a timestamp associated with it, the timestamp
- * must be accurate and correspond to the time at which the event
- * physically happened.
- *
- * Batching does not modify the behavior of poll(): batches from different
- * sensors can be interleaved and split. As usual, all events from the same
- * sensor are time-ordered.
- *
- * BEHAVIOUR OUTSIDE OF SUSPEND MODE:
- * ----------------------------------
- *
- * When the SoC is awake (not in suspend mode), events must be reported in
- * batches at least every "timeout". No event shall be dropped or lost.
- * If internal h/w FIFOs fill-up before the timeout, then events are
- * reported at that point to ensure no event is lost.
- *
- *
- * NORMAL BEHAVIOR IN SUSPEND MODE:
- * ---------------------------------
- *
- * By default, batch mode doesn't significantly change the interaction with
- * suspend mode. That is, sensors must continue to allow the SoC to
- * go into suspend mode and sensors must stay active to fill their
- * internal FIFO. In this mode, when the FIFO fills up, it shall wrap
- * around (basically behave like a circular buffer, overwriting events).
- * As soon as the SoC comes out of suspend mode, a batch is produced with
- * as much as the recent history as possible, and batch operation
- * resumes as usual.
- *
- * The behavior described above allows applications to record the recent
- * history of a set of sensor while keeping the SoC into suspend. It
- * also allows the hardware to not have to rely on a wake-up interrupt line.
- *
- * WAKE_UPON_FIFO_FULL BEHAVIOR IN SUSPEND MODE:
- * ----------------------------------------------
- *
- * There are cases, however, where an application cannot afford to lose
- * any events, even when the device goes into suspend mode.
- * For a given rate, if a sensor has the capability to store at least 10
- * seconds worth of events in its FIFO and is able to wake up the Soc, it
- * can implement an optional secondary mode: the WAKE_UPON_FIFO_FULL mode.
- *
- * The caller will set the SENSORS_BATCH_WAKE_UPON_FIFO_FULL flag to
- * activate this mode. If the sensor does not support this mode, batch()
- * will fail when the flag is set.
- *
- * When running with the WAKE_UPON_FIFO_FULL flag set, no events can be
- * lost. When the FIFO is getting full, the sensor must wake up the SoC from
- * suspend and return a batch before the FIFO fills-up.
- * Depending on the device, it might take a few miliseconds for the SoC to
- * entirely come out of suspend and start flushing the FIFO. Enough head
- * room must be allocated in the FIFO to allow the device to entirely come
- * out of suspend without the FIFO overflowing (no events shall be lost).
- *
- * Implementing the WAKE_UPON_FIFO_FULL mode is optional.
- * If the hardware cannot support this mode, or if the physical
- * FIFO is so small that the device would never be allowed to go into
- * suspend for at least 10 seconds, then this function MUST fail when
- * the flag SENSORS_BATCH_WAKE_UPON_FIFO_FULL is set, regardless of
- * the value of the timeout parameter.
- *
- *
- * DRY RUN:
- * --------
- *
- * If the flag SENSORS_BATCH_DRY_RUN is set, this function returns
- * without modifying the batch mode or the event period and has no side
- * effects, but returns errors as usual (as it would if this flag was
- * not set). This flag is used to check if batch mode is available for a
- * given configuration -- in particular for a given sensor at a given rate.
- *
- *
- * Return values:
- * --------------
- *
- * Because sensors must be independent, the return value must not depend
- * on the state of the system (whether another sensor is on or not),
- * nor on whether the flag SENSORS_BATCH_DRY_RUN is set (in other words,
- * if a batch call with SENSORS_BATCH_DRY_RUN is successful,
- * the same call without SENSORS_BATCH_DRY_RUN must succeed as well).
- *
- * When timeout is not 0:
- * If successful, 0 is returned.
- * If the specified sensor doesn't support batch mode, return -EINVAL.
- * If the specified sensor's trigger-mode is one-shot, return -EINVAL.
- * If WAKE_UPON_FIFO_FULL is specified and the specified sensor's internal
- * FIFO is too small to store at least 10 seconds worth of data at the
- * given rate, -EINVAL is returned. Note that as stated above, this has to
- * be determined at compile time, and not based on the state of the
- * system.
- * If some other constraints above cannot be satisfied, return -EINVAL.
- *
- * Note: the timeout parameter, when > 0, has no impact on whether this
- * function succeeds or fails.
- *
- * When timeout is 0:
- * The caller will never set the wake_upon_fifo_full flag.
- * The function must succeed, and batch mode must be deactivated.
- *
- * Independently of whether DRY_RUN is specified, When the call to batch()
- * fails, no state should be changed. In particular, a failed call to
- * batch() should not change the rate of the sensor. Example:
- * setDelay(..., 10ms)
- * batch(..., 20ms, ...) fails
- * rate should stay 10ms.
- *
- *
- * IMPLEMENTATION NOTES:
- * ---------------------
- *
- * Batch mode, if supported, should happen at the hardware level,
- * typically using hardware FIFOs. In particular, it SHALL NOT be
- * implemented in the HAL, as this would be counter productive.
- * The goal here is to save significant amounts of power.
- *
- * In some implementations, events from several sensors can share the
- * same physical FIFO. In that case, all events in the FIFO can be sent and
- * processed by the HAL as soon as one batch must be reported.
- * For example, if the following sensors are activated:
- * - accelerometer batched with timeout = 20s
- * - gyroscope batched with timeout = 5s
- * then the accelerometer batches can be reported at the same time the
- * gyroscope batches are reported (every 5 seconds)
- *
- * Batch mode can be enabled or disabled at any time, in particular
- * while the specified sensor is already enabled, and this shall not
- * result in the loss of events.
- *
- * COMPARATIVE IMPORTANCE OF BATCHING FOR DIFFERENT SENSORS:
- * ---------------------------------------------------------
- *
- * On platforms on which hardware fifo size is limited, the system designers
- * might have to choose how much fifo to reserve for each sensor. To help
- * with this choice, here is a list of applications made possible when
- * batching is implemented on the different sensors.
- *
- * High value: Low power pedestrian dead reckoning
- * Target batching time: 20 seconds to 1 minute
- * Sensors to batch:
- * - Step detector
- * - Rotation vector or game rotation vector at 5Hz
- * Gives us step and heading while letting the SoC go to Suspend.
- *
- * High value: Medium power activity/gesture recognition
- * Target batching time: 3 seconds
- * Sensors to batch: accelerometer between 20Hz and 50Hz
- * Allows recognizing arbitrary activities and gestures without having
- * to keep the SoC fully awake while the data is collected.
- *
- * Medium-high value: Interrupt load reduction
- * Target batching time: < 1 second
- * Sensors to batch: any high frequency sensor.
- * If the gyroscope is set at 800Hz, even batching just 10 gyro events can
- * reduce the number of interrupts from 800/second to 80/second.
- *
- * Medium value: Continuous low frequency data collection
- * Target batching time: > 1 minute
- * Sensors to batch: barometer, humidity sensor, other low frequency
- * sensors.
- * Allows creating monitoring applications at low power.
- *
- * Medium value: Continuous full-sensors collection
- * Target batching time: > 1 minute
- * Sensors to batch: all, at high frequencies
- * Allows full collection of sensor data while leaving the SoC in
- * suspend mode. Only to consider if fifo space is not an issue.
- *
- * In each of the cases above, if WAKE_UPON_FIFO_FULL is implemented, the
- * applications might decide to let the SoC go to suspend, allowing for even
- * more power savings.
+ * Sets a sensor's parameters, including sampling frequency and maximum
+ * report latency. This function can be called while the sensor is
+ * activated, in which case it must not cause any sensor measurements to
+ * be lost: transitioning from one sampling rate to the other cannot cause
+ * lost events, nor can transitioning from a high maximum report latency to
+ * a low maximum report latency.
+ * See the Batching sensor results page for details:
+ * http://source.android.com/devices/sensors/batching.html
*/
int (*batch)(struct sensors_poll_device_1* dev,
- int handle, int flags, int64_t period_ns, int64_t timeout);
+ int sensor_handle, int flags, int64_t sampling_period_ns,
+ int64_t max_report_latency_ns);
/*
* Flush adds a META_DATA_FLUSH_COMPLETE event (sensors_event_meta_data_t)
* to the end of the "batch mode" FIFO for the specified sensor and flushes
- * the FIFO; those events are delivered as usual (i.e.: as if the batch
- * timeout had expired) and removed from the FIFO.
- *
- * See the META_DATA_FLUSH_COMPLETE section for details about the
- * META_DATA_FLUSH_COMPLETE event.
- *
- * The flush happens asynchronously (i.e.: this function must return
- * immediately).
- *
- * If the implementation uses a single FIFO for several sensors, that
- * FIFO is flushed and the META_DATA_FLUSH_COMPLETE event is added only
- * for the specified sensor.
- *
- * If the specified sensor wasn't in batch mode, flush succeeds and
- * promptly sends a META_DATA_FLUSH_COMPLETE event for that sensor.
- *
- * If the FIFO was empty at the time of the call, flush returns
- * 0 (success) and promptly sends a META_DATA_FLUSH_COMPLETE event
- * for that sensor.
- *
- * If the specified sensor wasn't enabled, flush returns -EINVAL.
- *
- * return 0 on success, negative errno code otherwise.
+ * the FIFO.
+ * If the FIFO is empty or if the sensor doesn't support batching (FIFO size zero),
+ * it should return SUCCESS along with a trivial META_DATA_FLUSH_COMPLETE event added to the
+ * event stream. This applies to all sensors other than one-shot sensors.
+ * If the sensor is a one-shot sensor, flush must return -EINVAL and not generate
+ * any flush complete metadata.
+ * If the sensor is not active at the time flush() is called, flush() should return
+ * -EINVAL.
*/
- int (*flush)(struct sensors_poll_device_1* dev, int handle);
+ int (*flush)(struct sensors_poll_device_1* dev, int sensor_handle);
void (*reserved_procs[8])(void);
} sensors_poll_device_1_t;
-
/** convenience API for opening and closing a device */
static inline int sensors_open(const struct hw_module_t* module,
diff --git a/include/hardware/sound_trigger.h b/include/hardware/sound_trigger.h
new file mode 100644
index 0000000..2a8db87
--- /dev/null
+++ b/include/hardware/sound_trigger.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <system/audio.h>
+#include <system/sound_trigger.h>
+#include <hardware/hardware.h>
+
+#ifndef ANDROID_SOUND_TRIGGER_HAL_H
+#define ANDROID_SOUND_TRIGGER_HAL_H
+
+
+__BEGIN_DECLS
+
+/**
+ * The id of this module
+ */
+#define SOUND_TRIGGER_HARDWARE_MODULE_ID "sound_trigger"
+
+/**
+ * Name of the audio devices to open
+ */
+#define SOUND_TRIGGER_HARDWARE_INTERFACE "sound_trigger_hw_if"
+
+#define SOUND_TRIGGER_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0)
+#define SOUND_TRIGGER_MODULE_API_VERSION_CURRENT SOUND_TRIGGER_MODULE_API_VERSION_1_0
+
+
+#define SOUND_TRIGGER_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION(1, 0)
+#define SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT SOUND_TRIGGER_DEVICE_API_VERSION_1_0
+
+/**
+ * List of known sound trigger HAL modules. This is the base name of the sound_trigger HAL
+ * library composed of the "sound_trigger." prefix, one of the base names below and
+ * a suffix specific to the device.
+ * e.g: sound_trigger.primary.goldfish.so or sound_trigger.primary.default.so
+ */
+
+#define SOUND_TRIGGER_HARDWARE_MODULE_ID_PRIMARY "primary"
+
+
+/**
+ * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
+ * and the fields of this data structure must begin with hw_module_t
+ * followed by module specific information.
+ */
+struct sound_trigger_module {
+ struct hw_module_t common;
+};
+
+typedef void (*recognition_callback_t)(struct sound_trigger_recognition_event *event, void *cookie);
+typedef void (*sound_model_callback_t)(struct sound_trigger_model_event *event, void *cookie);
+
+struct sound_trigger_hw_device {
+ struct hw_device_t common;
+
+ /*
+ * Retrieve implementation properties.
+ */
+ int (*get_properties)(const struct sound_trigger_hw_device *dev,
+ struct sound_trigger_properties *properties);
+
+ /*
+ * Load a sound model. Once loaded, recognition of this model can be started and stopped.
+ * Only one active recognition per model at a time. The SoundTrigger service will handle
+ * concurrent recognition requests by different users/applications on the same model.
+ * The implementation returns a unique handle used by other functions (unload_sound_model(),
+ * start_recognition(), etc...)
+ */
+ int (*load_sound_model)(const struct sound_trigger_hw_device *dev,
+ struct sound_trigger_sound_model *sound_model,
+ sound_model_callback_t callback,
+ void *cookie,
+ sound_model_handle_t *handle);
+
+ /*
+ * Unload a sound model. A sound model can be unloaded to make room for a new one to overcome
+ * implementation limitations.
+ */
+ int (*unload_sound_model)(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t handle);
+
+ /* Start recognition on a given model. Only one recognition active at a time per model.
+ * Once recognition succeeds or fails, the callback is called.
+ * TODO: group recognition configuration parameters into one struct and add key phrase options.
+ */
+ int (*start_recognition)(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t sound_model_handle,
+ const struct sound_trigger_recognition_config *config,
+ recognition_callback_t callback,
+ void *cookie);
+
+ /* Stop recognition on a given model.
+ * The implementation does not have to call the callback when stopped via this method.
+ */
+ int (*stop_recognition)(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t sound_model_handle);
+};
+
+typedef struct sound_trigger_hw_device sound_trigger_hw_device_t;
+
+/** convenience API for opening and closing a supported device */
+
+static inline int sound_trigger_hw_device_open(const struct hw_module_t* module,
+ struct sound_trigger_hw_device** device)
+{
+ return module->methods->open(module, SOUND_TRIGGER_HARDWARE_INTERFACE,
+ (struct hw_device_t**)device);
+}
+
+static inline int sound_trigger_hw_device_close(struct sound_trigger_hw_device* device)
+{
+ return device->common.close(&device->common);
+}
+
+__END_DECLS
+
+#endif // ANDROID_SOUND_TRIGGER_HAL_H
diff --git a/include/hardware/tv_input.h b/include/hardware/tv_input.h
new file mode 100644
index 0000000..a94e4ea
--- /dev/null
+++ b/include/hardware/tv_input.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TV_INPUT_INTERFACE_H
+#define ANDROID_TV_INPUT_INTERFACE_H
+
+#include <stdint.h>
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#include <hardware/hardware.h>
+#include <system/audio.h>
+#include <system/window.h>
+
+__BEGIN_DECLS
+
+/*
+ * Module versioning information for the TV input hardware module, based on
+ * tv_input_module_t.common.module_api_version.
+ *
+ * Version History:
+ *
+ * TV_INPUT_MODULE_API_VERSION_0_1:
+ * Initial TV input hardware module API.
+ *
+ */
+
+#define TV_INPUT_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
+
+#define TV_INPUT_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION(0, 1)
+
+/*
+ * The id of this module
+ */
+#define TV_INPUT_HARDWARE_MODULE_ID "tv_input"
+
+#define TV_INPUT_DEFAULT_DEVICE "default"
+
+/*****************************************************************************/
+
+/*
+ * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
+ * and the fields of this data structure must begin with hw_module_t
+ * followed by module specific information.
+ */
+typedef struct tv_input_module {
+ struct hw_module_t common;
+} tv_input_module_t;
+
+/*****************************************************************************/
+
+enum {
+ /* Generic hardware. */
+ TV_INPUT_TYPE_OTHER_HARDWARE = 1,
+ /* Tuner. (e.g. built-in terrestrial tuner) */
+ TV_INPUT_TYPE_TUNER = 2,
+ TV_INPUT_TYPE_COMPOSITE = 3,
+ TV_INPUT_TYPE_SVIDEO = 4,
+ TV_INPUT_TYPE_SCART = 5,
+ TV_INPUT_TYPE_COMPONENT = 6,
+ TV_INPUT_TYPE_VGA = 7,
+ TV_INPUT_TYPE_DVI = 8,
+ /* Physical HDMI port. (e.g. HDMI 1) */
+ TV_INPUT_TYPE_HDMI = 9,
+ TV_INPUT_TYPE_DISPLAY_PORT = 10,
+};
+typedef uint32_t tv_input_type_t;
+
+typedef struct tv_input_device_info {
+ /* Device ID */
+ int device_id;
+
+ /* Type of physical TV input. */
+ tv_input_type_t type;
+
+ union {
+ struct {
+ /* HDMI port ID number */
+ uint32_t port_id;
+ } hdmi;
+
+ /* TODO: add other type specific information. */
+
+ int32_t type_info_reserved[16];
+ };
+
+ /* TODO: Add capability if necessary. */
+
+ /*
+ * Audio info
+ *
+ * audio_type == AUDIO_DEVICE_NONE if this input has no audio.
+ */
+ audio_devices_t audio_type;
+ const char* audio_address;
+
+ int32_t reserved[16];
+} tv_input_device_info_t;
+
+enum {
+ /*
+ * Hardware notifies the framework that a device is available.
+ */
+ TV_INPUT_EVENT_DEVICE_AVAILABLE = 1,
+ /*
+ * Hardware notifies the framework that a device is unavailable.
+ */
+ TV_INPUT_EVENT_DEVICE_UNAVAILABLE = 2,
+ /*
+ * Stream configurations are changed. Client should regard all open streams
+ * at the specific device as closed, and should call
+ * get_stream_configurations() again, opening some of them if necessary.
+ */
+ TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED = 3,
+ /*
+ * Hardware is done with capture request with the buffer. Client can assume
+ * ownership of the buffer again.
+ */
+ TV_INPUT_EVENT_CAPTURE_SUCCEEDED = 4,
+ /*
+ * Hardware met a failure while processing a capture request or client
+ * canceled the request. Client can assume ownership of the buffer again.
+ */
+ TV_INPUT_EVENT_CAPTURE_FAILED = 5,
+};
+typedef uint32_t tv_input_event_type_t;
+
+typedef struct tv_input_capture_result {
+ /* Device ID */
+ int device_id;
+
+ /* Stream ID */
+ int stream_id;
+
+ /* Sequence number of the request */
+ uint32_t seq;
+
+ /*
+ * The buffer passed to hardware in request_capture(). The content of
+ * buffer is undefined (although buffer itself is valid) for
+ * TV_INPUT_EVENT_CAPTURE_FAILED event.
+ */
+ buffer_handle_t buffer;
+
+ /*
+ * Error code for the request. -ECANCELED if request is cancelled; other
+ * error codes are unknown errors.
+ */
+ int error_code;
+} tv_input_capture_result_t;
+
+typedef struct tv_input_event {
+ tv_input_event_type_t type;
+
+ union {
+ /*
+ * TV_INPUT_EVENT_DEVICE_AVAILABLE: all fields are relevant
+ * TV_INPUT_EVENT_DEVICE_UNAVAILABLE: only device_id is relevant
+ * TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED: only device_id is
+ * relevant
+ */
+ tv_input_device_info_t device_info;
+ /*
+ * TV_INPUT_EVENT_CAPTURE_SUCCEEDED: error_code is not relevant
+ * TV_INPUT_EVENT_CAPTURE_FAILED: all fields are relevant
+ */
+ tv_input_capture_result_t capture_result;
+ };
+} tv_input_event_t;
+
+typedef struct tv_input_callback_ops {
+ /*
+ * event contains the type of the event and additional data if necessary.
+ * The event object is guaranteed to be valid only for the duration of the
+ * call.
+ *
+ * data is an object supplied at device initialization, opaque to the
+ * hardware.
+ */
+ void (*notify)(struct tv_input_device* dev,
+ tv_input_event_t* event, void* data);
+} tv_input_callback_ops_t;
+
+enum {
+ TV_STREAM_TYPE_INDEPENDENT_VIDEO_SOURCE = 1,
+ TV_STREAM_TYPE_BUFFER_PRODUCER = 2,
+};
+typedef uint32_t tv_stream_type_t;
+
+typedef struct tv_stream_config {
+ /*
+ * ID number of the stream. This value is used to identify the whole stream
+ * configuration.
+ */
+ int stream_id;
+
+ /* Type of the stream */
+ tv_stream_type_t type;
+
+ /* Max width/height of the stream. */
+ uint32_t max_video_width;
+ uint32_t max_video_height;
+} tv_stream_config_t;
+
+typedef struct buffer_producer_stream {
+ /*
+ * IN/OUT: Width / height of the stream. Client may request for specific
+ * size but hardware may change it. Client must allocate buffers with
+ * specified width and height.
+ */
+ uint32_t width;
+ uint32_t height;
+
+ /* OUT: Client must set this usage when allocating buffer. */
+ uint32_t usage;
+
+ /* OUT: Client must allocate a buffer with this format. */
+ uint32_t format;
+} buffer_producer_stream_t;
+
+typedef struct tv_stream {
+ /* IN: ID in the stream configuration */
+ int stream_id;
+
+ /* OUT: Type of the stream (for convenience) */
+ tv_stream_type_t type;
+
+ /* Data associated with the stream for client's use */
+ union {
+ /* OUT: A native handle describing the sideband stream source */
+ native_handle_t* sideband_stream_source_handle;
+
+ /* IN/OUT: Details are in buffer_producer_stream_t */
+ buffer_producer_stream_t buffer_producer;
+ };
+} tv_stream_t;
+
+/*
+ * Every device data structure must begin with hw_device_t
+ * followed by module specific public methods and attributes.
+ */
+typedef struct tv_input_device {
+ struct hw_device_t common;
+
+ /*
+ * initialize:
+ *
+ * Provide callbacks to the device and start operation. At first, no device
+ * is available and after initialize() completes, currently available
+ * devices including static devices should notify via callback.
+ *
+ * Framework owns callbacks object.
+ *
+ * data is a framework-owned object which would be sent back to the
+ * framework for each callback notification.
+ *
+ * Return 0 on success.
+ */
+ int (*initialize)(struct tv_input_device* dev,
+ const tv_input_callback_ops_t* callback, void* data);
+
+ /*
+ * get_stream_configurations:
+ *
+ * Get stream configurations for a specific device. An input device may have
+ * multiple configurations.
+ *
+ * The configs object is guaranteed to be valid only until the next call to
+ * get_stream_configurations() or STREAM_CONFIGURATIONS_CHANGED event.
+ *
+ * Return 0 on success.
+ */
+ int (*get_stream_configurations)(const struct tv_input_device* dev,
+ int device_id, int* num_configurations,
+ const tv_stream_config_t** configs);
+
+ /*
+ * open_stream:
+ *
+ * Open a stream with given stream ID. Caller owns stream object, and the
+ * populated data is only valid until the stream is closed.
+ *
+ * Return 0 on success; -EBUSY if the client should close other streams to
+ * open the stream; -EEXIST if the stream with the given ID is already open;
+ * -EINVAL if device_id and/or stream_id are invalid; other non-zero value
+ * denotes unknown error.
+ */
+ int (*open_stream)(struct tv_input_device* dev, int device_id,
+ tv_stream_t* stream);
+
+ /*
+ * close_stream:
+ *
+ * Close a stream to a device. data in tv_stream_t* object associated with
+ * the stream_id is obsolete once this call finishes.
+ *
+ * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if
+ * device_id and/or stream_id are invalid.
+ */
+ int (*close_stream)(struct tv_input_device* dev, int device_id,
+ int stream_id);
+
+ /*
+ * request_capture:
+ *
+ * Request buffer capture for a stream. This is only valid for buffer
+ * producer streams. The buffer should be created with size, format and
+ * usage specified in the stream. Framework provides seq in an
+ * increasing sequence per each stream. Hardware should provide the picture
+ * in a chronological order according to seq. For example, if two
+ * requests are being processed at the same time, the request with the
+ * smaller seq should get an earlier frame.
+ *
+ * The framework releases the ownership of the buffer upon calling this
+ * function. When the buffer is filled, hardware notifies the framework
+ * via TV_INPUT_EVENT_CAPTURE_SUCCEEDED callback, and the ownership is
+ * transferred back to framework at that time.
+ *
+ * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if
+ * device_id and/or stream_id are invalid; -EWOULDBLOCK if HAL cannot take
+ * additional requests until it releases a buffer.
+ */
+ int (*request_capture)(struct tv_input_device* dev, int device_id,
+ int stream_id, buffer_handle_t buffer, uint32_t seq);
+
+ /*
+ * cancel_capture:
+ *
+ * Cancel an ongoing capture. Hardware should release the buffer as soon as
+ * possible via TV_INPUT_EVENT_CAPTURE_FAILED callback.
+ *
+ * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if
+ * device_id, stream_id, and/or seq are invalid.
+ */
+ int (*cancel_capture)(struct tv_input_device* dev, int device_id,
+ int stream_id, uint32_t seq);
+
+ void* reserved[16];
+} tv_input_device_t;
+
+__END_DECLS
+
+#endif // ANDROID_TV_INPUT_INTERFACE_H
diff --git a/include/hardware/vibrator.h b/include/hardware/vibrator.h
index 795d23e..92b1fd0 100644
--- a/include/hardware/vibrator.h
+++ b/include/hardware/vibrator.h
@@ -35,7 +35,13 @@
struct vibrator_device;
typedef struct vibrator_device {
- struct hw_device_t common;
+ /**
+ * Common methods of the vibrator device. This *must* be the first member of
+ * vibrator_device as users of this structure will cast a hw_device_t to
+ * vibrator_device pointer in contexts where it's known the hw_device_t references a
+ * vibrator_device.
+ */
+ struct hw_device_t common;
/** Turn on vibrator
*
diff --git a/modules/Android.mk b/modules/Android.mk
index f1a6c1c..0725d3e 100644
--- a/modules/Android.mk
+++ b/modules/Android.mk
@@ -1,4 +1,4 @@
hardware_modules := gralloc hwcomposer audio nfc nfc-nci local_time \
power usbaudio audio_remote_submix camera consumerir sensors vibrator \
- mcu
+ tv_input fingerprint
include $(call all-named-subdir-makefiles,$(hardware_modules))
diff --git a/modules/audio/audio_hw.c b/modules/audio/audio_hw.c
index 3051519..18c0e59 100644
--- a/modules/audio/audio_hw.c
+++ b/modules/audio/audio_hw.c
@@ -105,7 +105,7 @@
size_t bytes)
{
/* XXX: fake timing for audio output */
- usleep(bytes * 1000000 / audio_stream_frame_size(&stream->common) /
+ usleep(bytes * 1000000 / audio_stream_out_frame_size(stream) /
out_get_sample_rate(&stream->common));
return bytes;
}
@@ -193,7 +193,7 @@
size_t bytes)
{
/* XXX: fake timing for audio input */
- usleep(bytes * 1000000 / audio_stream_frame_size(&stream->common) /
+ usleep(bytes * 1000000 / audio_stream_in_frame_size(stream) /
in_get_sample_rate(&stream->common));
return bytes;
}
@@ -218,7 +218,8 @@
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
- struct audio_stream_out **stream_out)
+ struct audio_stream_out **stream_out,
+ const char *address __unused)
{
struct stub_audio_device *ladev = (struct stub_audio_device *)dev;
struct stub_stream_out *out;
@@ -327,7 +328,10 @@
audio_io_handle_t handle,
audio_devices_t devices,
struct audio_config *config,
- struct audio_stream_in **stream_in)
+ struct audio_stream_in **stream_in,
+ audio_input_flags_t flags __unused,
+ const char *address __unused,
+ audio_source_t source __unused)
{
struct stub_audio_device *ladev = (struct stub_audio_device *)dev;
struct stub_stream_in *in;
diff --git a/modules/audio_remote_submix/audio_hw.cpp b/modules/audio_remote_submix/audio_hw.cpp
index 433ef6c..014da8e 100644
--- a/modules/audio_remote_submix/audio_hw.cpp
+++ b/modules/audio_remote_submix/audio_hw.cpp
@@ -20,49 +20,128 @@
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
-#include <sys/time.h>
#include <stdlib.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/limits.h>
#include <cutils/log.h>
-#include <cutils/str_parms.h>
#include <cutils/properties.h>
+#include <cutils/str_parms.h>
+#include <hardware/audio.h>
#include <hardware/hardware.h>
#include <system/audio.h>
-#include <hardware/audio.h>
+#include <media/AudioParameter.h>
+#include <media/AudioBufferProvider.h>
#include <media/nbaio/MonoPipe.h>
#include <media/nbaio/MonoPipeReader.h>
-#include <media/AudioBufferProvider.h>
#include <utils/String8.h>
-#include <media/AudioParameter.h>
+
+#define LOG_STREAMS_TO_FILES 0
+#if LOG_STREAMS_TO_FILES
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#endif // LOG_STREAMS_TO_FILES
extern "C" {
namespace android {
-#define MAX_PIPE_DEPTH_IN_FRAMES (1024*8)
+// Set to 1 to enable extremely verbose logging in this module.
+#define SUBMIX_VERBOSE_LOGGING 0
+#if SUBMIX_VERBOSE_LOGGING
+#define SUBMIX_ALOGV(...) ALOGV(__VA_ARGS__)
+#define SUBMIX_ALOGE(...) ALOGE(__VA_ARGS__)
+#else
+#define SUBMIX_ALOGV(...)
+#define SUBMIX_ALOGE(...)
+#endif // SUBMIX_VERBOSE_LOGGING
+
+// NOTE: This value will be rounded up to the nearest power of 2 by MonoPipe().
+#define DEFAULT_PIPE_SIZE_IN_FRAMES (1024*8)
+// Value used to divide the MonoPipe() buffer into segments that are written to the source and
+// read from the sink. The maximum latency of the device is the size of the MonoPipe's buffer
+// the minimum latency is the MonoPipe buffer size divided by this value.
+#define DEFAULT_PIPE_PERIOD_COUNT 4
// The duration of MAX_READ_ATTEMPTS * READ_ATTEMPT_SLEEP_MS must be stricly inferior to
// the duration of a record buffer at the current record sample rate (of the device, not of
// the recording itself). Here we have:
// 3 * 5ms = 15ms < 1024 frames * 1000 / 48000 = 21.333ms
#define MAX_READ_ATTEMPTS 3
#define READ_ATTEMPT_SLEEP_MS 5 // 5ms between two read attempts when pipe is empty
-#define DEFAULT_RATE_HZ 48000 // default sample rate
+#define DEFAULT_SAMPLE_RATE_HZ 48000 // default sample rate
+// See NBAIO_Format frameworks/av/include/media/nbaio/NBAIO.h.
+#define DEFAULT_FORMAT AUDIO_FORMAT_PCM_16_BIT
+// A legacy user of this device does not close the input stream when it shuts down, which
+// results in the application opening a new input stream before closing the old input stream
+// handle it was previously using. Setting this value to 1 allows multiple clients to open
+// multiple input streams from this device. If this option is enabled, each input stream returned
+// is *the same stream* which means that readers will race to read data from these streams.
+#define ENABLE_LEGACY_INPUT_OPEN 1
+// Whether channel conversion (16-bit signed PCM mono->stereo, stereo->mono) is enabled.
+#define ENABLE_CHANNEL_CONVERSION 1
+// Whether resampling is enabled.
+#define ENABLE_RESAMPLING 1
+#if LOG_STREAMS_TO_FILES
+// Folder to save stream log files to.
+#define LOG_STREAM_FOLDER "/data/misc/media"
+// Log filenames for input and output streams.
+#define LOG_STREAM_OUT_FILENAME LOG_STREAM_FOLDER "/r_submix_out.raw"
+#define LOG_STREAM_IN_FILENAME LOG_STREAM_FOLDER "/r_submix_in.raw"
+// File permissions for stream log files.
+#define LOG_STREAM_FILE_PERMISSIONS (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)
+#endif // LOG_STREAMS_TO_FILES
+// Common limits macros.
+#ifndef min
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif // min
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif // max
+
+// Set *result_variable_ptr to true if value_to_find is present in the array array_to_search,
+// otherwise set *result_variable_ptr to false.
+#define SUBMIX_VALUE_IN_SET(value_to_find, array_to_search, result_variable_ptr) \
+ { \
+ size_t i; \
+ *(result_variable_ptr) = false; \
+ for (i = 0; i < sizeof(array_to_search) / sizeof((array_to_search)[0]); i++) { \
+ if ((value_to_find) == (array_to_search)[i]) { \
+ *(result_variable_ptr) = true; \
+ break; \
+ } \
+ } \
+ }
+
+// Configuration of the submix pipe.
struct submix_config {
- audio_format_t format;
- audio_channel_mask_t channel_mask;
- unsigned int rate; // sample rate for the device
- unsigned int period_size; // size of the audio pipe is period_size * period_count in frames
- unsigned int period_count;
+ // Channel mask field in this data structure is set to either input_channel_mask or
+ // output_channel_mask depending upon the last stream to be opened on this device.
+ struct audio_config common;
+ // Input stream and output stream channel masks. This is required since input and output
+ // channel bitfields are not equivalent.
+ audio_channel_mask_t input_channel_mask;
+ audio_channel_mask_t output_channel_mask;
+#if ENABLE_RESAMPLING
+ // Input stream and output stream sample rates.
+ uint32_t input_sample_rate;
+ uint32_t output_sample_rate;
+#endif // ENABLE_RESAMPLING
+ size_t pipe_frame_size; // Number of bytes in each audio frame in the pipe.
+ size_t buffer_size_frames; // Size of the audio pipe in frames.
+ // Maximum number of frames buffered by the input and output streams.
+ size_t buffer_period_size_frames;
};
struct submix_audio_device {
struct audio_hw_device device;
- bool output_standby;
bool input_standby;
+ bool output_standby;
submix_config config;
// Pipe variables: they handle the ring buffer that "pipes" audio:
// - from the submix virtual audio output == what needs to be played
@@ -72,16 +151,30 @@
// A usecase example is one where the component capturing the audio is then sending it over
// Wifi for presentation on a remote Wifi Display device (e.g. a dongle attached to a TV, or a
// TV with Wifi Display capabilities), or to a wireless audio player.
- sp<MonoPipe> rsxSink;
+ sp<MonoPipe> rsxSink;
sp<MonoPipeReader> rsxSource;
+#if ENABLE_RESAMPLING
+ // Buffer used as temporary storage for resampled data prior to returning data to the output
+ // stream.
+ int16_t resampler_buffer[DEFAULT_PIPE_SIZE_IN_FRAMES];
+#endif // ENABLE_RESAMPLING
- // device lock, also used to protect access to the audio pipe
+ // Pointers to the current input and output stream instances. rsxSink and rsxSource are
+ // destroyed if both and input and output streams are destroyed.
+ struct submix_stream_out *output;
+ struct submix_stream_in *input;
+
+ // Device lock, also used to protect access to submix_audio_device from the input and output
+ // streams.
pthread_mutex_t lock;
};
struct submix_stream_out {
struct audio_stream_out stream;
struct submix_audio_device *dev;
+#if LOG_STREAMS_TO_FILES
+ int log_fd;
+#endif // LOG_STREAMS_TO_FILES
};
struct submix_stream_in {
@@ -93,84 +186,453 @@
struct timespec record_start_time;
// how many frames have been requested to be read
int64_t read_counter_frames;
+
+#if ENABLE_LEGACY_INPUT_OPEN
+ // Number of references to this input stream.
+ volatile int32_t ref_count;
+#endif // ENABLE_LEGACY_INPUT_OPEN
+#if LOG_STREAMS_TO_FILES
+ int log_fd;
+#endif // LOG_STREAMS_TO_FILES
};
+// Determine whether the specified sample rate is supported by the submix module.
+static bool sample_rate_supported(const uint32_t sample_rate)
+{
+    // Set of sample rates supported by Format_from_SR_C() frameworks/av/media/libnbaio/NBAIO.cpp.
+    static const unsigned int supported_sample_rates[] = {
+        8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000,
+    };
+    bool return_value;
+    SUBMIX_VALUE_IN_SET(sample_rate, supported_sample_rates, &return_value);
+    return return_value;
+}
+
+// Determine whether the specified sample rate is supported, if it is return the specified sample
+// rate, otherwise return the default sample rate (DEFAULT_SAMPLE_RATE_HZ) for the submix module.
+static uint32_t get_supported_sample_rate(uint32_t sample_rate)
+{
+    return sample_rate_supported(sample_rate) ? sample_rate : DEFAULT_SAMPLE_RATE_HZ;
+}
+
+// Determine whether the specified channel in mask is supported by the submix module.
+static bool channel_in_mask_supported(const audio_channel_mask_t channel_in_mask)
+{
+    // Set of channel in masks supported by Format_from_SR_C()
+    // frameworks/av/media/libnbaio/NBAIO.cpp.
+    static const audio_channel_mask_t supported_channel_in_masks[] = {
+        AUDIO_CHANNEL_IN_MONO, AUDIO_CHANNEL_IN_STEREO,
+    };
+    bool return_value;
+    SUBMIX_VALUE_IN_SET(channel_in_mask, supported_channel_in_masks, &return_value);
+    return return_value;
+}
+
+// Determine whether the specified channel in mask is supported, if it is return the specified
+// channel in mask, otherwise fall back to AUDIO_CHANNEL_IN_STEREO, the submix module default.
+static audio_channel_mask_t get_supported_channel_in_mask(
+        const audio_channel_mask_t channel_in_mask)
+{
+    return channel_in_mask_supported(channel_in_mask) ? channel_in_mask :
+            static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_IN_STEREO);
+}
+
+// Determine whether the specified channel out mask is supported by the submix module.
+static bool channel_out_mask_supported(const audio_channel_mask_t channel_out_mask)
+{
+    // Set of channel out masks supported by Format_from_SR_C()
+    // frameworks/av/media/libnbaio/NBAIO.cpp.
+    static const audio_channel_mask_t supported_channel_out_masks[] = {
+        AUDIO_CHANNEL_OUT_MONO, AUDIO_CHANNEL_OUT_STEREO,
+    };
+    bool return_value;
+    SUBMIX_VALUE_IN_SET(channel_out_mask, supported_channel_out_masks, &return_value);
+    return return_value;
+}
+
+// Determine whether the specified channel out mask is supported, if it is return the specified
+// channel out mask, otherwise fall back to AUDIO_CHANNEL_OUT_STEREO, the submix module default.
+static audio_channel_mask_t get_supported_channel_out_mask(
+        const audio_channel_mask_t channel_out_mask)
+{
+    return channel_out_mask_supported(channel_out_mask) ? channel_out_mask :
+            static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_OUT_STEREO);
+}
+
+// Get a pointer to submix_stream_out given an audio_stream_out that is embedded within the
+// structure (container_of-style cast computed with offsetof).
+static struct submix_stream_out * audio_stream_out_get_submix_stream_out(
+        struct audio_stream_out * const stream)
+{
+    ALOG_ASSERT(stream);
+    return reinterpret_cast<struct submix_stream_out *>(reinterpret_cast<uint8_t *>(stream) -
+            offsetof(struct submix_stream_out, stream));
+}
+
+// Get a pointer to submix_stream_out given an audio_stream that is embedded within the structure.
+// NOTE(review): assumes audio_stream is the leading member of audio_stream_out — confirm against
+// hardware/audio.h before relying on this cast.
+static struct submix_stream_out * audio_stream_get_submix_stream_out(
+        struct audio_stream * const stream)
+{
+    ALOG_ASSERT(stream);
+    return audio_stream_out_get_submix_stream_out(
+            reinterpret_cast<struct audio_stream_out *>(stream));
+}
+
+// Get a pointer to submix_stream_in given an audio_stream_in that is embedded within the
+// structure (container_of-style cast computed with offsetof).
+static struct submix_stream_in * audio_stream_in_get_submix_stream_in(
+        struct audio_stream_in * const stream)
+{
+    ALOG_ASSERT(stream);
+    return reinterpret_cast<struct submix_stream_in *>(reinterpret_cast<uint8_t *>(stream) -
+            offsetof(struct submix_stream_in, stream));
+}
+
+// Get a pointer to submix_stream_in given an audio_stream that is embedded within the structure.
+// NOTE(review): assumes audio_stream is the leading member of audio_stream_in — confirm against
+// hardware/audio.h before relying on this cast.
+static struct submix_stream_in * audio_stream_get_submix_stream_in(
+        struct audio_stream * const stream)
+{
+    ALOG_ASSERT(stream);
+    return audio_stream_in_get_submix_stream_in(
+            reinterpret_cast<struct audio_stream_in *>(stream));
+}
+
+// Get a pointer to submix_audio_device given a pointer to an audio_device that is embedded within
+// the structure (container_of-style cast computed with offsetof).
+static struct submix_audio_device * audio_hw_device_get_submix_audio_device(
+        struct audio_hw_device *device)
+{
+    ALOG_ASSERT(device);
+    return reinterpret_cast<struct submix_audio_device *>(reinterpret_cast<uint8_t *>(device) -
+            offsetof(struct submix_audio_device, device));
+}
+
+// Compare an audio_config with input channel mask and an audio_config with output channel mask
+// returning false if they do *not* match, true otherwise.
+static bool audio_config_compare(const audio_config * const input_config,
+                                 const audio_config * const output_config)
+{
+#if !ENABLE_CHANNEL_CONVERSION  // without conversion the channel counts must match exactly
+    const uint32_t input_channels = audio_channel_count_from_in_mask(input_config->channel_mask);
+    const uint32_t output_channels = audio_channel_count_from_out_mask(output_config->channel_mask);
+    if (input_channels != output_channels) {
+        ALOGE("audio_config_compare() channel count mismatch input=%d vs. output=%d",
+              input_channels, output_channels);
+        return false;
+    }
+#endif // !ENABLE_CHANNEL_CONVERSION
+#if ENABLE_RESAMPLING  // a rate mismatch is tolerated for mono input streams (resampled on read)
+    if (input_config->sample_rate != output_config->sample_rate &&
+            audio_channel_count_from_in_mask(input_config->channel_mask) != 1) {
+#else
+    if (input_config->sample_rate != output_config->sample_rate) {
+#endif // ENABLE_RESAMPLING
+        ALOGE("audio_config_compare() sample rate mismatch %u vs. %u",
+              input_config->sample_rate, output_config->sample_rate);
+        return false;
+    }
+    if (input_config->format != output_config->format) {
+        ALOGE("audio_config_compare() format mismatch %x vs. %x",
+              input_config->format, output_config->format);
+        return false;
+    }
+    // This purposely ignores offload_info as it's not required for the submix device.
+    return true;
+}
+
+// If one doesn't exist, create a pipe for the submix audio device rsxadev of size
+// buffer_size_frames and optionally associate "in" or "out" with the submix audio device.
+static void submix_audio_device_create_pipe(struct submix_audio_device * const rsxadev,
+                                            const struct audio_config * const config,
+                                            const size_t buffer_size_frames,
+                                            const uint32_t buffer_period_count,
+                                            struct submix_stream_in * const in,
+                                            struct submix_stream_out * const out)
+{
+    ALOG_ASSERT(in || out);
+    ALOGV("submix_audio_device_create_pipe()");
+    pthread_mutex_lock(&rsxadev->lock);
+    // Save a reference to the specified input or output stream and the associated channel
+    // mask.
+    if (in) {
+        rsxadev->input = in;
+        rsxadev->config.input_channel_mask = config->channel_mask;
+#if ENABLE_RESAMPLING
+        rsxadev->config.input_sample_rate = config->sample_rate;
+        // If the output isn't configured yet, set the output sample rate to the maximum supported
+        // sample rate such that the smallest possible input buffer is created.
+        if (!rsxadev->output) {
+            rsxadev->config.output_sample_rate = 48000;  // highest rate in supported_sample_rates
+        }
+#endif // ENABLE_RESAMPLING
+    }
+    if (out) {
+        rsxadev->output = out;
+        rsxadev->config.output_channel_mask = config->channel_mask;
+#if ENABLE_RESAMPLING
+        rsxadev->config.output_sample_rate = config->sample_rate;
+#endif // ENABLE_RESAMPLING
+    }
+    // If a pipe isn't associated with the device, create one.
+    if (rsxadev->rsxSink == NULL || rsxadev->rsxSource == NULL) {
+        struct submix_config * const device_config = &rsxadev->config;
+        uint32_t channel_count;
+        if (out)
+            channel_count = audio_channel_count_from_out_mask(config->channel_mask);
+        else
+            channel_count = audio_channel_count_from_in_mask(config->channel_mask);
+#if ENABLE_CHANNEL_CONVERSION
+        // If channel conversion is enabled, allocate enough space for the maximum number of
+        // possible channels stored in the pipe for the situation when the number of channels in
+        // the output stream don't match the number in the input stream.
+        const uint32_t pipe_channel_count = max(channel_count, 2);
+#else
+        const uint32_t pipe_channel_count = channel_count;
+#endif // ENABLE_CHANNEL_CONVERSION
+        const NBAIO_Format format = Format_from_SR_C(config->sample_rate, pipe_channel_count,
+                config->format);
+        const NBAIO_Format offers[1] = {format};
+        size_t numCounterOffers = 0;
+        // Create a MonoPipe with optional blocking set to true.
+        MonoPipe* sink = new MonoPipe(buffer_size_frames, format, true /*writeCanBlock*/);
+        // Negotiation between the source and sink cannot fail as the device open operation
+        // creates both ends of the pipe using the same audio format.
+        ssize_t index = sink->negotiate(offers, 1, NULL, numCounterOffers);
+        ALOG_ASSERT(index == 0);
+        MonoPipeReader* source = new MonoPipeReader(sink);
+        numCounterOffers = 0;
+        index = source->negotiate(offers, 1, NULL, numCounterOffers);
+        ALOG_ASSERT(index == 0);
+        ALOGV("submix_audio_device_create_pipe(): created pipe");
+
+        // Save references to the source and sink.
+        ALOG_ASSERT(rsxadev->rsxSink == NULL);
+        ALOG_ASSERT(rsxadev->rsxSource == NULL);
+        rsxadev->rsxSink = sink;
+        rsxadev->rsxSource = source;
+        // Store the sanitized audio format in the device so that it's possible to determine
+        // the format of the pipe source when opening the input device.
+        memcpy(&device_config->common, config, sizeof(device_config->common));
+        device_config->buffer_size_frames = sink->maxFrames();
+        device_config->buffer_period_size_frames = device_config->buffer_size_frames /
+                buffer_period_count;
+        if (in) device_config->pipe_frame_size = audio_stream_in_frame_size(&in->stream);
+        if (out) device_config->pipe_frame_size = audio_stream_out_frame_size(&out->stream);
+#if ENABLE_CHANNEL_CONVERSION
+        // Calculate the pipe frame size based upon the number of channels.
+        device_config->pipe_frame_size = (device_config->pipe_frame_size * pipe_channel_count) /
+                channel_count;
+#endif // ENABLE_CHANNEL_CONVERSION
+        SUBMIX_ALOGV("submix_audio_device_create_pipe(): pipe frame size %zd, pipe size %zd, "
+                "period size %zd", device_config->pipe_frame_size,
+                device_config->buffer_size_frames, device_config->buffer_period_size_frames);
+    }
+    pthread_mutex_unlock(&rsxadev->lock);
+}
+
+// Release references to the sink and source. Input and output threads may maintain references
+// to these objects via StrongPointer (sp<MonoPipe> and sp<MonoPipeReader>) which they can use
+// before they shutdown. Presumably called with rsxadev->lock held (see callers) — confirm.
+static void submix_audio_device_release_pipe(struct submix_audio_device * const rsxadev)
+{
+    ALOGV("submix_audio_device_release_pipe()");
+    rsxadev->rsxSink.clear();
+    rsxadev->rsxSource.clear();
+}
+
+// Remove references to the specified input and output streams. When the device no longer
+// references input and output streams destroy the associated pipe.
+static void submix_audio_device_destroy_pipe(struct submix_audio_device * const rsxadev,
+                                             const struct submix_stream_in * const in,
+                                             const struct submix_stream_out * const out)
+{
+    MonoPipe* sink;  // NOTE(review): unused local, retained to keep the patch minimal.
+    pthread_mutex_lock(&rsxadev->lock);
+    ALOGV("submix_audio_device_destroy_pipe()");
+    ALOG_ASSERT(in == NULL || rsxadev->input == in);
+    ALOG_ASSERT(out == NULL || rsxadev->output == out);
+    if (in != NULL) {
+#if ENABLE_LEGACY_INPUT_OPEN
+        // The input stream is reference counted; only drop the device's reference when the
+        // last legacy open of the stream is closed.
+        const_cast<struct submix_stream_in*>(in)->ref_count--;
+        if (in->ref_count == 0) {
+            rsxadev->input = NULL;
+        }
+        ALOGV("submix_audio_device_destroy_pipe(): input ref_count %d", in->ref_count);
+#else
+        rsxadev->input = NULL;
+#endif // ENABLE_LEGACY_INPUT_OPEN
+    }
+    if (out != NULL) rsxadev->output = NULL;
+    // Destroy the pipe only when *neither* stream references it any longer. The previous
+    // condition tested != NULL, which released the pipe while both streams were still attached
+    // and never released it once both were gone.
+    if (rsxadev->input == NULL && rsxadev->output == NULL) {
+        submix_audio_device_release_pipe(rsxadev);
+        ALOGV("submix_audio_device_destroy_pipe(): pipe destroyed");
+    }
+    pthread_mutex_unlock(&rsxadev->lock);
+}
+
+// Sanitize the user specified audio config for a submix input / output stream, replacing any
+// unsupported channel mask / sample rate with module defaults and forcing the default format.
+static void submix_sanitize_config(struct audio_config * const config, const bool is_input_format)
+{
+    config->channel_mask = is_input_format ? get_supported_channel_in_mask(config->channel_mask) :
+            get_supported_channel_out_mask(config->channel_mask);
+    config->sample_rate = get_supported_sample_rate(config->sample_rate);
+    config->format = DEFAULT_FORMAT;
+}
+
+// Verify a submix input or output stream can be opened.
+static bool submix_open_validate(const struct submix_audio_device * const rsxadev,
+                                 pthread_mutex_t * const lock,
+                                 const struct audio_config * const config,
+                                 const bool opening_input)
+{
+    bool input_open;
+    bool output_open;
+    audio_config pipe_config;
+
+    // Query the device for the current audio config and whether input and output streams are open.
+    pthread_mutex_lock(lock);
+    output_open = rsxadev->output != NULL;
+    input_open = rsxadev->input != NULL;
+    memcpy(&pipe_config, &rsxadev->config.common, sizeof(pipe_config));
+    pthread_mutex_unlock(lock);
+
+    // If the stream is already open, don't open it again.
+    if (opening_input ? !ENABLE_LEGACY_INPUT_OPEN && input_open : output_open) {
+        ALOGE("submix_open_validate(): %s stream already open.", opening_input ? "Input" :
+                "Output");
+        return false;
+    }
+
+    SUBMIX_ALOGV("submix_open_validate(): sample rate=%d format=%x "
+                 "%s_channel_mask=%x", config->sample_rate, config->format,
+                 opening_input ? "in" : "out", config->channel_mask);
+
+    // If either stream is open, verify the existing audio config of the pipe matches the user
+    // specified config.
+    if (input_open || output_open) {
+        const audio_config * const input_config = opening_input ? config : &pipe_config;
+        const audio_config * const output_config = opening_input ? &pipe_config : config;
+        // Get the channel mask of the open device. NOTE(review): this field is read without
+        // holding the device lock — confirm the race with stream open/close is benign.
+        pipe_config.channel_mask =
+            opening_input ? rsxadev->config.output_channel_mask :
+                rsxadev->config.input_channel_mask;
+        if (!audio_config_compare(input_config, output_config)) {
+            ALOGE("submix_open_validate(): Unsupported format.");
+            return false;
+        }
+    }
+    return true;
+}
+
+// Calculate the maximum size of the pipe buffer in frames for the specified stream, scaling the
+// frame count by the ratio of pipe frame size to the larger of the two frame sizes.
+static size_t calculate_stream_pipe_size_in_frames(const struct audio_stream *stream,
+                                                   const struct submix_config *config,
+                                                   const size_t pipe_frames,
+                                                   const size_t stream_frame_size)
+{
+    const size_t pipe_frame_size = config->pipe_frame_size;  // NOTE(review): stream is unused
+    const size_t max_frame_size = max(stream_frame_size, pipe_frame_size);
+    return (pipe_frames * config->pipe_frame_size) / max_frame_size;
+}
/* audio HAL functions */
 static uint32_t out_get_sample_rate(const struct audio_stream *stream)
 {
-    const struct submix_stream_out *out =
-            reinterpret_cast<const struct submix_stream_out *>(stream);
-    uint32_t out_rate = out->dev->config.rate;
-    //ALOGV("out_get_sample_rate() returns %u", out_rate);
+    const struct submix_stream_out * const out = audio_stream_get_submix_stream_out(
+            const_cast<struct audio_stream *>(stream));
+#if ENABLE_RESAMPLING
+    const uint32_t out_rate = out->dev->config.output_sample_rate;  // output stream's own rate
+#else
+    const uint32_t out_rate = out->dev->config.common.sample_rate;  // rate shared via the pipe
+#endif // ENABLE_RESAMPLING
+    SUBMIX_ALOGV("out_get_sample_rate() returns %u", out_rate);
     return out_rate;
 }
 static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
 {
-    if ((rate != 44100) && (rate != 48000)) {
+    struct submix_stream_out * const out = audio_stream_get_submix_stream_out(stream);
+#if ENABLE_RESAMPLING
+    // The sample rate of the stream can't be changed once it's set since this would change the
+    // output buffer size and hence break playback to the shared pipe.
+    if (rate != out->dev->config.output_sample_rate) {
+        ALOGE("out_set_sample_rate() resampling enabled can't change sample rate from "
+              "%u to %u", out->dev->config.output_sample_rate, rate);
+        return -ENOSYS;
+    }
+#endif // ENABLE_RESAMPLING
+    if (!sample_rate_supported(rate)) {
         ALOGE("out_set_sample_rate(rate=%u) rate unsupported", rate);
         return -ENOSYS;
     }
-    struct submix_stream_out *out = reinterpret_cast<struct submix_stream_out *>(stream);
-    //ALOGV("out_set_sample_rate(rate=%u)", rate);
-    out->dev->config.rate = rate;
+    SUBMIX_ALOGV("out_set_sample_rate(rate=%u)", rate);
+    out->dev->config.common.sample_rate = rate;  // updates the rate shared with the input stream
     return 0;
 }
 static size_t out_get_buffer_size(const struct audio_stream *stream)
 {
-    const struct submix_stream_out *out =
-            reinterpret_cast<const struct submix_stream_out *>(stream);
-    const struct submix_config& config_out = out->dev->config;
-    size_t buffer_size = config_out.period_size * popcount(config_out.channel_mask)
-            * sizeof(int16_t); // only PCM 16bit
-    //ALOGV("out_get_buffer_size() returns %u, period size=%u",
-    //      buffer_size, config_out.period_size);
-    return buffer_size;
+    const struct submix_stream_out * const out = audio_stream_get_submix_stream_out(
+            const_cast<struct audio_stream *>(stream));
+    const struct submix_config * const config = &out->dev->config;
+    const size_t stream_frame_size =
+            audio_stream_out_frame_size((const struct audio_stream_out *)stream);
+    const size_t buffer_size_frames = calculate_stream_pipe_size_in_frames(  // one pipe period
+        stream, config, config->buffer_period_size_frames, stream_frame_size);
+    const size_t buffer_size_bytes = buffer_size_frames * stream_frame_size;
+    SUBMIX_ALOGV("out_get_buffer_size() returns %zu bytes, %zu frames",
+                 buffer_size_bytes, buffer_size_frames);
+    return buffer_size_bytes;
 }
 static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
 {
-    const struct submix_stream_out *out =
-            reinterpret_cast<const struct submix_stream_out *>(stream);
-    uint32_t channels = out->dev->config.channel_mask;
-    //ALOGV("out_get_channels() returns %08x", channels);
-    return channels;
+    const struct submix_stream_out * const out = audio_stream_get_submix_stream_out(
+            const_cast<struct audio_stream *>(stream));
+    uint32_t channel_mask = out->dev->config.output_channel_mask;  // mask sanitized at open time
+    SUBMIX_ALOGV("out_get_channels() returns %08x", channel_mask);
+    return channel_mask;
 }
 static audio_format_t out_get_format(const struct audio_stream *stream)
 {
-    return AUDIO_FORMAT_PCM_16_BIT;
+    const struct submix_stream_out * const out = audio_stream_get_submix_stream_out(
+            const_cast<struct audio_stream *>(stream));
+    const audio_format_t format = out->dev->config.common.format;  // format shared via the pipe
+    SUBMIX_ALOGV("out_get_format() returns %x", format);
+    return format;
 }
 static int out_set_format(struct audio_stream *stream, audio_format_t format)
 {
-    if (format != AUDIO_FORMAT_PCM_16_BIT) {
+    const struct submix_stream_out * const out = audio_stream_get_submix_stream_out(stream);
+    if (format != out->dev->config.common.format) {  // only the already-configured format is valid
+        ALOGE("out_set_format(format=%x) format unsupported", format);
         return -ENOSYS;
-    } else {
-        return 0;
     }
+    SUBMIX_ALOGV("out_set_format(format=%x)", format);
+    return 0;
 }
 static int out_standby(struct audio_stream *stream)
 {
+    struct submix_audio_device * const rsxadev = audio_stream_get_submix_stream_out(stream)->dev;
     ALOGI("out_standby()");
-    const struct submix_stream_out *out = reinterpret_cast<const struct submix_stream_out *>(stream);
+    pthread_mutex_lock(&rsxadev->lock);
-    pthread_mutex_lock(&out->dev->lock);
+    rsxadev->output_standby = true;  // cleared again by the next out_write()
-    out->dev->output_standby = true;
-
-    pthread_mutex_unlock(&out->dev->lock);
+    pthread_mutex_unlock(&rsxadev->lock);
     return 0;
 }
 static int out_dump(const struct audio_stream *stream, int fd)
 {
+    (void)stream;  // no per-stream state is dumped for the submix device
+    (void)fd;
     return 0;
 }
@@ -178,94 +640,127 @@
{
int exiting = -1;
AudioParameter parms = AudioParameter(String8(kvpairs));
+ SUBMIX_ALOGV("out_set_parameters() kvpairs='%s'", kvpairs);
+
// FIXME this is using hard-coded strings but in the future, this functionality will be
// converted to use audio HAL extensions required to support tunneling
if ((parms.getInt(String8("exiting"), exiting) == NO_ERROR) && (exiting > 0)) {
- const struct submix_stream_out *out =
- reinterpret_cast<const struct submix_stream_out *>(stream);
-
- pthread_mutex_lock(&out->dev->lock);
-
+ struct submix_audio_device * const rsxadev =
+ audio_stream_get_submix_stream_out(stream)->dev;
+ pthread_mutex_lock(&rsxadev->lock);
{ // using the sink
- sp<MonoPipe> sink = out->dev->rsxSink.get();
- if (sink == 0) {
- pthread_mutex_unlock(&out->dev->lock);
+ sp<MonoPipe> sink = rsxadev->rsxSink;
+ if (sink == NULL) {
+ pthread_mutex_unlock(&rsxadev->lock);
return 0;
}
- ALOGI("shutdown");
+ ALOGI("out_set_parameters(): shutdown");
sink->shutdown(true);
} // done using the sink
-
- pthread_mutex_unlock(&out->dev->lock);
+ pthread_mutex_unlock(&rsxadev->lock);
}
-
return 0;
}
 static char * out_get_parameters(const struct audio_stream *stream, const char *keys)
 {
+    (void)stream;  // no queryable parameters; caller frees the returned empty string
+    (void)keys;
     return strdup("");
 }
 static uint32_t out_get_latency(const struct audio_stream_out *stream)
 {
-    const struct submix_stream_out *out =
-            reinterpret_cast<const struct submix_stream_out *>(stream);
-    const struct submix_config * config_out = &(out->dev->config);
-    uint32_t latency = (MAX_PIPE_DEPTH_IN_FRAMES * 1000) / config_out->rate;
-    ALOGV("out_get_latency() returns %u", latency);
-    return latency;
+    const struct submix_stream_out * const out = audio_stream_out_get_submix_stream_out(
+            const_cast<struct audio_stream_out *>(stream));
+    const struct submix_config * const config = &out->dev->config;
+    const size_t stream_frame_size =
+            audio_stream_out_frame_size(stream);
+    const size_t buffer_size_frames = calculate_stream_pipe_size_in_frames(  // whole pipe depth
+            &stream->common, config, config->buffer_size_frames, stream_frame_size);
+    const uint32_t sample_rate = out_get_sample_rate(&stream->common);
+    const uint32_t latency_ms = (buffer_size_frames * 1000) / sample_rate;
+    SUBMIX_ALOGV("out_get_latency() returns %u ms, size in frames %zu, sample rate %u",
+                 latency_ms, buffer_size_frames, sample_rate);
+    return latency_ms;
 }
 static int out_set_volume(struct audio_stream_out *stream, float left,
                           float right)
 {
+    (void)stream;  // volume control is not supported by the submix device
+    (void)left;
+    (void)right;
     return -ENOSYS;
 }
static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
size_t bytes)
{
- //ALOGV("out_write(bytes=%d)", bytes);
+ SUBMIX_ALOGV("out_write(bytes=%zd)", bytes);
ssize_t written_frames = 0;
- struct submix_stream_out *out = reinterpret_cast<struct submix_stream_out *>(stream);
-
- const size_t frame_size = audio_stream_frame_size(&stream->common);
+ const size_t frame_size = audio_stream_out_frame_size(stream);
+ struct submix_stream_out * const out = audio_stream_out_get_submix_stream_out(stream);
+ struct submix_audio_device * const rsxadev = out->dev;
const size_t frames = bytes / frame_size;
- pthread_mutex_lock(&out->dev->lock);
+ pthread_mutex_lock(&rsxadev->lock);
- out->dev->output_standby = false;
+ rsxadev->output_standby = false;
- sp<MonoPipe> sink = out->dev->rsxSink.get();
- if (sink != 0) {
+ sp<MonoPipe> sink = rsxadev->rsxSink;
+ if (sink != NULL) {
if (sink->isShutdown()) {
sink.clear();
- pthread_mutex_unlock(&out->dev->lock);
+ pthread_mutex_unlock(&rsxadev->lock);
+ SUBMIX_ALOGV("out_write(): pipe shutdown, ignoring the write.");
// the pipe has already been shutdown, this buffer will be lost but we must
// simulate timing so we don't drain the output faster than realtime
usleep(frames * 1000000 / out_get_sample_rate(&stream->common));
return bytes;
}
} else {
- pthread_mutex_unlock(&out->dev->lock);
+ pthread_mutex_unlock(&rsxadev->lock);
ALOGE("out_write without a pipe!");
ALOG_ASSERT("out_write without a pipe!");
return 0;
}
- pthread_mutex_unlock(&out->dev->lock);
+ // If the write to the sink would block when no input stream is present, flush enough frames
+ // from the pipe to make space to write the most recent data.
+ {
+ const size_t availableToWrite = sink->availableToWrite();
+ sp<MonoPipeReader> source = rsxadev->rsxSource;
+ if (rsxadev->input == NULL && availableToWrite < frames) {
+ static uint8_t flush_buffer[64];
+ const size_t flushBufferSizeFrames = sizeof(flush_buffer) / frame_size;
+ size_t frames_to_flush_from_source = frames - availableToWrite;
+ SUBMIX_ALOGV("out_write(): flushing %d frames from the pipe to avoid blocking",
+ frames_to_flush_from_source);
+ while (frames_to_flush_from_source) {
+ const size_t flush_size = min(frames_to_flush_from_source, flushBufferSizeFrames);
+ frames_to_flush_from_source -= flush_size;
+ source->read(flush_buffer, flush_size, AudioBufferProvider::kInvalidPTS);
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&rsxadev->lock);
written_frames = sink->write(buffer, frames);
+#if LOG_STREAMS_TO_FILES
+ if (out->log_fd >= 0) write(out->log_fd, buffer, written_frames * frame_size);
+#endif // LOG_STREAMS_TO_FILES
+
if (written_frames < 0) {
if (written_frames == (ssize_t)NEGOTIATE) {
ALOGE("out_write() write to pipe returned NEGOTIATE");
- pthread_mutex_lock(&out->dev->lock);
+ pthread_mutex_lock(&rsxadev->lock);
sink.clear();
- pthread_mutex_unlock(&out->dev->lock);
+ pthread_mutex_unlock(&rsxadev->lock);
written_frames = 0;
return 0;
@@ -276,132 +771,195 @@
}
}
- pthread_mutex_lock(&out->dev->lock);
+ pthread_mutex_lock(&rsxadev->lock);
sink.clear();
- pthread_mutex_unlock(&out->dev->lock);
+ pthread_mutex_unlock(&rsxadev->lock);
if (written_frames < 0) {
ALOGE("out_write() failed writing to pipe with %zd", written_frames);
return 0;
- } else {
- ALOGV("out_write() wrote %zu bytes)", written_frames * frame_size);
- return written_frames * frame_size;
}
+ const ssize_t written_bytes = written_frames * frame_size;
+ SUBMIX_ALOGV("out_write() wrote %zd bytes %zd frames", written_bytes, written_frames);
+ return written_bytes;
}
 static int out_get_render_position(const struct audio_stream_out *stream,
                                    uint32_t *dsp_frames)
 {
+    (void)stream;  // render position is not supported by the submix device
+    (void)dsp_frames;
     return -EINVAL;
 }
 static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
 {
+    (void)stream;  // effects are accepted but ignored by the submix device
+    (void)effect;
     return 0;
 }
 static int out_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
 {
+    (void)stream;  // effects are accepted but ignored by the submix device
+    (void)effect;
     return 0;
 }
 static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
                                         int64_t *timestamp)
 {
+    (void)stream;  // next-write timestamps are not supported by the submix device
+    (void)timestamp;
     return -EINVAL;
 }
/** audio_stream_in implementation **/
 static uint32_t in_get_sample_rate(const struct audio_stream *stream)
 {
-    const struct submix_stream_in *in = reinterpret_cast<const struct submix_stream_in *>(stream);
-    //ALOGV("in_get_sample_rate() returns %u", in->dev->config.rate);
-    return in->dev->config.rate;
+    const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(
+            const_cast<struct audio_stream*>(stream));
+#if ENABLE_RESAMPLING
+    const uint32_t rate = in->dev->config.input_sample_rate;  // input stream's own rate
+#else
+    const uint32_t rate = in->dev->config.common.sample_rate;  // rate shared via the pipe
+#endif // ENABLE_RESAMPLING
+    SUBMIX_ALOGV("in_get_sample_rate() returns %u", rate);
+    return rate;
 }
 static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
 {
-    return -ENOSYS;
+    const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(stream);
+#if ENABLE_RESAMPLING
+    // The sample rate of the stream can't be changed once it's set since this would change the
+    // input buffer size and hence break recording from the shared pipe.
+    if (rate != in->dev->config.input_sample_rate) {
+        ALOGE("in_set_sample_rate() resampling enabled can't change sample rate from "
+              "%u to %u", in->dev->config.input_sample_rate, rate);
+        return -ENOSYS;
+    }
+#endif // ENABLE_RESAMPLING
+    if (!sample_rate_supported(rate)) {
+        ALOGE("in_set_sample_rate(rate=%u) rate unsupported", rate);
+        return -ENOSYS;
+    }
+    in->dev->config.common.sample_rate = rate;  // updates the rate shared with the output stream
+    SUBMIX_ALOGV("in_set_sample_rate() set %u", rate);
+    return 0;
 }
 static size_t in_get_buffer_size(const struct audio_stream *stream)
 {
-    const struct submix_stream_in *in = reinterpret_cast<const struct submix_stream_in *>(stream);
-    ALOGV("in_get_buffer_size() returns %zu",
-          in->dev->config.period_size * audio_stream_frame_size(stream));
-    return in->dev->config.period_size * audio_stream_frame_size(stream);
+    const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(
+            const_cast<struct audio_stream*>(stream));
+    const struct submix_config * const config = &in->dev->config;
+    const size_t stream_frame_size =
+            audio_stream_in_frame_size((const struct audio_stream_in *)stream);
+    size_t buffer_size_frames = calculate_stream_pipe_size_in_frames(  // one pipe period
+        stream, config, config->buffer_period_size_frames, stream_frame_size);
+#if ENABLE_RESAMPLING
+    // Scale the size of the buffer based upon the maximum number of frames that could be returned
+    // given the ratio of output to input sample rate.
+    buffer_size_frames = (size_t)(((float)buffer_size_frames *
+                                   (float)config->input_sample_rate) /
+                                  (float)config->output_sample_rate);
+#endif // ENABLE_RESAMPLING
+    const size_t buffer_size_bytes = buffer_size_frames * stream_frame_size;
+    SUBMIX_ALOGV("in_get_buffer_size() returns %zu bytes, %zu frames", buffer_size_bytes,
+                 buffer_size_frames);
+    return buffer_size_bytes;
 }
 static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
 {
-    return AUDIO_CHANNEL_IN_STEREO;
+    const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(
+            const_cast<struct audio_stream*>(stream));
+    const audio_channel_mask_t channel_mask = in->dev->config.input_channel_mask;  // set at open
+    SUBMIX_ALOGV("in_get_channels() returns %x", channel_mask);
+    return channel_mask;
 }
 static audio_format_t in_get_format(const struct audio_stream *stream)
 {
-    return AUDIO_FORMAT_PCM_16_BIT;
+    const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(
+            const_cast<struct audio_stream*>(stream));
+    const audio_format_t format = in->dev->config.common.format;  // format shared via the pipe
+    SUBMIX_ALOGV("in_get_format() returns %x", format);
+    return format;
 }
 static int in_set_format(struct audio_stream *stream, audio_format_t format)
 {
-    if (format != AUDIO_FORMAT_PCM_16_BIT) {
+    const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(stream);
+    if (format != in->dev->config.common.format) {  // only the already-configured format is valid
+        ALOGE("in_set_format(format=%x) format unsupported", format);
         return -ENOSYS;
-    } else {
-        return 0;
     }
+    SUBMIX_ALOGV("in_set_format(format=%x)", format);
+    return 0;
 }
 static int in_standby(struct audio_stream *stream)
 {
+    struct submix_audio_device * const rsxadev = audio_stream_get_submix_stream_in(stream)->dev;
     ALOGI("in_standby()");
-    const struct submix_stream_in *in = reinterpret_cast<const struct submix_stream_in *>(stream);
-    pthread_mutex_lock(&in->dev->lock);
+    pthread_mutex_lock(&rsxadev->lock);
-    in->dev->input_standby = true;
+    rsxadev->input_standby = true;  // cleared again by the next in_read()
-    pthread_mutex_unlock(&in->dev->lock);
+    pthread_mutex_unlock(&rsxadev->lock);
     return 0;
 }
 static int in_dump(const struct audio_stream *stream, int fd)
 {
+    (void)stream;  // no per-stream state is dumped for the submix device
+    (void)fd;
     return 0;
 }
 static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
 {
+    (void)stream;  // no settable parameters on the input stream
+    (void)kvpairs;
     return 0;
 }
 static char * in_get_parameters(const struct audio_stream *stream,
                                 const char *keys)
 {
+    (void)stream;  // no queryable parameters; caller frees the returned empty string
+    (void)keys;
     return strdup("");
 }
 static int in_set_gain(struct audio_stream_in *stream, float gain)
 {
+    (void)stream;  // gain control is not supported by the submix device
+    (void)gain;
     return 0;
 }
static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
size_t bytes)
{
- //ALOGV("in_read bytes=%u", bytes);
- ssize_t frames_read = -1977;
- struct submix_stream_in *in = reinterpret_cast<struct submix_stream_in *>(stream);
- const size_t frame_size = audio_stream_frame_size(&stream->common);
+ struct submix_stream_in * const in = audio_stream_in_get_submix_stream_in(stream);
+ struct submix_audio_device * const rsxadev = in->dev;
+ struct audio_config *format;
+ const size_t frame_size = audio_stream_in_frame_size(stream);
const size_t frames_to_read = bytes / frame_size;
- pthread_mutex_lock(&in->dev->lock);
+ SUBMIX_ALOGV("in_read bytes=%zu", bytes);
+ pthread_mutex_lock(&rsxadev->lock);
const bool output_standby_transition = (in->output_standby != in->dev->output_standby);
- in->output_standby = in->dev->output_standby;
+ in->output_standby = rsxadev->output_standby;
- if (in->dev->input_standby || output_standby_transition) {
- in->dev->input_standby = false;
+ if (rsxadev->input_standby || output_standby_transition) {
+ rsxadev->input_standby = false;
// keep track of when we exit input standby (== first read == start "real recording")
// or when we start recording silence, and reset projected time
int rc = clock_gettime(CLOCK_MONOTONIC, &in->record_start_time);
@@ -415,43 +973,156 @@
{
// about to read from audio source
- sp<MonoPipeReader> source = in->dev->rsxSource.get();
- if (source == 0) {
+ sp<MonoPipeReader> source = rsxadev->rsxSource;
+ if (source == NULL) {
ALOGE("no audio pipe yet we're trying to read!");
- pthread_mutex_unlock(&in->dev->lock);
- usleep((bytes / frame_size) * 1000000 / in_get_sample_rate(&stream->common));
+ pthread_mutex_unlock(&rsxadev->lock);
+ usleep(frames_to_read * 1000000 / in_get_sample_rate(&stream->common));
memset(buffer, 0, bytes);
return bytes;
}
- pthread_mutex_unlock(&in->dev->lock);
+ pthread_mutex_unlock(&rsxadev->lock);
// read the data from the pipe (it's non blocking)
int attempts = 0;
char* buff = (char*)buffer;
+#if ENABLE_CHANNEL_CONVERSION
+ // Determine whether channel conversion is required.
+ const uint32_t input_channels = audio_channel_count_from_in_mask(
+ rsxadev->config.input_channel_mask);
+ const uint32_t output_channels = audio_channel_count_from_out_mask(
+ rsxadev->config.output_channel_mask);
+ if (input_channels != output_channels) {
+ SUBMIX_ALOGV("in_read(): %d output channels will be converted to %d "
+ "input channels", output_channels, input_channels);
+ // Only support 16-bit PCM channel conversion from mono to stereo or stereo to mono.
+ ALOG_ASSERT(rsxadev->config.common.format == AUDIO_FORMAT_PCM_16_BIT);
+ ALOG_ASSERT((input_channels == 1 && output_channels == 2) ||
+ (input_channels == 2 && output_channels == 1));
+ }
+#endif // ENABLE_CHANNEL_CONVERSION
+
+#if ENABLE_RESAMPLING
+ const uint32_t input_sample_rate = in_get_sample_rate(&stream->common);
+ const uint32_t output_sample_rate = rsxadev->config.output_sample_rate;
+ const size_t resampler_buffer_size_frames =
+ sizeof(rsxadev->resampler_buffer) / sizeof(rsxadev->resampler_buffer[0]);
+ float resampler_ratio = 1.0f;
+ // Determine whether resampling is required.
+ if (input_sample_rate != output_sample_rate) {
+ resampler_ratio = (float)output_sample_rate / (float)input_sample_rate;
+ // Only support 16-bit PCM mono resampling.
+ // NOTE: Resampling is performed after the channel conversion step.
+ ALOG_ASSERT(rsxadev->config.common.format == AUDIO_FORMAT_PCM_16_BIT);
+ ALOG_ASSERT(audio_channel_count_from_in_mask(rsxadev->config.input_channel_mask) == 1);
+ }
+#endif // ENABLE_RESAMPLING
+
while ((remaining_frames > 0) && (attempts < MAX_READ_ATTEMPTS)) {
- attempts++;
- frames_read = source->read(buff, remaining_frames, AudioBufferProvider::kInvalidPTS);
+ ssize_t frames_read = -1977;
+ size_t read_frames = remaining_frames;
+#if ENABLE_RESAMPLING
+ char* const saved_buff = buff;
+ if (resampler_ratio != 1.0f) {
+ // Calculate the number of frames from the pipe that need to be read to generate
+ // the data for the input stream read.
+ const size_t frames_required_for_resampler = (size_t)(
+ (float)read_frames * (float)resampler_ratio);
+ read_frames = min(frames_required_for_resampler, resampler_buffer_size_frames);
+ // Read into the resampler buffer.
+ buff = (char*)rsxadev->resampler_buffer;
+ }
+#endif // ENABLE_RESAMPLING
+#if ENABLE_CHANNEL_CONVERSION
+ if (output_channels == 1 && input_channels == 2) {
+ // Need to read half the requested frames since the converted output
+ // data will take twice the space (mono->stereo).
+ read_frames /= 2;
+ }
+#endif // ENABLE_CHANNEL_CONVERSION
+
+ SUBMIX_ALOGV("in_read(): frames available to read %zd", source->availableToRead());
+
+ frames_read = source->read(buff, read_frames, AudioBufferProvider::kInvalidPTS);
+
+ SUBMIX_ALOGV("in_read(): frames read %zd", frames_read);
+
+#if ENABLE_CHANNEL_CONVERSION
+ // Perform in-place channel conversion.
+ // NOTE: In the following "input stream" refers to the data returned by this function
+ // and "output stream" refers to the data read from the pipe.
+ if (input_channels != output_channels && frames_read > 0) {
+ int16_t *data = (int16_t*)buff;
+ if (output_channels == 2 && input_channels == 1) {
+ // Offset into the output stream data in samples.
+ ssize_t output_stream_offset = 0;
+ for (ssize_t input_stream_frame = 0; input_stream_frame < frames_read;
+ input_stream_frame++, output_stream_offset += 2) {
+ // Average the content from both channels.
+ data[input_stream_frame] = ((int32_t)data[output_stream_offset] +
+ (int32_t)data[output_stream_offset + 1]) / 2;
+ }
+ } else if (output_channels == 1 && input_channels == 2) {
+ // Offset into the input stream data in samples.
+ ssize_t input_stream_offset = (frames_read - 1) * 2;
+ for (ssize_t output_stream_frame = frames_read - 1; output_stream_frame >= 0;
+ output_stream_frame--, input_stream_offset -= 2) {
+ const short sample = data[output_stream_frame];
+ data[input_stream_offset] = sample;
+ data[input_stream_offset + 1] = sample;
+ }
+ }
+ }
+#endif // ENABLE_CHANNEL_CONVERSION
+
+#if ENABLE_RESAMPLING
+ if (resampler_ratio != 1.0f) {
+ SUBMIX_ALOGV("in_read(): resampling %zd frames", frames_read);
+ const int16_t * const data = (int16_t*)buff;
+ int16_t * const resampled_buffer = (int16_t*)saved_buff;
+ // Resample with *no* filtering - if the data from the ouptut stream was really
+ // sampled at a different rate this will result in very nasty aliasing.
+ const float output_stream_frames = (float)frames_read;
+ size_t input_stream_frame = 0;
+ for (float output_stream_frame = 0.0f;
+ output_stream_frame < output_stream_frames &&
+ input_stream_frame < remaining_frames;
+ output_stream_frame += resampler_ratio, input_stream_frame++) {
+ resampled_buffer[input_stream_frame] = data[(size_t)output_stream_frame];
+ }
+ ALOG_ASSERT(input_stream_frame <= (ssize_t)resampler_buffer_size_frames);
+ SUBMIX_ALOGV("in_read(): resampler produced %zd frames", input_stream_frame);
+ frames_read = input_stream_frame;
+ buff = saved_buff;
+ }
+#endif // ENABLE_RESAMPLING
+
if (frames_read > 0) {
+#if LOG_STREAMS_TO_FILES
+ if (in->log_fd >= 0) write(in->log_fd, buff, frames_read * frame_size);
+#endif // LOG_STREAMS_TO_FILES
+
remaining_frames -= frames_read;
buff += frames_read * frame_size;
- //ALOGV(" in_read (att=%d) got %ld frames, remaining=%u",
- // attempts, frames_read, remaining_frames);
+ SUBMIX_ALOGV(" in_read (att=%d) got %zd frames, remaining=%zu",
+ attempts, frames_read, remaining_frames);
} else {
- //ALOGE(" in_read read returned %ld", frames_read);
+ attempts++;
+ SUBMIX_ALOGE(" in_read read returned %zd", frames_read);
usleep(READ_ATTEMPT_SLEEP_MS * 1000);
}
}
// done using the source
- pthread_mutex_lock(&in->dev->lock);
+ pthread_mutex_lock(&rsxadev->lock);
source.clear();
- pthread_mutex_unlock(&in->dev->lock);
+ pthread_mutex_unlock(&rsxadev->lock);
}
if (remaining_frames > 0) {
- ALOGV(" remaining_frames = %zu", remaining_frames);
- memset(((char*)buffer)+ bytes - (remaining_frames * frame_size), 0,
- remaining_frames * frame_size);
+ const size_t remaining_bytes = remaining_frames * frame_size;
+ SUBMIX_ALOGV(" clearing remaining_frames = %zu", remaining_frames);
+ memset(((char*)buffer)+ bytes - remaining_bytes, 0, remaining_bytes);
}
// compute how much we need to sleep after reading the data by comparing the wall clock with
@@ -469,17 +1140,17 @@
record_duration.tv_nsec += 1000000000;
}
- // read_counter_frames contains the number of frames that have been read since the beginning
- // of recording (including this call): it's converted to usec and compared to how long we've
- // been recording for, which gives us how long we must wait to sync the projected recording
- // time, and the observed recording time
+ // read_counter_frames contains the number of frames that have been read since the
+ // beginning of recording (including this call): it's converted to usec and compared to
+ // how long we've been recording for, which gives us how long we must wait to sync the
+ // projected recording time, and the observed recording time.
long projected_vs_observed_offset_us =
((int64_t)(in->read_counter_frames
- (record_duration.tv_sec*sample_rate)))
* 1000000 / sample_rate
- (record_duration.tv_nsec / 1000);
- ALOGV(" record duration %5lds %3ldms, will wait: %7ldus",
+ SUBMIX_ALOGV(" record duration %5lds %3ldms, will wait: %7ldus",
record_duration.tv_sec, record_duration.tv_nsec/1000000,
projected_vs_observed_offset_us);
if (projected_vs_observed_offset_us > 0) {
@@ -487,24 +1158,28 @@
}
}
-
- ALOGV("in_read returns %zu", bytes);
+ SUBMIX_ALOGV("in_read returns %zu", bytes);
return bytes;
}
static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
{
+ (void)stream;
return 0;
}
static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
+ (void)stream;
+ (void)effect;
return 0;
}
static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
+ (void)stream;
+ (void)effect;
return 0;
}
@@ -513,21 +1188,30 @@
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
- struct audio_stream_out **stream_out)
+ struct audio_stream_out **stream_out,
+ const char *address __unused)
{
+ struct submix_audio_device * const rsxadev = audio_hw_device_get_submix_audio_device(dev);
ALOGV("adev_open_output_stream()");
- struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev;
struct submix_stream_out *out;
- int ret;
+ bool force_pipe_creation = false;
+ (void)handle;
+ (void)devices;
+ (void)flags;
- out = (struct submix_stream_out *)calloc(1, sizeof(struct submix_stream_out));
- if (!out) {
- ret = -ENOMEM;
- goto err_open;
+ *stream_out = NULL;
+
+ // Make sure it's possible to open the device given the current audio config.
+ submix_sanitize_config(config, false);
+ if (!submix_open_validate(rsxadev, &rsxadev->lock, config, false)) {
+ ALOGE("adev_open_output_stream(): Unable to open output stream.");
+ return -EINVAL;
}
- pthread_mutex_lock(&rsxadev->lock);
+ out = (struct submix_stream_out *)calloc(1, sizeof(struct submix_stream_out));
+ if (!out) return -ENOMEM;
+ // Initialize the function pointer tables (v-tables).
out->stream.common.get_sample_rate = out_get_sample_rate;
out->stream.common.set_sample_rate = out_set_sample_rate;
out->stream.common.get_buffer_size = out_get_buffer_size;
@@ -546,216 +1230,248 @@
out->stream.get_render_position = out_get_render_position;
out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
- config->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
- rsxadev->config.channel_mask = config->channel_mask;
+#if ENABLE_RESAMPLING
+ // Recreate the pipe with the correct sample rate so that MonoPipe.write() rate limits
+ // writes correctly.
+ force_pipe_creation = rsxadev->config.common.sample_rate != config->sample_rate;
+#endif // ENABLE_RESAMPLING
- if ((config->sample_rate != 48000) && (config->sample_rate != 44100)) {
- config->sample_rate = DEFAULT_RATE_HZ;
+ // If the sink has been shutdown or pipe recreation is forced (see above), delete the pipe so
+ // that it's recreated.
+ pthread_mutex_lock(&rsxadev->lock);
+ if ((rsxadev->rsxSink != NULL && rsxadev->rsxSink->isShutdown()) || force_pipe_creation) {
+ submix_audio_device_release_pipe(rsxadev);
}
- rsxadev->config.rate = config->sample_rate;
-
- config->format = AUDIO_FORMAT_PCM_16_BIT;
- rsxadev->config.format = config->format;
-
- rsxadev->config.period_size = 1024;
- rsxadev->config.period_count = 4;
- out->dev = rsxadev;
-
- *stream_out = &out->stream;
-
- // initialize pipe
- {
- ALOGV(" initializing pipe");
- const NBAIO_Format format = Format_from_SR_C(config->sample_rate, 2);
- const NBAIO_Format offers[1] = {format};
- size_t numCounterOffers = 0;
- // creating a MonoPipe with optional blocking set to true.
- MonoPipe* sink = new MonoPipe(MAX_PIPE_DEPTH_IN_FRAMES, format, true/*writeCanBlock*/);
- ssize_t index = sink->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- MonoPipeReader* source = new MonoPipeReader(sink);
- numCounterOffers = 0;
- index = source->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- rsxadev->rsxSink = sink;
- rsxadev->rsxSource = source;
- }
-
pthread_mutex_unlock(&rsxadev->lock);
- return 0;
+ // Store a pointer to the device from the output stream.
+ out->dev = rsxadev;
+ // Initialize the pipe.
+ ALOGV("adev_open_output_stream(): Initializing pipe");
+ submix_audio_device_create_pipe(rsxadev, config, DEFAULT_PIPE_SIZE_IN_FRAMES,
+ DEFAULT_PIPE_PERIOD_COUNT, NULL, out);
+#if LOG_STREAMS_TO_FILES
+ out->log_fd = open(LOG_STREAM_OUT_FILENAME, O_CREAT | O_TRUNC | O_WRONLY,
+ LOG_STREAM_FILE_PERMISSIONS);
+ ALOGE_IF(out->log_fd < 0, "adev_open_output_stream(): log file open failed %s",
+ strerror(errno));
+ ALOGV("adev_open_output_stream(): log_fd = %d", out->log_fd);
+#endif // LOG_STREAMS_TO_FILES
+ // Return the output stream.
+ *stream_out = &out->stream;
-err_open:
- *stream_out = NULL;
- return ret;
+ return 0;
}
static void adev_close_output_stream(struct audio_hw_device *dev,
struct audio_stream_out *stream)
{
+ struct submix_stream_out * const out = audio_stream_out_get_submix_stream_out(stream);
ALOGV("adev_close_output_stream()");
- struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev;
-
- pthread_mutex_lock(&rsxadev->lock);
-
- rsxadev->rsxSink.clear();
- rsxadev->rsxSource.clear();
- free(stream);
-
- pthread_mutex_unlock(&rsxadev->lock);
+ submix_audio_device_destroy_pipe(audio_hw_device_get_submix_audio_device(dev), NULL, out);
+#if LOG_STREAMS_TO_FILES
+ if (out->log_fd >= 0) close(out->log_fd);
+#endif // LOG_STREAMS_TO_FILES
+ free(out);
}
static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
{
+ (void)dev;
+ (void)kvpairs;
return -ENOSYS;
}
static char * adev_get_parameters(const struct audio_hw_device *dev,
const char *keys)
{
+ (void)dev;
+ (void)keys;
return strdup("");;
}
static int adev_init_check(const struct audio_hw_device *dev)
{
ALOGI("adev_init_check()");
+ (void)dev;
return 0;
}
static int adev_set_voice_volume(struct audio_hw_device *dev, float volume)
{
+ (void)dev;
+ (void)volume;
return -ENOSYS;
}
static int adev_set_master_volume(struct audio_hw_device *dev, float volume)
{
+ (void)dev;
+ (void)volume;
return -ENOSYS;
}
static int adev_get_master_volume(struct audio_hw_device *dev, float *volume)
{
+ (void)dev;
+ (void)volume;
return -ENOSYS;
}
static int adev_set_master_mute(struct audio_hw_device *dev, bool muted)
{
+ (void)dev;
+ (void)muted;
return -ENOSYS;
}
static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted)
{
+ (void)dev;
+ (void)muted;
return -ENOSYS;
}
static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode)
{
+ (void)dev;
+ (void)mode;
return 0;
}
static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
{
+ (void)dev;
+ (void)state;
return -ENOSYS;
}
static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
{
+ (void)dev;
+ (void)state;
return -ENOSYS;
}
static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
const struct audio_config *config)
{
- //### TODO correlate this with pipe parameters
- return 4096;
+ if (audio_is_linear_pcm(config->format)) {
+ const size_t buffer_period_size_frames =
+ audio_hw_device_get_submix_audio_device(const_cast<struct audio_hw_device*>(dev))->
+ config.buffer_period_size_frames;
+ const size_t frame_size_in_bytes = audio_channel_count_from_in_mask(config->channel_mask) *
+ audio_bytes_per_sample(config->format);
+ const size_t buffer_size = buffer_period_size_frames * frame_size_in_bytes;
+ SUBMIX_ALOGV("adev_get_input_buffer_size() returns %zu bytes, %zu frames",
+ buffer_size, buffer_period_size_frames);
+ return buffer_size;
+ }
+ return 0;
}
static int adev_open_input_stream(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
struct audio_config *config,
- struct audio_stream_in **stream_in)
+ struct audio_stream_in **stream_in,
+ audio_input_flags_t flags __unused,
+ const char *address __unused,
+ audio_source_t source __unused)
{
- ALOGI("adev_open_input_stream()");
-
- struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev;
+ struct submix_audio_device *rsxadev = audio_hw_device_get_submix_audio_device(dev);
struct submix_stream_in *in;
- int ret;
+ ALOGI("adev_open_input_stream()");
+ (void)handle;
+ (void)devices;
- in = (struct submix_stream_in *)calloc(1, sizeof(struct submix_stream_in));
- if (!in) {
- ret = -ENOMEM;
- goto err_open;
+ *stream_in = NULL;
+
+ // Make sure it's possible to open the device given the current audio config.
+ submix_sanitize_config(config, true);
+ if (!submix_open_validate(rsxadev, &rsxadev->lock, config, true)) {
+ ALOGE("adev_open_input_stream(): Unable to open input stream.");
+ return -EINVAL;
}
+#if ENABLE_LEGACY_INPUT_OPEN
pthread_mutex_lock(&rsxadev->lock);
-
- in->stream.common.get_sample_rate = in_get_sample_rate;
- in->stream.common.set_sample_rate = in_set_sample_rate;
- in->stream.common.get_buffer_size = in_get_buffer_size;
- in->stream.common.get_channels = in_get_channels;
- in->stream.common.get_format = in_get_format;
- in->stream.common.set_format = in_set_format;
- in->stream.common.standby = in_standby;
- in->stream.common.dump = in_dump;
- in->stream.common.set_parameters = in_set_parameters;
- in->stream.common.get_parameters = in_get_parameters;
- in->stream.common.add_audio_effect = in_add_audio_effect;
- in->stream.common.remove_audio_effect = in_remove_audio_effect;
- in->stream.set_gain = in_set_gain;
- in->stream.read = in_read;
- in->stream.get_input_frames_lost = in_get_input_frames_lost;
-
- config->channel_mask = AUDIO_CHANNEL_IN_STEREO;
- rsxadev->config.channel_mask = config->channel_mask;
-
- if ((config->sample_rate != 48000) && (config->sample_rate != 44100)) {
- config->sample_rate = DEFAULT_RATE_HZ;
+ in = rsxadev->input;
+ if (in) {
+ in->ref_count++;
+ sp<MonoPipe> sink = rsxadev->rsxSink;
+ ALOG_ASSERT(sink != NULL);
+ // If the sink has been shutdown, delete the pipe.
+ if (sink->isShutdown()) submix_audio_device_release_pipe(rsxadev);
}
- rsxadev->config.rate = config->sample_rate;
+ pthread_mutex_unlock(&rsxadev->lock);
+#else
+ in = NULL;
+#endif // ENABLE_LEGACY_INPUT_OPEN
- config->format = AUDIO_FORMAT_PCM_16_BIT;
- rsxadev->config.format = config->format;
+ if (!in) {
+ in = (struct submix_stream_in *)calloc(1, sizeof(struct submix_stream_in));
+ if (!in) return -ENOMEM;
+ in->ref_count = 1;
- rsxadev->config.period_size = 1024;
- rsxadev->config.period_count = 4;
+ // Initialize the function pointer tables (v-tables).
+ in->stream.common.get_sample_rate = in_get_sample_rate;
+ in->stream.common.set_sample_rate = in_set_sample_rate;
+ in->stream.common.get_buffer_size = in_get_buffer_size;
+ in->stream.common.get_channels = in_get_channels;
+ in->stream.common.get_format = in_get_format;
+ in->stream.common.set_format = in_set_format;
+ in->stream.common.standby = in_standby;
+ in->stream.common.dump = in_dump;
+ in->stream.common.set_parameters = in_set_parameters;
+ in->stream.common.get_parameters = in_get_parameters;
+ in->stream.common.add_audio_effect = in_add_audio_effect;
+ in->stream.common.remove_audio_effect = in_remove_audio_effect;
+ in->stream.set_gain = in_set_gain;
+ in->stream.read = in_read;
+ in->stream.get_input_frames_lost = in_get_input_frames_lost;
+ }
- *stream_in = &in->stream;
-
- in->dev = rsxadev;
-
+ // Initialize the input stream.
in->read_counter_frames = 0;
in->output_standby = rsxadev->output_standby;
-
- pthread_mutex_unlock(&rsxadev->lock);
+ in->dev = rsxadev;
+ // Initialize the pipe.
+ submix_audio_device_create_pipe(rsxadev, config, DEFAULT_PIPE_SIZE_IN_FRAMES,
+ DEFAULT_PIPE_PERIOD_COUNT, in, NULL);
+#if LOG_STREAMS_TO_FILES
+ in->log_fd = open(LOG_STREAM_IN_FILENAME, O_CREAT | O_TRUNC | O_WRONLY,
+ LOG_STREAM_FILE_PERMISSIONS);
+ ALOGE_IF(in->log_fd < 0, "adev_open_input_stream(): log file open failed %s",
+ strerror(errno));
+ ALOGV("adev_open_input_stream(): log_fd = %d", in->log_fd);
+#endif // LOG_STREAMS_TO_FILES
+ // Return the input stream.
+ *stream_in = &in->stream;
return 0;
-
-err_open:
- *stream_in = NULL;
- return ret;
}
static void adev_close_input_stream(struct audio_hw_device *dev,
- struct audio_stream_in *stream)
+ struct audio_stream_in *stream)
{
+ struct submix_stream_in * const in = audio_stream_in_get_submix_stream_in(stream);
ALOGV("adev_close_input_stream()");
- struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev;
-
- pthread_mutex_lock(&rsxadev->lock);
-
- MonoPipe* sink = rsxadev->rsxSink.get();
- if (sink != NULL) {
- ALOGI("shutdown");
- sink->shutdown(true);
- }
-
- free(stream);
-
- pthread_mutex_unlock(&rsxadev->lock);
+ submix_audio_device_destroy_pipe(audio_hw_device_get_submix_audio_device(dev), in, NULL);
+#if LOG_STREAMS_TO_FILES
+ if (in->log_fd >= 0) close(in->log_fd);
+#endif // LOG_STREAMS_TO_FILES
+#if ENABLE_LEGACY_INPUT_OPEN
+ if (in->ref_count == 0) free(in);
+#else
+ free(in);
+#endif // ENABLE_LEGACY_INPUT_OPEN
}
static int adev_dump(const audio_hw_device_t *device, int fd)
{
+ (void)device;
+ (void)fd;
return 0;
}
diff --git a/modules/camera/Android.mk b/modules/camera/Android.mk
index fbe44c5..ae68ed5 100644
--- a/modules/camera/Android.mk
+++ b/modules/camera/Android.mk
@@ -26,14 +26,17 @@
LOCAL_SRC_FILES := \
CameraHAL.cpp \
Camera.cpp \
+ ExampleCamera.cpp \
Metadata.cpp \
Stream.cpp \
+ VendorTags.cpp \
LOCAL_SHARED_LIBRARIES := \
libcamera_metadata \
libcutils \
liblog \
libsync \
+ libutils \
LOCAL_CFLAGS += -Wall -Wextra -fvisibility=hidden
diff --git a/modules/camera/Camera.cpp b/modules/camera/Camera.cpp
index 973380e..de3ae78 100644
--- a/modules/camera/Camera.cpp
+++ b/modules/camera/Camera.cpp
@@ -15,11 +15,12 @@
*/
#include <cstdlib>
-#include <pthread.h>
+#include <stdio.h>
#include <hardware/camera3.h>
#include <sync/sync.h>
#include <system/camera_metadata.h>
#include <system/graphics.h>
+#include <utils/Mutex.h>
#include "CameraHAL.h"
#include "Metadata.h"
#include "Stream.h"
@@ -29,15 +30,12 @@
#include <cutils/log.h>
#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
-#include <cutils/trace.h>
-#include "ScopedTrace.h"
+#include <utils/Trace.h>
#include "Camera.h"
#define CAMERA_SYNC_TIMEOUT 5000 // in msecs
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-
namespace default_camera_hal {
extern "C" {
@@ -59,9 +57,7 @@
mNumStreams(0),
mSettings(NULL)
{
- pthread_mutex_init(&mMutex, NULL);
- pthread_mutex_init(&mStaticInfoMutex, NULL);
-
+ memset(&mTemplates, 0, sizeof(mTemplates));
memset(&mDevice, 0, sizeof(mDevice));
mDevice.common.tag = HARDWARE_DEVICE_TAG;
mDevice.common.version = CAMERA_DEVICE_API_VERSION_3_0;
@@ -72,17 +68,18 @@
Camera::~Camera()
{
- pthread_mutex_destroy(&mMutex);
- pthread_mutex_destroy(&mStaticInfoMutex);
+ if (mStaticInfo != NULL) {
+ free_camera_metadata(mStaticInfo);
+ }
}
int Camera::open(const hw_module_t *module, hw_device_t **device)
{
ALOGI("%s:%d: Opening camera device", __func__, mId);
- CAMTRACE_CALL();
- pthread_mutex_lock(&mMutex);
+ ATRACE_CALL();
+ android::Mutex::Autolock al(mDeviceLock);
+
if (mBusy) {
- pthread_mutex_unlock(&mMutex);
ALOGE("%s:%d: Error! Camera device already opened", __func__, mId);
return -EBUSY;
}
@@ -91,217 +88,62 @@
mBusy = true;
mDevice.common.module = const_cast<hw_module_t*>(module);
*device = &mDevice.common;
-
- pthread_mutex_unlock(&mMutex);
return 0;
}
int Camera::getInfo(struct camera_info *info)
{
+ android::Mutex::Autolock al(mStaticInfoLock);
+
info->facing = CAMERA_FACING_FRONT;
info->orientation = 0;
info->device_version = mDevice.common.version;
-
- pthread_mutex_lock(&mStaticInfoMutex);
if (mStaticInfo == NULL) {
mStaticInfo = initStaticInfo();
}
- pthread_mutex_unlock(&mStaticInfoMutex);
-
info->static_camera_characteristics = mStaticInfo;
-
return 0;
}
int Camera::close()
{
ALOGI("%s:%d: Closing camera device", __func__, mId);
- CAMTRACE_CALL();
- pthread_mutex_lock(&mMutex);
+ ATRACE_CALL();
+ android::Mutex::Autolock al(mDeviceLock);
+
if (!mBusy) {
- pthread_mutex_unlock(&mMutex);
ALOGE("%s:%d: Error! Camera device not open", __func__, mId);
return -EINVAL;
}
// TODO: close camera dev nodes, etc
mBusy = false;
-
- pthread_mutex_unlock(&mMutex);
return 0;
}
int Camera::initialize(const camera3_callback_ops_t *callback_ops)
{
+ int res;
+
ALOGV("%s:%d: callback_ops=%p", __func__, mId, callback_ops);
mCallbackOps = callback_ops;
- // Create standard settings templates
- // 0 is invalid as template
- mTemplates[0] = NULL;
- // CAMERA3_TEMPLATE_PREVIEW = 1
- mTemplates[1] = new Metadata(ANDROID_CONTROL_MODE_OFF,
- ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW);
- // CAMERA3_TEMPLATE_STILL_CAPTURE = 2
- mTemplates[2] = new Metadata(ANDROID_CONTROL_MODE_OFF,
- ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
- // CAMERA3_TEMPLATE_VIDEO_RECORD = 3
- mTemplates[3] = new Metadata(ANDROID_CONTROL_MODE_OFF,
- ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD);
- // CAMERA3_TEMPLATE_VIDEO_SNAPSHOT = 4
- mTemplates[4] = new Metadata(ANDROID_CONTROL_MODE_OFF,
- ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT);
- // CAMERA3_TEMPLATE_STILL_ZERO_SHUTTER_LAG = 5
- mTemplates[5] = new Metadata(ANDROID_CONTROL_MODE_OFF,
- ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG);
- // Pre-generate metadata structures
- for (int i = 1; i < CAMERA3_TEMPLATE_COUNT; i++) {
- mTemplates[i]->generate();
+ // per-device specific initialization
+ res = initDevice();
+ if (res != 0) {
+ ALOGE("%s:%d: Failed to initialize device!", __func__, mId);
+ return res;
}
- // TODO: create vendor templates
return 0;
}
-camera_metadata_t *Camera::initStaticInfo()
-{
- /*
- * Setup static camera info. This will have to customized per camera
- * device.
- */
- Metadata m;
-
- /* android.control */
- int32_t android_control_ae_available_target_fps_ranges[] = {30, 30};
- m.addInt32(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
- ARRAY_SIZE(android_control_ae_available_target_fps_ranges),
- android_control_ae_available_target_fps_ranges);
-
- int32_t android_control_ae_compensation_range[] = {-4, 4};
- m.addInt32(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
- ARRAY_SIZE(android_control_ae_compensation_range),
- android_control_ae_compensation_range);
-
- camera_metadata_rational_t android_control_ae_compensation_step[] = {{2,1}};
- m.addRational(ANDROID_CONTROL_AE_COMPENSATION_STEP,
- ARRAY_SIZE(android_control_ae_compensation_step),
- android_control_ae_compensation_step);
-
- int32_t android_control_max_regions[] = {1};
- m.addInt32(ANDROID_CONTROL_MAX_REGIONS,
- ARRAY_SIZE(android_control_max_regions),
- android_control_max_regions);
-
- /* android.jpeg */
- int32_t android_jpeg_available_thumbnail_sizes[] = {0, 0, 128, 96};
- m.addInt32(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
- ARRAY_SIZE(android_jpeg_available_thumbnail_sizes),
- android_jpeg_available_thumbnail_sizes);
-
- /* android.lens */
- float android_lens_info_available_focal_lengths[] = {1.0};
- m.addFloat(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
- ARRAY_SIZE(android_lens_info_available_focal_lengths),
- android_lens_info_available_focal_lengths);
-
- /* android.request */
- int32_t android_request_max_num_output_streams[] = {0, 3, 1};
- m.addInt32(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
- ARRAY_SIZE(android_request_max_num_output_streams),
- android_request_max_num_output_streams);
-
- /* android.scaler */
- int32_t android_scaler_available_formats[] = {
- HAL_PIXEL_FORMAT_RAW_SENSOR,
- HAL_PIXEL_FORMAT_BLOB,
- HAL_PIXEL_FORMAT_RGBA_8888,
- HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
- // These are handled by YCbCr_420_888
- // HAL_PIXEL_FORMAT_YV12,
- // HAL_PIXEL_FORMAT_YCrCb_420_SP,
- HAL_PIXEL_FORMAT_YCbCr_420_888};
- m.addInt32(ANDROID_SCALER_AVAILABLE_FORMATS,
- ARRAY_SIZE(android_scaler_available_formats),
- android_scaler_available_formats);
-
- int64_t android_scaler_available_jpeg_min_durations[] = {1};
- m.addInt64(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
- ARRAY_SIZE(android_scaler_available_jpeg_min_durations),
- android_scaler_available_jpeg_min_durations);
-
- int32_t android_scaler_available_jpeg_sizes[] = {640, 480};
- m.addInt32(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
- ARRAY_SIZE(android_scaler_available_jpeg_sizes),
- android_scaler_available_jpeg_sizes);
-
- float android_scaler_available_max_digital_zoom[] = {1};
- m.addFloat(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
- ARRAY_SIZE(android_scaler_available_max_digital_zoom),
- android_scaler_available_max_digital_zoom);
-
- int64_t android_scaler_available_processed_min_durations[] = {1};
- m.addInt64(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
- ARRAY_SIZE(android_scaler_available_processed_min_durations),
- android_scaler_available_processed_min_durations);
-
- int32_t android_scaler_available_processed_sizes[] = {640, 480};
- m.addInt32(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
- ARRAY_SIZE(android_scaler_available_processed_sizes),
- android_scaler_available_processed_sizes);
-
- int64_t android_scaler_available_raw_min_durations[] = {1};
- m.addInt64(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
- ARRAY_SIZE(android_scaler_available_raw_min_durations),
- android_scaler_available_raw_min_durations);
-
- int32_t android_scaler_available_raw_sizes[] = {640, 480};
- m.addInt32(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
- ARRAY_SIZE(android_scaler_available_raw_sizes),
- android_scaler_available_raw_sizes);
-
- /* android.sensor */
-
- int32_t android_sensor_info_active_array_size[] = {0, 0, 640, 480};
- m.addInt32(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
- ARRAY_SIZE(android_sensor_info_active_array_size),
- android_sensor_info_active_array_size);
-
- int32_t android_sensor_info_sensitivity_range[] =
- {100, 1600};
- m.addInt32(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
- ARRAY_SIZE(android_sensor_info_sensitivity_range),
- android_sensor_info_sensitivity_range);
-
- int64_t android_sensor_info_max_frame_duration[] = {30000000000};
- m.addInt64(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
- ARRAY_SIZE(android_sensor_info_max_frame_duration),
- android_sensor_info_max_frame_duration);
-
- float android_sensor_info_physical_size[] = {3.2, 2.4};
- m.addFloat(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
- ARRAY_SIZE(android_sensor_info_physical_size),
- android_sensor_info_physical_size);
-
- int32_t android_sensor_info_pixel_array_size[] = {640, 480};
- m.addInt32(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
- ARRAY_SIZE(android_sensor_info_pixel_array_size),
- android_sensor_info_pixel_array_size);
-
- int32_t android_sensor_orientation[] = {0};
- m.addInt32(ANDROID_SENSOR_ORIENTATION,
- ARRAY_SIZE(android_sensor_orientation),
- android_sensor_orientation);
-
- /* End of static camera characteristics */
-
- return clone_camera_metadata(m.generate());
-}
-
int Camera::configureStreams(camera3_stream_configuration_t *stream_config)
{
camera3_stream_t *astream;
Stream **newStreams = NULL;
- CAMTRACE_CALL();
ALOGV("%s:%d: stream_config=%p", __func__, mId, stream_config);
+ ATRACE_CALL();
+ android::Mutex::Autolock al(mDeviceLock);
if (stream_config == NULL) {
ALOGE("%s:%d: NULL stream configuration array", __func__, mId);
@@ -317,8 +159,6 @@
ALOGV("%s:%d: Number of Streams: %d", __func__, mId,
stream_config->num_streams);
- pthread_mutex_lock(&mMutex);
-
// Mark all current streams unused for now
for (int i = 0; i < mNumStreams; i++)
mStreams[i]->mReuse = false;
@@ -356,14 +196,11 @@
// Clear out last seen settings metadata
setSettings(NULL);
-
- pthread_mutex_unlock(&mMutex);
return 0;
err_out:
// Clean up temporary streams, preserve existing mStreams/mNumStreams
destroyStreams(newStreams, stream_config->num_streams);
- pthread_mutex_unlock(&mMutex);
return -EINVAL;
}
@@ -469,15 +306,20 @@
return stream->registerBuffers(buf_set);
}
+bool Camera::isValidTemplateType(int type)
+{
+ return type < 1 || type >= CAMERA3_TEMPLATE_COUNT;
+}
+
const camera_metadata_t* Camera::constructDefaultRequestSettings(int type)
{
ALOGV("%s:%d: type=%d", __func__, mId, type);
- if (type < 1 || type >= CAMERA3_TEMPLATE_COUNT) {
+ if (!isValidTemplateType(type)) {
ALOGE("%s:%d: Invalid template request type: %d", __func__, mId, type);
return NULL;
}
- return mTemplates[type]->generate();
+ return mTemplates[type];
}
int Camera::processCaptureRequest(camera3_capture_request_t *request)
@@ -485,7 +327,7 @@
camera3_capture_result result;
ALOGV("%s:%d: request=%p", __func__, mId, request);
- CAMTRACE_CALL();
+ ATRACE_CALL();
if (request == NULL) {
ALOGE("%s:%d: NULL request recieved", __func__, mId);
@@ -565,12 +407,6 @@
mSettings = clone_camera_metadata(new_settings);
}
-bool Camera::isValidCaptureSettings(const camera_metadata_t* /*settings*/)
-{
- // TODO: reject settings that cannot be captured
- return true;
-}
-
bool Camera::isValidReprocessSettings(const camera_metadata_t* /*settings*/)
{
// TODO: reject settings that cannot be reprocessed
@@ -631,16 +467,65 @@
mCallbackOps->notify(mCallbackOps, &m);
}
-void Camera::getMetadataVendorTagOps(vendor_tag_query_ops_t *ops)
-{
- ALOGV("%s:%d: ops=%p", __func__, mId, ops);
- // TODO: return vendor tag ops
-}
-
void Camera::dump(int fd)
{
ALOGV("%s:%d: Dumping to fd %d", __func__, mId, fd);
- // TODO: dprintf all relevant state to fd
+ ATRACE_CALL();
+ android::Mutex::Autolock al(mDeviceLock);
+
+ dprintf(fd, "Camera ID: %d (Busy: %d)\n", mId, mBusy);
+
+ // TODO: dump all settings
+ dprintf(fd, "Most Recent Settings: (%p)\n", mSettings);
+
+ dprintf(fd, "Number of streams: %d\n", mNumStreams);
+ for (int i = 0; i < mNumStreams; i++) {
+ dprintf(fd, "Stream %d/%d:\n", i, mNumStreams);
+ mStreams[i]->dump(fd);
+ }
+}
+
+const char* Camera::templateToString(int type)
+{
+ switch (type) {
+ case CAMERA3_TEMPLATE_PREVIEW:
+ return "CAMERA3_TEMPLATE_PREVIEW";
+ case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ return "CAMERA3_TEMPLATE_STILL_CAPTURE";
+ case CAMERA3_TEMPLATE_VIDEO_RECORD:
+ return "CAMERA3_TEMPLATE_VIDEO_RECORD";
+ case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+ return "CAMERA3_TEMPLATE_VIDEO_SNAPSHOT";
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+ return "CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG";
+ }
+ // TODO: support vendor templates
+ return "Invalid template type!";
+}
+
+int Camera::setTemplate(int type, camera_metadata_t *settings)
+{
+ android::Mutex::Autolock al(mDeviceLock);
+
+ if (!isValidTemplateType(type)) {
+ ALOGE("%s:%d: Invalid template request type: %d", __func__, mId, type);
+ return -EINVAL;
+ }
+
+ if (mTemplates[type] != NULL) {
+ ALOGE("%s:%d: Setting already constructed template type %s(%d)",
+ __func__, mId, templateToString(type), type);
+ return -EINVAL;
+ }
+
+ // Make a durable copy of the underlying metadata
+ mTemplates[type] = clone_camera_metadata(settings);
+ if (mTemplates[type] == NULL) {
+ ALOGE("%s:%d: Failed to clone metadata %p for template type %s(%d)",
+ __func__, mId, settings, templateToString(type), type);
+ return -EINVAL;
+ }
+ return 0;
}
extern "C" {
@@ -680,28 +565,30 @@
return camdev_to_camera(dev)->processCaptureRequest(request);
}
-static void get_metadata_vendor_tag_ops(const camera3_device_t *dev,
- vendor_tag_query_ops_t *ops)
-{
- camdev_to_camera(dev)->getMetadataVendorTagOps(ops);
-}
-
static void dump(const camera3_device_t *dev, int fd)
{
camdev_to_camera(dev)->dump(fd);
}
+
+static int flush(const camera3_device_t*)
+{
+ ALOGE("%s: unimplemented.", __func__);
+ return -1;
+}
+
} // extern "C"
const camera3_device_ops_t Camera::sOps = {
- .initialize = default_camera_hal::initialize,
- .configure_streams = default_camera_hal::configure_streams,
+ .initialize = default_camera_hal::initialize,
+ .configure_streams = default_camera_hal::configure_streams,
.register_stream_buffers = default_camera_hal::register_stream_buffers,
- .construct_default_request_settings =
- default_camera_hal::construct_default_request_settings,
+ .construct_default_request_settings
+ = default_camera_hal::construct_default_request_settings,
.process_capture_request = default_camera_hal::process_capture_request,
- .get_metadata_vendor_tag_ops =
- default_camera_hal::get_metadata_vendor_tag_ops,
- .dump = default_camera_hal::dump
+ .get_metadata_vendor_tag_ops = NULL,
+ .dump = default_camera_hal::dump,
+ .flush = default_camera_hal::flush,
+ .reserved = {0},
};
} // namespace default_camera_hal
diff --git a/modules/camera/Camera.h b/modules/camera/Camera.h
index be672f9..0ceaf25 100644
--- a/modules/camera/Camera.h
+++ b/modules/camera/Camera.h
@@ -17,9 +17,9 @@
#ifndef CAMERA_H_
#define CAMERA_H_
-#include <pthread.h>
#include <hardware/hardware.h>
#include <hardware/camera3.h>
+#include <utils/Mutex.h>
#include "Metadata.h"
#include "Stream.h"
@@ -28,12 +28,14 @@
// This is constructed when the HAL module is loaded, one per physical camera.
// It is opened by the framework, and must be closed before it can be opened
// again.
+// This is an abstract class, containing all logic and data shared between all
+// camera devices (front, back, etc) and common to the ISP.
class Camera {
public:
// id is used to distinguish cameras. 0 <= id < NUM_CAMERAS.
// module is a handle to the HAL module, used when the device is opened.
Camera(int id);
- ~Camera();
+ virtual ~Camera();
// Common Camera Device Operations (see <hardware/camera_common.h>)
int open(const hw_module_t *module, hw_device_t **device);
@@ -46,15 +48,24 @@
int registerStreamBuffers(const camera3_stream_buffer_set_t *buf_set);
const camera_metadata_t *constructDefaultRequestSettings(int type);
int processCaptureRequest(camera3_capture_request_t *request);
- void getMetadataVendorTagOps(vendor_tag_query_ops_t *ops);
void dump(int fd);
- // Camera device handle returned to framework for use
- camera3_device_t mDevice;
+
+ protected:
+ // Initialize static camera characteristics for individual device
+ virtual camera_metadata_t *initStaticInfo() = 0;
+ // Verify settings are valid for a capture
+ virtual bool isValidCaptureSettings(const camera_metadata_t *) = 0;
+ // Separate initialization method for individual devices when opened
+ virtual int initDevice() = 0;
+ // Accessor used by initDevice() to set the templates' metadata
+ int setTemplate(int type, camera_metadata_t *static_info);
+ // Prettyprint template names
+ const char* templateToString(int type);
private:
- // Separate initialization method for static metadata
- camera_metadata_t *initStaticInfo();
+ // Camera device handle returned to framework for use
+ camera3_device_t mDevice;
// Reuse a stream already created by this device
Stream *reuseStream(camera3_stream_t *astream);
// Destroy all streams in a stream array, and the array itself
@@ -65,8 +76,6 @@
void setupStreams(Stream **array, int count);
// Copy new settings for re-use and clean up old settings.
void setSettings(const camera_metadata_t *new_settings);
- // Verify settings are valid for a capture
- bool isValidCaptureSettings(const camera_metadata_t *settings);
// Verify settings are valid for reprocessing an input buffer
bool isValidReprocessSettings(const camera_metadata_t *settings);
// Process an output buffer
@@ -74,6 +83,8 @@
camera3_stream_buffer_t *out);
// Send a shutter notify message with start of exposure time
void notifyShutter(uint32_t frame_number, uint64_t timestamp);
+ // Is type a valid template type (and valid index into mTemplates)
+ bool isValidTemplateType(int type);
// Identifier used by framework to distinguish cameras
const int mId;
@@ -88,16 +99,16 @@
// Methods used to call back into the framework
const camera3_callback_ops_t *mCallbackOps;
// Lock protecting the Camera object for modifications
- pthread_mutex_t mMutex;
+ android::Mutex mDeviceLock;
// Lock protecting only static camera characteristics, which may
// be accessed without the camera device open
- pthread_mutex_t mStaticInfoMutex;
+ android::Mutex mStaticInfoLock;
// Array of handles to streams currently in use by the device
Stream **mStreams;
// Number of streams in mStreams
int mNumStreams;
// Static array of standard camera settings templates
- Metadata *mTemplates[CAMERA3_TEMPLATE_COUNT];
+ camera_metadata_t *mTemplates[CAMERA3_TEMPLATE_COUNT];
// Most recent request settings seen, memoized to be reused
camera_metadata_t *mSettings;
};
diff --git a/modules/camera/CameraHAL.cpp b/modules/camera/CameraHAL.cpp
index dfbbe4c..6f64a0d 100644
--- a/modules/camera/CameraHAL.cpp
+++ b/modules/camera/CameraHAL.cpp
@@ -17,7 +17,8 @@
#include <cstdlib>
#include <hardware/camera_common.h>
#include <hardware/hardware.h>
-#include "Camera.h"
+#include "ExampleCamera.h"
+#include "VendorTags.h"
//#define LOG_NDEBUG 0
#define LOG_TAG "DefaultCameraHAL"
@@ -38,25 +39,24 @@
// Default Camera HAL has 2 cameras, front and rear.
static CameraHAL gCameraHAL(2);
+// Handle containing vendor tag functionality
+static VendorTags gVendorTags;
CameraHAL::CameraHAL(int num_cameras)
: mNumberOfCameras(num_cameras),
mCallbacks(NULL)
{
- int i;
-
// Allocate camera array and instantiate camera devices
mCameras = new Camera*[mNumberOfCameras];
- for (i = 0; i < mNumberOfCameras; i++) {
- mCameras[i] = new Camera(i);
- }
+ // Rear camera
+ mCameras[0] = new ExampleCamera(0);
+ // Front camera
+ mCameras[1] = new ExampleCamera(1);
}
CameraHAL::~CameraHAL()
{
- int i;
-
- for (i = 0; i < mNumberOfCameras; i++) {
+ for (int i = 0; i < mNumberOfCameras; i++) {
delete mCameras[i];
}
delete [] mCameras;
@@ -124,6 +124,41 @@
return gCameraHAL.setCallbacks(callbacks);
}
+static int get_tag_count(const vendor_tag_ops_t* ops)
+{
+ return gVendorTags.getTagCount(ops);
+}
+
+static void get_all_tags(const vendor_tag_ops_t* ops, uint32_t* tag_array)
+{
+ gVendorTags.getAllTags(ops, tag_array);
+}
+
+static const char* get_section_name(const vendor_tag_ops_t* ops, uint32_t tag)
+{
+ return gVendorTags.getSectionName(ops, tag);
+}
+
+static const char* get_tag_name(const vendor_tag_ops_t* ops, uint32_t tag)
+{
+ return gVendorTags.getTagName(ops, tag);
+}
+
+static int get_tag_type(const vendor_tag_ops_t* ops, uint32_t tag)
+{
+ return gVendorTags.getTagType(ops, tag);
+}
+
+static void get_vendor_tag_ops(vendor_tag_ops_t* ops)
+{
+ ALOGV("%s : ops=%p", __func__, ops);
+ ops->get_tag_count = get_tag_count;
+ ops->get_all_tags = get_all_tags;
+ ops->get_section_name = get_section_name;
+ ops->get_tag_name = get_tag_name;
+ ops->get_tag_type = get_tag_type;
+}
+
static int open_dev(const hw_module_t* mod, const char* name, hw_device_t** dev)
{
return gCameraHAL.open(mod, name, dev);
@@ -136,7 +171,7 @@
camera_module_t HAL_MODULE_INFO_SYM __attribute__ ((visibility("default"))) = {
common : {
tag : HARDWARE_MODULE_TAG,
- module_api_version : CAMERA_MODULE_API_VERSION_2_0,
+ module_api_version : CAMERA_MODULE_API_VERSION_2_2,
hal_api_version : HARDWARE_HAL_API_VERSION,
id : CAMERA_HARDWARE_MODULE_ID,
name : "Default Camera HAL",
@@ -147,7 +182,10 @@
},
get_number_of_cameras : get_number_of_cameras,
get_camera_info : get_camera_info,
- set_callbacks : set_callbacks
+ set_callbacks : set_callbacks,
+ get_vendor_tag_ops : get_vendor_tag_ops,
+ open_legacy : NULL,
+ reserved : {0},
};
} // extern "C"
diff --git a/modules/camera/CameraHAL.h b/modules/camera/CameraHAL.h
index ba0db4e..00c74e5 100644
--- a/modules/camera/CameraHAL.h
+++ b/modules/camera/CameraHAL.h
@@ -20,7 +20,9 @@
#include <cutils/bitops.h>
#include <hardware/hardware.h>
#include <hardware/camera_common.h>
+#include <system/camera_vendor_tags.h>
#include "Camera.h"
+#include "VendorTags.h"
namespace default_camera_hal {
// CameraHAL contains all module state that isn't specific to an individual
@@ -34,6 +36,7 @@
int getNumberOfCameras();
int getCameraInfo(int camera_id, struct camera_info *info);
int setCallbacks(const camera_module_callbacks_t *callbacks);
+ void getVendorTagOps(vendor_tag_ops_t* ops);
// Hardware Module Interface (see <hardware/hardware.h>)
int open(const hw_module_t* mod, const char* name, hw_device_t** dev);
diff --git a/modules/camera/ExampleCamera.cpp b/modules/camera/ExampleCamera.cpp
new file mode 100644
index 0000000..ca28b99
--- /dev/null
+++ b/modules/camera/ExampleCamera.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <system/camera_metadata.h>
+#include "Camera.h"
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ExampleCamera"
+#include <cutils/log.h>
+
+#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
+#include <utils/Trace.h>
+
+#include "ExampleCamera.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+
+namespace default_camera_hal {
+
+ExampleCamera::ExampleCamera(int id) : Camera(id)
+{
+}
+
+ExampleCamera::~ExampleCamera()
+{
+}
+
+camera_metadata_t *ExampleCamera::initStaticInfo()
+{
+ /*
+ * Setup static camera info. This will have to customized per camera
+ * device.
+ */
+ Metadata m;
+
+ /* android.control */
+ int32_t android_control_ae_available_target_fps_ranges[] = {30, 30};
+ m.addInt32(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ ARRAY_SIZE(android_control_ae_available_target_fps_ranges),
+ android_control_ae_available_target_fps_ranges);
+
+ int32_t android_control_ae_compensation_range[] = {-4, 4};
+ m.addInt32(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ ARRAY_SIZE(android_control_ae_compensation_range),
+ android_control_ae_compensation_range);
+
+ camera_metadata_rational_t android_control_ae_compensation_step[] = {{2,1}};
+ m.addRational(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ ARRAY_SIZE(android_control_ae_compensation_step),
+ android_control_ae_compensation_step);
+
+ int32_t android_control_max_regions[] = {/*AE*/ 1,/*AWB*/ 1,/*AF*/ 1};
+ m.addInt32(ANDROID_CONTROL_MAX_REGIONS,
+ ARRAY_SIZE(android_control_max_regions),
+ android_control_max_regions);
+
+ /* android.jpeg */
+ int32_t android_jpeg_available_thumbnail_sizes[] = {0, 0, 128, 96};
+ m.addInt32(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ ARRAY_SIZE(android_jpeg_available_thumbnail_sizes),
+ android_jpeg_available_thumbnail_sizes);
+
+ int32_t android_jpeg_max_size[] = {13 * 1024 * 1024}; // 13MB
+ m.addInt32(ANDROID_JPEG_MAX_SIZE,
+ ARRAY_SIZE(android_jpeg_max_size),
+ android_jpeg_max_size);
+
+ /* android.lens */
+ float android_lens_info_available_focal_lengths[] = {1.0};
+ m.addFloat(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+ ARRAY_SIZE(android_lens_info_available_focal_lengths),
+ android_lens_info_available_focal_lengths);
+
+ /* android.request */
+ int32_t android_request_max_num_output_streams[] = {0, 3, 1};
+ m.addInt32(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
+ ARRAY_SIZE(android_request_max_num_output_streams),
+ android_request_max_num_output_streams);
+
+ /* android.scaler */
+ int32_t android_scaler_available_formats[] = {
+ HAL_PIXEL_FORMAT_RAW_SENSOR,
+ HAL_PIXEL_FORMAT_BLOB,
+ HAL_PIXEL_FORMAT_RGBA_8888,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ // These are handled by YCbCr_420_888
+ // HAL_PIXEL_FORMAT_YV12,
+ // HAL_PIXEL_FORMAT_YCrCb_420_SP,
+ HAL_PIXEL_FORMAT_YCbCr_420_888};
+ m.addInt32(ANDROID_SCALER_AVAILABLE_FORMATS,
+ ARRAY_SIZE(android_scaler_available_formats),
+ android_scaler_available_formats);
+
+ int64_t android_scaler_available_jpeg_min_durations[] = {1};
+ m.addInt64(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
+ ARRAY_SIZE(android_scaler_available_jpeg_min_durations),
+ android_scaler_available_jpeg_min_durations);
+
+ int32_t android_scaler_available_jpeg_sizes[] = {640, 480};
+ m.addInt32(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+ ARRAY_SIZE(android_scaler_available_jpeg_sizes),
+ android_scaler_available_jpeg_sizes);
+
+ float android_scaler_available_max_digital_zoom[] = {1};
+ m.addFloat(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+ ARRAY_SIZE(android_scaler_available_max_digital_zoom),
+ android_scaler_available_max_digital_zoom);
+
+ int64_t android_scaler_available_processed_min_durations[] = {1};
+ m.addInt64(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
+ ARRAY_SIZE(android_scaler_available_processed_min_durations),
+ android_scaler_available_processed_min_durations);
+
+ int32_t android_scaler_available_processed_sizes[] = {640, 480};
+ m.addInt32(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+ ARRAY_SIZE(android_scaler_available_processed_sizes),
+ android_scaler_available_processed_sizes);
+
+ int64_t android_scaler_available_raw_min_durations[] = {1};
+ m.addInt64(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
+ ARRAY_SIZE(android_scaler_available_raw_min_durations),
+ android_scaler_available_raw_min_durations);
+
+ int32_t android_scaler_available_raw_sizes[] = {640, 480};
+ m.addInt32(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+ ARRAY_SIZE(android_scaler_available_raw_sizes),
+ android_scaler_available_raw_sizes);
+
+ /* android.sensor */
+
+ int32_t android_sensor_info_active_array_size[] = {0, 0, 640, 480};
+ m.addInt32(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ ARRAY_SIZE(android_sensor_info_active_array_size),
+ android_sensor_info_active_array_size);
+
+ int32_t android_sensor_info_sensitivity_range[] =
+ {100, 1600};
+ m.addInt32(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+ ARRAY_SIZE(android_sensor_info_sensitivity_range),
+ android_sensor_info_sensitivity_range);
+
+ int64_t android_sensor_info_max_frame_duration[] = {30000000000};
+ m.addInt64(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+ ARRAY_SIZE(android_sensor_info_max_frame_duration),
+ android_sensor_info_max_frame_duration);
+
+ float android_sensor_info_physical_size[] = {3.2, 2.4};
+ m.addFloat(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+ ARRAY_SIZE(android_sensor_info_physical_size),
+ android_sensor_info_physical_size);
+
+ int32_t android_sensor_info_pixel_array_size[] = {640, 480};
+ m.addInt32(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+ ARRAY_SIZE(android_sensor_info_pixel_array_size),
+ android_sensor_info_pixel_array_size);
+
+ int32_t android_sensor_orientation[] = {0};
+ m.addInt32(ANDROID_SENSOR_ORIENTATION,
+ ARRAY_SIZE(android_sensor_orientation),
+ android_sensor_orientation);
+
+ /* End of static camera characteristics */
+
+ return clone_camera_metadata(m.get());
+}
+
+int ExampleCamera::initDevice()
+{
+ int res;
+ Metadata base;
+
+ // Create standard settings templates from copies of base metadata
+ // TODO: use vendor tags in base metadata
+ res = base.add1UInt8(ANDROID_CONTROL_MODE, ANDROID_CONTROL_MODE_OFF);
+ if (res)
+ return res;
+
+ // Use base settings to create all other templates and set them
+ res = setPreviewTemplate(base);
+ if (res)
+ return res;
+ res = setStillTemplate(base);
+ if (res)
+ return res;
+ res = setRecordTemplate(base);
+ if (res)
+ return res;
+ res = setSnapshotTemplate(base);
+ if (res)
+ return res;
+ res = setZslTemplate(base);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+int ExampleCamera::setPreviewTemplate(Metadata m)
+{
+ // Setup default preview controls
+ int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW);
+
+ if (res)
+ return res;
+ // TODO: set fast auto-focus, auto-whitebalance, auto-exposure, auto flash
+ return setTemplate(CAMERA3_TEMPLATE_PREVIEW, m.get());
+}
+
+int ExampleCamera::setStillTemplate(Metadata m)
+{
+ int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
+ // Setup default still capture controls
+ if (res)
+ return res;
+ // TODO: set fast auto-focus, auto-whitebalance, auto-exposure, auto flash
+ return setTemplate(CAMERA3_TEMPLATE_STILL_CAPTURE, m.get());
+}
+
+int ExampleCamera::setRecordTemplate(Metadata m)
+{
+ int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD);
+ // Setup default video record controls
+ if (res)
+ return res;
+ // TODO: set slow auto-focus, auto-whitebalance, auto-exposure, flash off
+ return setTemplate(CAMERA3_TEMPLATE_VIDEO_RECORD, m.get());
+}
+
+int ExampleCamera::setSnapshotTemplate(Metadata m)
+{
+ int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT);
+ // Setup default video snapshot controls
+ if (res)
+ return res;
+ // TODO: set slow auto-focus, auto-whitebalance, auto-exposure, flash off
+ return setTemplate(CAMERA3_TEMPLATE_VIDEO_SNAPSHOT, m.get());
+}
+
+int ExampleCamera::setZslTemplate(Metadata m)
+{
+ int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG);
+ // Setup default zero shutter lag controls
+ if (res)
+ return res;
+ // TODO: set reprocessing parameters for zsl input queue
+ return setTemplate(CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG, m.get());
+}
+
+bool ExampleCamera::isValidCaptureSettings(const camera_metadata_t* settings)
+{
+ // TODO: reject settings that cannot be captured
+ return true;
+}
+
+} // namespace default_camera_hal
diff --git a/modules/camera/ExampleCamera.h b/modules/camera/ExampleCamera.h
new file mode 100644
index 0000000..45c4a94
--- /dev/null
+++ b/modules/camera/ExampleCamera.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EXAMPLE_CAMERA_H_
+#define EXAMPLE_CAMERA_H_
+
+#include <system/camera_metadata.h>
+#include "Camera.h"
+
+namespace default_camera_hal {
+// ExampleCamera is an example for a specific camera device. The Camera object
+// contains all logic common between all cameras (e.g. front and back cameras),
+// while a specific camera device (e.g. ExampleCamera) holds all specific
+// metadata and logic about that device.
+class ExampleCamera : public Camera {
+ public:
+ ExampleCamera(int id);
+ ~ExampleCamera();
+
+ private:
+ // Initialize static camera characteristics for individual device
+ camera_metadata_t *initStaticInfo();
+ // Initialize whole device (templates/etc) when opened
+ int initDevice();
+ // Initialize each template metadata controls
+ int setPreviewTemplate(Metadata m);
+ int setStillTemplate(Metadata m);
+ int setRecordTemplate(Metadata m);
+ int setSnapshotTemplate(Metadata m);
+ int setZslTemplate(Metadata m);
+ // Verify settings are valid for a capture with this device
+ bool isValidCaptureSettings(const camera_metadata_t* settings);
+};
+} // namespace default_camera_hal
+
+#endif // CAMERA_H_
diff --git a/modules/camera/Metadata.cpp b/modules/camera/Metadata.cpp
index d5854f9..f195534 100644
--- a/modules/camera/Metadata.cpp
+++ b/modules/camera/Metadata.cpp
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include <pthread.h>
#include <system/camera_metadata.h>
//#define LOG_NDEBUG 0
@@ -22,102 +21,85 @@
#include <cutils/log.h>
#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
-#include <cutils/trace.h>
-#include "ScopedTrace.h"
+#include <utils/Trace.h>
#include "Metadata.h"
namespace default_camera_hal {
-Metadata::Metadata()
- : mHead(NULL),
- mTail(NULL),
- mEntryCount(0),
- mDataCount(0),
- mGenerated(NULL),
- mDirty(true)
+Metadata::Metadata():
+ mData(NULL)
{
- // NULL (default) pthread mutex attributes
- pthread_mutex_init(&mMutex, NULL);
}
Metadata::~Metadata()
{
- Entry *current = mHead;
-
- while (current != NULL) {
- Entry *tmp = current;
- current = current->mNext;
- delete tmp;
- }
-
- if (mGenerated != NULL)
- free_camera_metadata(mGenerated);
-
- pthread_mutex_destroy(&mMutex);
+ replace(NULL);
}
-Metadata::Metadata(uint8_t mode, uint8_t intent)
- : mHead(NULL),
- mTail(NULL),
- mEntryCount(0),
- mDataCount(0),
- mGenerated(NULL),
- mDirty(true)
+void Metadata::replace(camera_metadata_t *m)
{
- pthread_mutex_init(&mMutex, NULL);
-
- if (validate(ANDROID_CONTROL_MODE, TYPE_BYTE, 1)) {
- int res = add(ANDROID_CONTROL_MODE, 1, &mode);
- if (res != 0) {
- ALOGE("%s: Unable to add mode to template!", __func__);
- }
- } else {
- ALOGE("%s: Invalid mode constructing template!", __func__);
+ if (m == mData) {
+ ALOGE("%s: Replacing metadata with itself?!", __func__);
+ return;
}
-
- if (validate(ANDROID_CONTROL_CAPTURE_INTENT, TYPE_BYTE, 1)) {
- int res = add(ANDROID_CONTROL_CAPTURE_INTENT, 1, &intent);
- if (res != 0) {
- ALOGE("%s: Unable to add capture intent to template!", __func__);
- }
- } else {
- ALOGE("%s: Invalid capture intent constructing template!", __func__);
- }
+ if (mData)
+ free_camera_metadata(mData);
+ mData = m;
}
-int Metadata::addUInt8(uint32_t tag, int count, uint8_t *data)
+int Metadata::init(const camera_metadata_t *metadata)
+{
+ camera_metadata_t* tmp;
+
+ if (!validate_camera_metadata_structure(metadata, NULL))
+ return -EINVAL;
+
+ tmp = clone_camera_metadata(metadata);
+ if (tmp == NULL)
+ return -EINVAL;
+
+ replace(tmp);
+ return 0;
+}
+
+int Metadata::addUInt8(uint32_t tag, int count, const uint8_t *data)
{
if (!validate(tag, TYPE_BYTE, count)) return -EINVAL;
return add(tag, count, data);
}
-int Metadata::addInt32(uint32_t tag, int count, int32_t *data)
+int Metadata::add1UInt8(uint32_t tag, const uint8_t data)
+{
+ return addUInt8(tag, 1, &data);
+}
+
+int Metadata::addInt32(uint32_t tag, int count, const int32_t *data)
{
if (!validate(tag, TYPE_INT32, count)) return -EINVAL;
return add(tag, count, data);
}
-int Metadata::addFloat(uint32_t tag, int count, float *data)
+int Metadata::addFloat(uint32_t tag, int count, const float *data)
{
if (!validate(tag, TYPE_FLOAT, count)) return -EINVAL;
return add(tag, count, data);
}
-int Metadata::addInt64(uint32_t tag, int count, int64_t *data)
+int Metadata::addInt64(uint32_t tag, int count, const int64_t *data)
{
if (!validate(tag, TYPE_INT64, count)) return -EINVAL;
return add(tag, count, data);
}
-int Metadata::addDouble(uint32_t tag, int count, double *data)
+int Metadata::addDouble(uint32_t tag, int count, const double *data)
{
if (!validate(tag, TYPE_DOUBLE, count)) return -EINVAL;
return add(tag, count, data);
}
int Metadata::addRational(uint32_t tag, int count,
- camera_metadata_rational_t *data)
+ const camera_metadata_rational_t *data)
{
if (!validate(tag, TYPE_RATIONAL, count)) return -EINVAL;
return add(tag, count, data);
@@ -145,102 +127,48 @@
return true;
}
-int Metadata::add(uint32_t tag, int count, void *tag_data)
+int Metadata::add(uint32_t tag, int count, const void *tag_data)
{
+ int res;
+ camera_metadata_t* tmp;
int tag_type = get_camera_metadata_tag_type(tag);
- size_t type_sz = camera_metadata_type_size[tag_type];
+ size_t size = calculate_camera_metadata_entry_data_size(tag_type, count);
+ size_t entry_capacity = get_camera_metadata_entry_count(mData) + 1;
+ size_t data_capacity = get_camera_metadata_data_count(mData) + size;
- // Allocate array to hold new metadata
- void *data = malloc(count * type_sz);
- if (data == NULL)
+ // Opportunistically attempt to add if metadata has room for it
+ if (!add_camera_metadata_entry(mData, tag, tag_data, count))
+ return 0;
+
+ // Double new dimensions to minimize future reallocations
+ tmp = allocate_camera_metadata(entry_capacity * 2, data_capacity * 2);
+ if (tmp == NULL) {
+ ALOGE("%s: Failed to allocate new metadata with %zu entries, %zu data",
+ __func__, entry_capacity, data_capacity);
return -ENOMEM;
- memcpy(data, tag_data, count * type_sz);
+ }
+ // Append the current metadata to the new (empty) metadata
+ res = append_camera_metadata(tmp, mData);
+ if (res) {
+ ALOGE("%s: Failed to append old metadata %p to new %p",
+ __func__, mData, tmp);
+ return res;
+ }
+ // Add the remaining new item
+ res = add_camera_metadata_entry(tmp, tag, tag_data, count);
+ if (res) {
+ ALOGE("%s: Failed to add new entry (%d, %p, %d) to metadata %p",
+ __func__, tag, tag_data, count, tmp);
+ return res;
+ }
- pthread_mutex_lock(&mMutex);
- mEntryCount++;
- mDataCount += calculate_camera_metadata_entry_data_size(tag_type, count);
- push(new Entry(tag, data, count));
- mDirty = true;
- pthread_mutex_unlock(&mMutex);
+ replace(tmp);
return 0;
}
-camera_metadata_t* Metadata::generate()
+camera_metadata_t* Metadata::get()
{
- pthread_mutex_lock(&mMutex);
- // Reuse if old generated metadata still valid
- if (!mDirty && mGenerated != NULL) {
- ALOGV("%s: Reusing generated metadata at %p", __func__, mGenerated);
- goto out;
- }
- // Destroy old metadata
- if (mGenerated != NULL) {
- ALOGV("%s: Freeing generated metadata at %p", __func__, mGenerated);
- free_camera_metadata(mGenerated);
- mGenerated = NULL;
- }
- // Generate new metadata structure
- ALOGV("%s: Generating new camera metadata structure, Entries:%d Data:%d",
- __func__, mEntryCount, mDataCount);
- mGenerated = allocate_camera_metadata(mEntryCount, mDataCount);
- if (mGenerated == NULL) {
- ALOGE("%s: Failed to allocate metadata (%d entries %d data)",
- __func__, mEntryCount, mDataCount);
- goto out;
- }
- // Walk list of entries adding each one to newly allocated metadata
- for (Entry *current = mHead; current != NULL; current = current->mNext) {
- int res = add_camera_metadata_entry(mGenerated, current->mTag,
- current->mData, current->mCount);
- if (res != 0) {
- ALOGE("%s: Failed to add camera metadata: %d", __func__, res);
- free_camera_metadata(mGenerated);
- mGenerated = NULL;
- goto out;
- }
- }
-
-out:
- pthread_mutex_unlock(&mMutex);
- return mGenerated;
-}
-
-Metadata::Entry::Entry(uint32_t tag, void *data, int count)
- : mNext(NULL),
- mPrev(NULL),
- mTag(tag),
- mData(data),
- mCount(count)
-{
-}
-
-void Metadata::push(Entry *e)
-{
- if (mHead == NULL) {
- mHead = mTail = e;
- } else {
- mTail->insertAfter(e);
- mTail = e;
- }
-}
-
-Metadata::Entry::~Entry()
-{
- if (mNext != NULL)
- mNext->mPrev = mPrev;
- if (mPrev != NULL)
- mPrev->mNext = mNext;
-}
-
-void Metadata::Entry::insertAfter(Entry *e)
-{
- if (e == NULL)
- return;
- if (mNext != NULL)
- mNext->mPrev = e;
- e->mNext = mNext;
- e->mPrev = this;
- mNext = e;
+ return mData;
}
} // namespace default_camera_hal
diff --git a/modules/camera/Metadata.h b/modules/camera/Metadata.h
index 22d2f22..f432d04 100644
--- a/modules/camera/Metadata.h
+++ b/modules/camera/Metadata.h
@@ -17,10 +17,9 @@
#ifndef METADATA_H_
#define METADATA_H_
+#include <stdint.h>
#include <hardware/camera3.h>
-#include <hardware/gralloc.h>
#include <system/camera_metadata.h>
-#include <system/graphics.h>
namespace default_camera_hal {
// Metadata is a convenience class for dealing with libcamera_metadata
@@ -28,51 +27,32 @@
public:
Metadata();
~Metadata();
- // Constructor used for request metadata templates
- Metadata(uint8_t mode, uint8_t intent);
+ // Initialize with framework metadata
+ int init(const camera_metadata_t *metadata);
- // Parse and add an entry
- int addUInt8(uint32_t tag, int count, uint8_t *data);
- int addInt32(uint32_t tag, int count, int32_t *data);
- int addFloat(uint32_t tag, int count, float *data);
- int addInt64(uint32_t tag, int count, int64_t *data);
- int addDouble(uint32_t tag, int count, double *data);
+ // Parse and add an entry. Allocates and copies new storage for *data.
+ int addUInt8(uint32_t tag, int count, const uint8_t *data);
+ int add1UInt8(uint32_t tag, const uint8_t data);
+ int addInt32(uint32_t tag, int count, const int32_t *data);
+ int addFloat(uint32_t tag, int count, const float *data);
+ int addInt64(uint32_t tag, int count, const int64_t *data);
+ int addDouble(uint32_t tag, int count, const double *data);
int addRational(uint32_t tag, int count,
- camera_metadata_rational_t *data);
- // Generate a camera_metadata structure and fill it with internal data
- camera_metadata_t *generate();
+ const camera_metadata_rational_t *data);
+
+ // Get a handle to the current metadata
+ // This is not a durable handle, and may be destroyed by add*/init
+ camera_metadata_t* get();
private:
+ // Actual internal storage
+ camera_metadata_t* mData;
+ // Destroy old metadata and replace with new
+ void replace(camera_metadata_t *m);
// Validate the tag, type and count for a metadata entry
bool validate(uint32_t tag, int tag_type, int count);
- // Add a verified tag with data to this Metadata structure
- int add(uint32_t tag, int count, void *tag_data);
-
- class Entry {
- public:
- Entry(uint32_t tag, void *data, int count);
- ~Entry();
- Entry *mNext;
- Entry *mPrev;
- const uint32_t mTag;
- const void *mData;
- const int mCount;
- void insertAfter(Entry *e);
- };
- // List ends
- Entry *mHead;
- Entry *mTail;
- // Append entry to list
- void push(Entry *e);
- // Total of entries and entry data size
- int mEntryCount;
- int mDataCount;
- // Save generated metadata, invalidated on update
- camera_metadata_t *mGenerated;
- // Flag to force metadata regeneration
- bool mDirty;
- // Lock protecting the Metadata object for modifications
- pthread_mutex_t mMutex;
+ // Add a verified tag with data
+ int add(uint32_t tag, int count, const void *tag_data);
};
} // namespace default_camera_hal
diff --git a/modules/camera/ScopedTrace.h b/modules/camera/ScopedTrace.h
deleted file mode 100644
index ed00570..0000000
--- a/modules/camera/ScopedTrace.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CAMERA_SCOPED_TRACE_H
-#define CAMERA_SCOPED_TRACE_H
-
-#include <stdint.h>
-#include <cutils/trace.h>
-
-// See <cutils/trace.h> for more tracing macros.
-
-// CAMTRACE_NAME traces the beginning and end of the current scope. To trace
-// the correct start and end times this macro should be declared first in the
-// scope body.
-#define CAMTRACE_NAME(name) ScopedTrace ___tracer(ATRACE_TAG, name)
-// CAMTRACE_CALL is an ATRACE_NAME that uses the current function name.
-#define CAMTRACE_CALL() CAMTRACE_NAME(__FUNCTION__)
-
-namespace default_camera_hal {
-
-class ScopedTrace {
-public:
-inline ScopedTrace(uint64_t tag, const char* name)
- : mTag(tag) {
- atrace_begin(mTag,name);
-}
-
-inline ~ScopedTrace() {
- atrace_end(mTag);
-}
-
-private:
- uint64_t mTag;
-};
-
-}; // namespace default_camera_hal
-
-#endif // CAMERA_SCOPED_TRACE_H
diff --git a/modules/camera/Stream.cpp b/modules/camera/Stream.cpp
index aae7adb..2db3ed2 100644
--- a/modules/camera/Stream.cpp
+++ b/modules/camera/Stream.cpp
@@ -14,18 +14,18 @@
* limitations under the License.
*/
-#include <pthread.h>
+#include <stdio.h>
#include <hardware/camera3.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>
+#include <utils/Mutex.h>
//#define LOG_NDEBUG 0
#define LOG_TAG "Stream"
#include <cutils/log.h>
#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
-#include <cutils/trace.h>
-#include "ScopedTrace.h"
+#include <utils/Trace.h>
#include "Stream.h"
@@ -45,37 +45,32 @@
mBuffers(0),
mNumBuffers(0)
{
- // NULL (default) pthread mutex attributes
- pthread_mutex_init(&mMutex, NULL);
}
Stream::~Stream()
{
- pthread_mutex_lock(&mMutex);
+ android::Mutex::Autolock al(mLock);
unregisterBuffers_L();
- pthread_mutex_unlock(&mMutex);
}
void Stream::setUsage(uint32_t usage)
{
- pthread_mutex_lock(&mMutex);
+ android::Mutex::Autolock al(mLock);
if (usage != mUsage) {
mUsage = usage;
mStream->usage = usage;
unregisterBuffers_L();
}
- pthread_mutex_unlock(&mMutex);
}
void Stream::setMaxBuffers(uint32_t max_buffers)
{
- pthread_mutex_lock(&mMutex);
+ android::Mutex::Autolock al(mLock);
if (max_buffers != mMaxBuffers) {
mMaxBuffers = max_buffers;
mStream->max_buffers = max_buffers;
unregisterBuffers_L();
}
- pthread_mutex_unlock(&mMutex);
}
int Stream::getType()
@@ -95,6 +90,61 @@
mType == CAMERA3_STREAM_BIDIRECTIONAL;
}
+const char* Stream::typeToString(int type)
+{
+ switch (type) {
+ case CAMERA3_STREAM_INPUT:
+ return "CAMERA3_STREAM_INPUT";
+ case CAMERA3_STREAM_OUTPUT:
+ return "CAMERA3_STREAM_OUTPUT";
+ case CAMERA3_STREAM_BIDIRECTIONAL:
+ return "CAMERA3_STREAM_BIDIRECTIONAL";
+ }
+ return "Invalid stream type!";
+}
+
+const char* Stream::formatToString(int format)
+{
+ // See <system/graphics.h> for full list
+ switch (format) {
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ return "BGRA 8888";
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ return "RGBA 8888";
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ return "RGBX 8888";
+ case HAL_PIXEL_FORMAT_RGB_888:
+ return "RGB 888";
+ case HAL_PIXEL_FORMAT_RGB_565:
+ return "RGB 565";
+ case HAL_PIXEL_FORMAT_sRGB_A_8888:
+ return "sRGB A 8888";
+ case HAL_PIXEL_FORMAT_sRGB_X_8888:
+ return "sRGB B 8888";
+ case HAL_PIXEL_FORMAT_Y8:
+ return "Y8";
+ case HAL_PIXEL_FORMAT_Y16:
+ return "Y16";
+ case HAL_PIXEL_FORMAT_YV12:
+ return "YV12";
+ case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+ return "NV16";
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ return "NV21";
+ case HAL_PIXEL_FORMAT_YCbCr_422_I:
+ return "YUY2";
+ case HAL_PIXEL_FORMAT_RAW_SENSOR:
+ return "RAW SENSOR";
+ case HAL_PIXEL_FORMAT_BLOB:
+ return "BLOB";
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+ return "IMPLEMENTATION DEFINED";
+ case HAL_PIXEL_FORMAT_YCbCr_420_888:
+ return "FLEXIBLE YCbCr 420 888";
+ }
+ return "Invalid stream format!";
+}
+
bool Stream::isRegistered()
{
return mRegistered;
@@ -113,15 +163,15 @@
return false;
}
if (s->stream_type != mType) {
- // TODO: prettyprint type string
- ALOGE("%s:%d: Mismatched type in reused stream. Got %d expect %d",
- __func__, mId, s->stream_type, mType);
+ ALOGE("%s:%d: Mismatched type in reused stream. Got %s(%d) "
+ "expect %s(%d)", __func__, mId, typeToString(s->stream_type),
+ s->stream_type, typeToString(mType), mType);
return false;
}
if (s->format != mFormat) {
- // TODO: prettyprint format string
- ALOGE("%s:%d: Mismatched format in reused stream. Got %d expect %d",
- __func__, mId, s->format, mFormat);
+ ALOGE("%s:%d: Mismatched format in reused stream. Got %s(%d) "
+ "expect %s(%d)", __func__, mId, formatToString(s->format),
+ s->format, formatToString(mFormat), mFormat);
return false;
}
if (s->width != mWidth) {
@@ -139,7 +189,8 @@
int Stream::registerBuffers(const camera3_stream_buffer_set_t *buf_set)
{
- CAMTRACE_CALL();
+ ATRACE_CALL();
+ android::Mutex::Autolock al(mLock);
if (buf_set->stream != mStream) {
ALOGE("%s:%d: Buffer set for invalid stream. Got %p expect %p",
@@ -147,8 +198,6 @@
return -EINVAL;
}
- pthread_mutex_lock(&mMutex);
-
mNumBuffers = buf_set->num_buffers;
mBuffers = new buffer_handle_t*[mNumBuffers];
@@ -160,12 +209,10 @@
}
mRegistered = true;
- pthread_mutex_unlock(&mMutex);
-
return 0;
}
-// This must only be called with mMutex held
+// This must only be called with mLock held
void Stream::unregisterBuffers_L()
{
mRegistered = false;
@@ -174,4 +221,23 @@
// TODO: unregister buffers from hw
}
+void Stream::dump(int fd)
+{
+ android::Mutex::Autolock al(mLock);
+
+ dprintf(fd, "Stream ID: %d (%p)\n", mId, mStream);
+ dprintf(fd, "Stream Type: %s (%d)\n", typeToString(mType), mType);
+ dprintf(fd, "Width: %"PRIu32" Height: %"PRIu32"\n", mWidth, mHeight);
+ dprintf(fd, "Stream Format: %s (%d)", formatToString(mFormat), mFormat);
+ // ToDo: prettyprint usage mask flags
+ dprintf(fd, "Gralloc Usage Mask: %#"PRIx32"\n", mUsage);
+ dprintf(fd, "Max Buffer Count: %"PRIu32"\n", mMaxBuffers);
+ dprintf(fd, "Buffers Registered: %s\n", mRegistered ? "true" : "false");
+ dprintf(fd, "Number of Buffers: %"PRIu32"\n", mNumBuffers);
+ for (uint32_t i = 0; i < mNumBuffers; i++) {
+ dprintf(fd, "Buffer %"PRIu32"/%"PRIu32": %p\n", i, mNumBuffers,
+ mBuffers[i]);
+ }
+}
+
} // namespace default_camera_hal
diff --git a/modules/camera/Stream.h b/modules/camera/Stream.h
index 34abd95..5efbc52 100644
--- a/modules/camera/Stream.h
+++ b/modules/camera/Stream.h
@@ -20,6 +20,7 @@
#include <hardware/camera3.h>
#include <hardware/gralloc.h>
#include <system/graphics.h>
+#include <utils/Mutex.h>
namespace default_camera_hal {
// Stream represents a single input or output stream for a camera device.
@@ -41,12 +42,15 @@
bool isInputType();
bool isOutputType();
bool isRegistered();
+ const char* typeToString(int type);
+ const char* formatToString(int format);
+ void dump(int fd);
// This stream is being reused. Used in stream configuration passes
bool mReuse;
private:
- // Clean up buffer state. must be called with mMutex held.
+ // Clean up buffer state. must be called with mLock held.
void unregisterBuffers_L();
// The camera device id this stream belongs to
@@ -72,7 +76,7 @@
// Number of buffers in mBuffers
unsigned int mNumBuffers;
// Lock protecting the Stream object for modifications
- pthread_mutex_t mMutex;
+ android::Mutex mLock;
};
} // namespace default_camera_hal
diff --git a/modules/camera/VendorTags.cpp b/modules/camera/VendorTags.cpp
new file mode 100644
index 0000000..2c54648
--- /dev/null
+++ b/modules/camera/VendorTags.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <system/camera_metadata.h>
+#include "Metadata.h"
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VendorTags"
+#include <cutils/log.h>
+
+#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL)
+#include <utils/Trace.h>
+
+#include "VendorTags.h"
+
+namespace default_camera_hal {
+
+// Internal representations of vendor tags for convenience.
+// Other classes must access this data via public interfaces.
+// Structured to be easy to extend and contain complexity.
+namespace {
+// Describes a single vendor tag entry
+struct Entry {
+ const char* name;
+ uint8_t type;
+};
+// Describes a vendor tag section
+struct Section {
+ const char* name;
+ uint32_t start;
+ uint32_t end;
+ const Entry* tags;
+};
+
+// Entry arrays for each section
+const Entry DemoWizardry[demo_wizardry_end - demo_wizardry_start] = {
+ [demo_wizardry_dimension_size - demo_wizardry_start] =
+ {"dimensionSize", TYPE_INT32},
+ [demo_wizardry_dimensions - demo_wizardry_start] =
+ {"dimensions", TYPE_INT32},
+ [demo_wizardry_familiar - demo_wizardry_start] =
+ {"familiar", TYPE_BYTE},
+ [demo_wizardry_fire - demo_wizardry_start] =
+ {"fire", TYPE_RATIONAL}
+};
+
+const Entry DemoSorcery[demo_sorcery_end - demo_sorcery_start] = {
+ [demo_sorcery_difficulty - demo_sorcery_start] =
+ {"difficulty", TYPE_INT64},
+ [demo_sorcery_light - demo_sorcery_start] =
+ {"light", TYPE_BYTE}
+};
+
+const Entry DemoMagic[demo_magic_end - demo_magic_start] = {
+ [demo_magic_card_trick - demo_magic_start] =
+ {"cardTrick", TYPE_DOUBLE},
+ [demo_magic_levitation - demo_magic_start] =
+ {"levitation", TYPE_FLOAT}
+};
+
+// Array of all sections
+const Section DemoSections[DEMO_SECTION_COUNT] = {
+ [DEMO_WIZARDRY] = { "demo.wizardry",
+ demo_wizardry_start,
+ demo_wizardry_end,
+ DemoWizardry },
+ [DEMO_SORCERY] = { "demo.sorcery",
+ demo_sorcery_start,
+ demo_sorcery_end,
+ DemoSorcery },
+ [DEMO_MAGIC] = { "demo.magic",
+ demo_magic_start,
+ demo_magic_end,
+ DemoMagic }
+};
+
+// Get a static handle to a specific vendor tag section
+const Section* getSection(uint32_t tag)
+{
+ uint32_t section = (tag - vendor_section_start) >> 16;
+
+ if (tag < vendor_section_start) {
+ ALOGE("%s: Tag 0x%x before vendor section", __func__, tag);
+ return NULL;
+ }
+
+ if (section >= DEMO_SECTION_COUNT) {
+ ALOGE("%s: Tag 0x%x after vendor section", __func__, tag);
+ return NULL;
+ }
+
+ return &DemoSections[section];
+}
+
+// Get a static handle to a specific vendor tag entry
+const Entry* getEntry(uint32_t tag)
+{
+ const Section* section = getSection(tag);
+ int index;
+
+ if (section == NULL)
+ return NULL;
+
+ if (tag >= section->end) {
+ ALOGE("%s: Tag 0x%x outside section", __func__, tag);
+ return NULL;
+ }
+
+ index = tag - section->start;
+ return &section->tags[index];
+}
+} // namespace
+
+VendorTags::VendorTags()
+ : mTagCount(0)
+{
+ for (int i = 0; i < DEMO_SECTION_COUNT; i++) {
+ mTagCount += DemoSections[i].end - DemoSections[i].start;
+ }
+}
+
+VendorTags::~VendorTags()
+{
+}
+
+int VendorTags::getTagCount(const vendor_tag_ops_t* ops)
+{
+ return mTagCount;
+}
+
+void VendorTags::getAllTags(const vendor_tag_ops_t* ops, uint32_t* tag_array)
+{
+ if (tag_array == NULL) {
+ ALOGE("%s: NULL tag_array", __func__);
+ return;
+ }
+
+ for (int i = 0; i < DEMO_SECTION_COUNT; i++) {
+ for (uint32_t tag = DemoSections[i].start;
+ tag < DemoSections[i].end; tag++) {
+ *tag_array++ = tag;
+ }
+ }
+}
+
+const char* VendorTags::getSectionName(const vendor_tag_ops_t* ops, uint32_t tag)
+{
+ const Section* section = getSection(tag);
+
+ if (section == NULL)
+ return NULL;
+
+ return section->name;
+}
+
+const char* VendorTags::getTagName(const vendor_tag_ops_t* ops, uint32_t tag)
+{
+ const Entry* entry = getEntry(tag);
+
+ if (entry == NULL)
+ return NULL;
+
+ return entry->name;
+}
+
+int VendorTags::getTagType(const vendor_tag_ops_t* ops, uint32_t tag)
+{
+ const Entry* entry = getEntry(tag);
+
+ if (entry == NULL)
+ return -1;
+
+ return entry->type;
+}
+} // namespace default_camera_hal
diff --git a/modules/camera/VendorTags.h b/modules/camera/VendorTags.h
new file mode 100644
index 0000000..ecf777e
--- /dev/null
+++ b/modules/camera/VendorTags.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VENDOR_TAGS_H_
+#define VENDOR_TAGS_H_
+
+#include <hardware/camera_common.h>
+#include <system/camera_metadata.h>
+
+namespace default_camera_hal {
+
+// VendorTags contains all vendor-specific metadata tag functionality
+class VendorTags {
+ public:
+ VendorTags();
+ ~VendorTags();
+
+ // Vendor Tags Operations (see <hardware/camera_common.h>)
+ int getTagCount(const vendor_tag_ops_t* ops);
+ void getAllTags(const vendor_tag_ops_t* ops, uint32_t* tag_array);
+ const char* getSectionName(const vendor_tag_ops_t* ops, uint32_t tag);
+ const char* getTagName(const vendor_tag_ops_t* ops, uint32_t tag);
+ int getTagType(const vendor_tag_ops_t* ops, uint32_t tag);
+
+ private:
+ // Total number of vendor tags
+ int mTagCount;
+};
+
+// Tag sections start at the beginning of vendor tags (0x8000_0000)
+// See <system/camera_metadata.h>
+enum {
+ DEMO_WIZARDRY,
+ DEMO_SORCERY,
+ DEMO_MAGIC,
+ DEMO_SECTION_COUNT
+};
+
+const uint32_t vendor_section_start = VENDOR_SECTION_START;
+
+// Each section starts at increments of 0x1_0000
+const uint32_t demo_wizardry_start = (DEMO_WIZARDRY + VENDOR_SECTION) << 16;
+const uint32_t demo_sorcery_start = (DEMO_SORCERY + VENDOR_SECTION) << 16;
+const uint32_t demo_magic_start = (DEMO_MAGIC + VENDOR_SECTION) << 16;
+
+// Vendor Tag values, start value begins each section
+const uint32_t demo_wizardry_dimension_size = demo_wizardry_start;
+const uint32_t demo_wizardry_dimensions = demo_wizardry_start + 1;
+const uint32_t demo_wizardry_familiar = demo_wizardry_start + 2;
+const uint32_t demo_wizardry_fire = demo_wizardry_start + 3;
+const uint32_t demo_wizardry_end = demo_wizardry_start + 4;
+
+const uint32_t demo_sorcery_difficulty = demo_sorcery_start;
+const uint32_t demo_sorcery_light = demo_sorcery_start + 1;
+const uint32_t demo_sorcery_end = demo_sorcery_start + 2;
+
+const uint32_t demo_magic_card_trick = demo_magic_start;
+const uint32_t demo_magic_levitation = demo_magic_start + 1;
+const uint32_t demo_magic_end = demo_magic_start + 2;
+
+} // namespace default_camera_hal
+
+#endif // VENDOR_TAGS_H_
diff --git a/modules/consumerir/consumerir.c b/modules/consumerir/consumerir.c
index 83eba75..87039cc 100644
--- a/modules/consumerir/consumerir.c
+++ b/modules/consumerir/consumerir.c
@@ -32,8 +32,8 @@
{.min = 56000, .max = 56000},
};
-static int consumerir_transmit(struct consumerir_device *dev,
- int carrier_freq, int pattern[], int pattern_len)
+static int consumerir_transmit(struct consumerir_device *dev __unused,
+ int carrier_freq, const int pattern[], int pattern_len)
{
int total_time = 0;
long i;
@@ -48,12 +48,12 @@
return 0;
}
-static int consumerir_get_num_carrier_freqs(struct consumerir_device *dev)
+static int consumerir_get_num_carrier_freqs(struct consumerir_device *dev __unused)
{
return ARRAY_SIZE(consumerir_freqs);
}
-static int consumerir_get_carrier_freqs(struct consumerir_device *dev,
+static int consumerir_get_carrier_freqs(struct consumerir_device *dev __unused,
size_t len, consumerir_freq_range_t *ranges)
{
size_t to_copy = ARRAY_SIZE(consumerir_freqs);
diff --git a/modules/mcu/Android.mk b/modules/fingerprint/Android.mk
similarity index 85%
copy from modules/mcu/Android.mk
copy to modules/fingerprint/Android.mk
index ec40e3a..58c0a83 100644
--- a/modules/mcu/Android.mk
+++ b/modules/fingerprint/Android.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2014 The Android Open Source Project
+# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
include $(CLEAR_VARS)
-LOCAL_MODULE := mcu.default
+LOCAL_MODULE := fingerprint.default
LOCAL_MODULE_RELATIVE_PATH := hw
-LOCAL_SRC_FILES := mcu.c
+LOCAL_SRC_FILES := fingerprint.c
LOCAL_SHARED_LIBRARIES := liblog
LOCAL_MODULE_TAGS := optional
diff --git a/modules/fingerprint/fingerprint.c b/modules/fingerprint/fingerprint.c
new file mode 100644
index 0000000..14dac12
--- /dev/null
+++ b/modules/fingerprint/fingerprint.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "FingerprintHal"
+
+#include <errno.h>
+#include <string.h>
+#include <cutils/log.h>
+#include <hardware/hardware.h>
+#include <hardware/fingerprint.h>
+
+static int fingerprint_close(hw_device_t *dev)
+{
+ if (dev) {
+ free(dev);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static int fingerprint_enroll(struct fingerprint_device __unused *dev,
+ uint32_t __unused timeout_sec) {
+ return FINGERPRINT_ERROR;
+}
+
+static int fingerprint_remove(struct fingerprint_device __unused *dev,
+ uint32_t __unused fingerprint_id) {
+ return FINGERPRINT_ERROR;
+}
+
+static int set_notify_callback(struct fingerprint_device *dev,
+ fingerprint_notify_t notify) {
+ /* Decorate with locks */
+ dev->notify = notify;
+ return FINGERPRINT_ERROR;
+}
+
+static int fingerprint_open(const hw_module_t* module, const char __unused *id,
+ hw_device_t** device)
+{
+ if (device == NULL) {
+ ALOGE("NULL device on open");
+ return -EINVAL;
+ }
+
+ fingerprint_device_t *dev = malloc(sizeof(fingerprint_device_t));
+ memset(dev, 0, sizeof(fingerprint_device_t));
+
+ dev->common.tag = HARDWARE_DEVICE_TAG;
+ dev->common.version = HARDWARE_MODULE_API_VERSION(1, 0);
+ dev->common.module = (struct hw_module_t*) module;
+ dev->common.close = fingerprint_close;
+
+ dev->enroll = fingerprint_enroll;
+ dev->remove = fingerprint_remove;
+ dev->set_notify = set_notify_callback;
+ dev->notify = NULL;
+
+ *device = (hw_device_t*) dev;
+ return 0;
+}
+
+static struct hw_module_methods_t fingerprint_module_methods = {
+ .open = fingerprint_open,
+};
+
+fingerprint_module_t HAL_MODULE_INFO_SYM = {
+ .common = {
+ .tag = HARDWARE_MODULE_TAG,
+ .module_api_version = FINGERPRINT_MODULE_API_VERSION_1_0,
+ .hal_api_version = HARDWARE_HAL_API_VERSION,
+ .id = FINGERPRINT_HARDWARE_MODULE_ID,
+ .name = "Demo Fingerprint HAL",
+ .author = "The Android Open Source Project",
+ .methods = &fingerprint_module_methods,
+ },
+};
diff --git a/modules/mcu/mcu.c b/modules/mcu/mcu.c
deleted file mode 100644
index 47300f9..0000000
--- a/modules/mcu/mcu.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <errno.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-
-#define LOG_TAG "Legacy MCU HAL"
-#include <utils/Log.h>
-
-#include <hardware/hardware.h>
-#include <hardware/mcu.h>
-
-static int mcu_init(struct mcu_module *module)
-{
- return 0;
-}
-
-static int mcu_send_message(struct mcu_module *module, const char *msg,
- const void *arg, size_t arg_len, void **result,
- size_t *result_len)
-{
- return 0;
-}
-
-static struct hw_module_methods_t mcu_module_methods = {
- .open = NULL,
-};
-
-struct mcu_module HAL_MODULE_INFO_SYM = {
- .common = {
- .tag = HARDWARE_MODULE_TAG,
- .module_api_version = MCU_MODULE_API_VERSION_0_1,
- .hal_api_version = HARDWARE_HAL_API_VERSION,
- .id = MCU_HARDWARE_MODULE_ID,
- .name = "Default MCU HAL",
- .author = "The Android Open Source Project",
- .methods = &mcu_module_methods,
- },
-
- .init = mcu_init,
- .sendMessage = mcu_send_message,
-};
diff --git a/modules/mcu/Android.mk b/modules/soundtrigger/Android.mk
similarity index 73%
copy from modules/mcu/Android.mk
copy to modules/soundtrigger/Android.mk
index ec40e3a..325980c 100644
--- a/modules/mcu/Android.mk
+++ b/modules/soundtrigger/Android.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2014 The Android Open Source Project
+# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,12 +14,14 @@
LOCAL_PATH := $(call my-dir)
+# Stub sound_trigger HAL module, used for tests
include $(CLEAR_VARS)
-LOCAL_MODULE := mcu.default
+LOCAL_MODULE := sound_trigger.stub.default
LOCAL_MODULE_RELATIVE_PATH := hw
-LOCAL_SRC_FILES := mcu.c
-LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_SRC_FILES := sound_trigger_hw.c
+LOCAL_SHARED_LIBRARIES := liblog libcutils
LOCAL_MODULE_TAGS := optional
+LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
diff --git a/modules/soundtrigger/sound_trigger_hw.c b/modules/soundtrigger/sound_trigger_hw.c
new file mode 100644
index 0000000..e7f9baf
--- /dev/null
+++ b/modules/soundtrigger/sound_trigger_hw.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "sound_trigger_hw_default"
+/*#define LOG_NDEBUG 0*/
+
+#include <errno.h>
+#include <pthread.h>
+#include <sys/prctl.h>
+#include <cutils/log.h>
+
+#include <hardware/hardware.h>
+#include <system/sound_trigger.h>
+#include <hardware/sound_trigger.h>
+
+static const struct sound_trigger_properties hw_properties = {
+ "The Android Open Source Project", // implementor
+ "Sound Trigger stub HAL", // description
+ 1, // version
+ { 0xed7a7d60, 0xc65e, 0x11e3, 0x9be4, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ 1, // max_sound_models
+ 1, // max_key_phrases
+ 1, // max_users
+ RECOGNITION_MODE_VOICE_TRIGGER, // recognition_modes
+ false, // capture_transition
+ 0, // max_buffer_ms
+ false, // concurrent_capture
+ false, // trigger_in_event
+ 0 // power_consumption_mw
+};
+
+struct stub_sound_trigger_device {
+ struct sound_trigger_hw_device device;
+ sound_model_handle_t model_handle;
+ recognition_callback_t recognition_callback;
+ void *recognition_cookie;
+ sound_model_callback_t sound_model_callback;
+ void *sound_model_cookie;
+ pthread_t callback_thread;
+ pthread_mutex_t lock;
+ pthread_cond_t cond;
+};
+
+
+static void *callback_thread_loop(void *context)
+{
+ struct stub_sound_trigger_device *stdev = (struct stub_sound_trigger_device *)context;
+ ALOGI("%s", __func__);
+
+ prctl(PR_SET_NAME, (unsigned long)"sound trigger callback", 0, 0, 0);
+
+ pthread_mutex_lock(&stdev->lock);
+ if (stdev->recognition_callback == NULL) {
+ goto exit;
+ }
+ struct timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ ts.tv_sec += 3;
+ ALOGI("%s wait 3 sec", __func__);
+ int rc = pthread_cond_timedwait(&stdev->cond, &stdev->lock, &ts);
+ if (rc == ETIMEDOUT && stdev->recognition_callback != NULL) {
+ char *data = (char *)calloc(1, sizeof(struct sound_trigger_phrase_recognition_event) + 1);
+ struct sound_trigger_phrase_recognition_event *event =
+ (struct sound_trigger_phrase_recognition_event *)data;
+ event->common.status = RECOGNITION_STATUS_SUCCESS;
+ event->common.type = SOUND_MODEL_TYPE_KEYPHRASE;
+ event->common.model = stdev->model_handle;
+ event->num_phrases = 1;
+ event->phrase_extras[0].recognition_modes = RECOGNITION_MODE_VOICE_TRIGGER;
+ event->phrase_extras[0].confidence_level = 100;
+ event->phrase_extras[0].num_levels = 1;
+ event->phrase_extras[0].levels[0].level = 100;
+ event->phrase_extras[0].levels[0].user_id = 0;
+ event->common.data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
+ event->common.data_size = 1;
+ data[event->common.data_offset] = 8;
+ ALOGI("%s send callback model %d", __func__, stdev->model_handle);
+ stdev->recognition_callback(&event->common, stdev->recognition_cookie);
+ free(data);
+ } else {
+ ALOGI("%s abort recognition model %d", __func__, stdev->model_handle);
+ }
+ stdev->recognition_callback = NULL;
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+
+ return NULL;
+}
+
+static int stdev_get_properties(const struct sound_trigger_hw_device *dev,
+ struct sound_trigger_properties *properties)
+{
+ struct stub_sound_trigger_device *stdev = (struct stub_sound_trigger_device *)dev;
+
+ ALOGI("%s", __func__);
+ if (properties == NULL)
+ return -EINVAL;
+ memcpy(properties, &hw_properties, sizeof(struct sound_trigger_properties));
+ return 0;
+}
+
+static int stdev_load_sound_model(const struct sound_trigger_hw_device *dev,
+ struct sound_trigger_sound_model *sound_model,
+ sound_model_callback_t callback,
+ void *cookie,
+ sound_model_handle_t *handle)
+{
+ struct stub_sound_trigger_device *stdev = (struct stub_sound_trigger_device *)dev;
+ int status = 0;
+
+ ALOGI("%s stdev %p", __func__, stdev);
+ pthread_mutex_lock(&stdev->lock);
+ if (handle == NULL || sound_model == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (sound_model->data_size == 0 ||
+ sound_model->data_offset < sizeof(struct sound_trigger_sound_model)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (stdev->model_handle == 1) {
+ status = -ENOSYS;
+ goto exit;
+ }
+ char *data = (char *)sound_model + sound_model->data_offset;
+ ALOGI("%s data size %d data %d - %d", __func__,
+ sound_model->data_size, data[0], data[sound_model->data_size - 1]);
+ stdev->model_handle = 1;
+ stdev->sound_model_callback = callback;
+ stdev->sound_model_cookie = cookie;
+
+ *handle = stdev->model_handle;
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return status;
+}
+
+static int stdev_unload_sound_model(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t handle)
+{
+ struct stub_sound_trigger_device *stdev = (struct stub_sound_trigger_device *)dev;
+ int status = 0;
+
+ ALOGI("%s handle %d", __func__, handle);
+ pthread_mutex_lock(&stdev->lock);
+ if (handle != 1) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (stdev->model_handle == 0) {
+ status = -ENOSYS;
+ goto exit;
+ }
+ stdev->model_handle = 0;
+ if (stdev->recognition_callback != NULL) {
+ stdev->recognition_callback = NULL;
+ pthread_cond_signal(&stdev->cond);
+ pthread_mutex_unlock(&stdev->lock);
+ pthread_join(stdev->callback_thread, (void **) NULL);
+ pthread_mutex_lock(&stdev->lock);
+ }
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return status;
+}
+
+static int stdev_start_recognition(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t sound_model_handle,
+ const struct sound_trigger_recognition_config *config,
+ recognition_callback_t callback,
+ void *cookie)
+{
+ struct stub_sound_trigger_device *stdev = (struct stub_sound_trigger_device *)dev;
+ int status = 0;
+ ALOGI("%s sound model %d", __func__, sound_model_handle);
+ pthread_mutex_lock(&stdev->lock);
+ if (stdev->model_handle != sound_model_handle) {
+ status = -ENOSYS;
+ goto exit;
+ }
+ if (stdev->recognition_callback != NULL) {
+ status = -ENOSYS;
+ goto exit;
+ }
+ if (config->data_size != 0) {
+ char *data = (char *)config + config->data_offset;
+ ALOGI("%s data size %d data %d - %d", __func__,
+ config->data_size, data[0], data[config->data_size - 1]);
+ }
+
+ stdev->recognition_callback = callback;
+ stdev->recognition_cookie = cookie;
+ pthread_create(&stdev->callback_thread, (const pthread_attr_t *) NULL,
+ callback_thread_loop, stdev);
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return status;
+}
+
+static int stdev_stop_recognition(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t sound_model_handle)
+{
+ struct stub_sound_trigger_device *stdev = (struct stub_sound_trigger_device *)dev;
+ int status = 0;
+ ALOGI("%s sound model %d", __func__, sound_model_handle);
+ pthread_mutex_lock(&stdev->lock);
+ if (stdev->model_handle != sound_model_handle) {
+ status = -ENOSYS;
+ goto exit;
+ }
+ if (stdev->recognition_callback == NULL) {
+ status = -ENOSYS;
+ goto exit;
+ }
+ stdev->recognition_callback = NULL;
+ pthread_cond_signal(&stdev->cond);
+ pthread_mutex_unlock(&stdev->lock);
+ pthread_join(stdev->callback_thread, (void **) NULL);
+ pthread_mutex_lock(&stdev->lock);
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return status;
+}
+
+
+static int stdev_close(hw_device_t *device)
+{
+ free(device);
+ return 0;
+}
+
+static int stdev_open(const hw_module_t* module, const char* name,
+ hw_device_t** device)
+{
+ struct stub_sound_trigger_device *stdev;
+ int ret;
+
+ if (strcmp(name, SOUND_TRIGGER_HARDWARE_INTERFACE) != 0)
+ return -EINVAL;
+
+ stdev = calloc(1, sizeof(struct stub_sound_trigger_device));
+ if (!stdev)
+ return -ENOMEM;
+
+ stdev->device.common.tag = HARDWARE_DEVICE_TAG;
+ stdev->device.common.version = SOUND_TRIGGER_DEVICE_API_VERSION_1_0;
+ stdev->device.common.module = (struct hw_module_t *) module;
+ stdev->device.common.close = stdev_close;
+ stdev->device.get_properties = stdev_get_properties;
+ stdev->device.load_sound_model = stdev_load_sound_model;
+ stdev->device.unload_sound_model = stdev_unload_sound_model;
+ stdev->device.start_recognition = stdev_start_recognition;
+ stdev->device.stop_recognition = stdev_stop_recognition;
+
+ pthread_mutex_init(&stdev->lock, (const pthread_mutexattr_t *) NULL);
+ pthread_cond_init(&stdev->cond, (const pthread_condattr_t *) NULL);
+
+ *device = &stdev->device.common;
+
+ return 0;
+}
+
+static struct hw_module_methods_t hal_module_methods = {
+ .open = stdev_open,
+};
+
+struct sound_trigger_module HAL_MODULE_INFO_SYM = {
+ .common = {
+ .tag = HARDWARE_MODULE_TAG,
+ .module_api_version = SOUND_TRIGGER_MODULE_API_VERSION_1_0,
+ .hal_api_version = HARDWARE_HAL_API_VERSION,
+ .id = SOUND_TRIGGER_HARDWARE_MODULE_ID,
+ .name = "Default sound trigger HAL",
+ .author = "The Android Open Source Project",
+ .methods = &hal_module_methods,
+ },
+};
diff --git a/modules/mcu/Android.mk b/modules/tv_input/Android.mk
similarity index 87%
rename from modules/mcu/Android.mk
rename to modules/tv_input/Android.mk
index ec40e3a..e8aa7fc 100644
--- a/modules/mcu/Android.mk
+++ b/modules/tv_input/Android.mk
@@ -16,10 +16,9 @@
include $(CLEAR_VARS)
-LOCAL_MODULE := mcu.default
LOCAL_MODULE_RELATIVE_PATH := hw
-LOCAL_SRC_FILES := mcu.c
-LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_SHARED_LIBRARIES := libcutils liblog
+LOCAL_SRC_FILES := tv_input.cpp
+LOCAL_MODULE := tv_input.default
LOCAL_MODULE_TAGS := optional
-
include $(BUILD_SHARED_LIBRARY)
diff --git a/modules/tv_input/tv_input.cpp b/modules/tv_input/tv_input.cpp
new file mode 100644
index 0000000..bc02786
--- /dev/null
+++ b/modules/tv_input/tv_input.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <errno.h>
+
+#include <cutils/log.h>
+#include <cutils/native_handle.h>
+
+#include <hardware/tv_input.h>
+
+/*****************************************************************************/
+
/*
 * Per-device state. 'device' must stay the first member so the
 * tv_input_device_t* handed to clients can be cast back to this type
 * (see tv_input_initialize / tv_input_device_close).
 */
typedef struct tv_input_private {
    tv_input_device_t device;

    // Callback related data
    const tv_input_callback_ops_t* callback;
    void* callback_data;
} tv_input_private_t;
+
/* Forward declaration: referenced by the module methods table below. */
static int tv_input_device_open(const struct hw_module_t* module,
        const char* name, struct hw_device_t** device);

/* NOTE(review): "label:" initializers are a GCC extension, not C99/C++
 * designated initializers — fine for AOSP toolchains, not portable. */
static struct hw_module_methods_t tv_input_module_methods = {
    open: tv_input_device_open
};

/* Module descriptor exported under the well-known HAL_MODULE_INFO_SYM name. */
tv_input_module_t HAL_MODULE_INFO_SYM = {
    common: {
        tag: HARDWARE_MODULE_TAG,
        version_major: 0,
        version_minor: 1,
        id: TV_INPUT_HARDWARE_MODULE_ID,
        name: "Sample TV input module",
        author: "The Android Open Source Project",
        methods: &tv_input_module_methods,
    }
};
+
+/*****************************************************************************/
+
+static int tv_input_initialize(struct tv_input_device* dev,
+ const tv_input_callback_ops_t* callback, void* data)
+{
+ if (dev == NULL || callback == NULL) {
+ return -EINVAL;
+ }
+ tv_input_private_t* priv = (tv_input_private_t*)dev;
+ if (priv->callback != NULL) {
+ return -EEXIST;
+ }
+
+ priv->callback = callback;
+ priv->callback_data = data;
+
+ return 0;
+}
+
/*
 * Stub implementations: this sample module exposes no inputs or streams,
 * so every stream/capture entry point simply reports -EINVAL.
 */
static int tv_input_get_stream_configurations(
        const struct tv_input_device*, int, int*, const tv_stream_config_t**)
{
    return -EINVAL;
}

static int tv_input_open_stream(struct tv_input_device*, int, tv_stream_t*)
{
    return -EINVAL;
}

static int tv_input_close_stream(struct tv_input_device*, int, int)
{
    return -EINVAL;
}

static int tv_input_request_capture(
        struct tv_input_device*, int, int, buffer_handle_t, uint32_t)
{
    return -EINVAL;
}

static int tv_input_cancel_capture(struct tv_input_device*, int, int, uint32_t)
{
    return -EINVAL;
}
+
+/*****************************************************************************/
+
/*
 * hw_device_t close() hook: frees the state allocated in tv_input_device_open().
 * free(NULL) is a well-defined no-op, so the previous NULL guard was redundant.
 */
static int tv_input_device_close(struct hw_device_t *dev)
{
    free(dev);
    return 0;
}
+
+/*****************************************************************************/
+
+static int tv_input_device_open(const struct hw_module_t* module,
+ const char* name, struct hw_device_t** device)
+{
+ int status = -EINVAL;
+ if (!strcmp(name, TV_INPUT_DEFAULT_DEVICE)) {
+ tv_input_private_t* dev = (tv_input_private_t*)malloc(sizeof(*dev));
+
+ /* initialize our state here */
+ memset(dev, 0, sizeof(*dev));
+
+ /* initialize the procs */
+ dev->device.common.tag = HARDWARE_DEVICE_TAG;
+ dev->device.common.version = TV_INPUT_DEVICE_API_VERSION_0_1;
+ dev->device.common.module = const_cast<hw_module_t*>(module);
+ dev->device.common.close = tv_input_device_close;
+
+ dev->device.initialize = tv_input_initialize;
+ dev->device.get_stream_configurations =
+ tv_input_get_stream_configurations;
+ dev->device.open_stream = tv_input_open_stream;
+ dev->device.close_stream = tv_input_close_stream;
+ dev->device.request_capture = tv_input_request_capture;
+ dev->device.cancel_capture = tv_input_cancel_capture;
+
+ *device = &dev->device.common;
+ status = 0;
+ }
+ return status;
+}
diff --git a/modules/usbaudio/Android.mk b/modules/usbaudio/Android.mk
index 2af7897..ec8a8c0 100644
--- a/modules/usbaudio/Android.mk
+++ b/modules/usbaudio/Android.mk
@@ -19,10 +19,15 @@
LOCAL_MODULE := audio.usb.default
LOCAL_MODULE_RELATIVE_PATH := hw
LOCAL_SRC_FILES := \
- audio_hw.c
+ audio_hw.c \
+ alsa_device_profile.c \
+ alsa_device_proxy.c \
+ logging.c \
+ format.c
LOCAL_C_INCLUDES += \
- external/tinyalsa/include
-LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa
+ external/tinyalsa/include \
+ $(call include-path-for, audio-utils)
+LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa libaudioutils
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS := -Wno-unused-parameter
diff --git a/modules/usbaudio/alsa_device_profile.c b/modules/usbaudio/alsa_device_profile.c
new file mode 100644
index 0000000..c7df00c
--- /dev/null
+++ b/modules/usbaudio/alsa_device_profile.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "alsa_device_profile"
+/*#define LOG_NDEBUG 0*/
+/*#define LOG_PCM_PARAMS 0*/
+
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <log/log.h>

#include "alsa_device_profile.h"
#include "format.h"
#include "logging.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*TODO - Evaluate if this value should/can be retrieved from a device-specific property */
+#define BUFF_DURATION_MS 5
+
+#define DEFAULT_PERIOD_SIZE 1024
+
/*
 * Framework audio-format names, indexed by tinyalsa enum pcm_format value
 * (the order of the trailing comments: S16_LE, S32_LE, S8, S24_LE, S24_3LE).
 * Used by profile_get_format_strs().
 */
static const char * const format_string_map[] = {
    "AUDIO_FORMAT_PCM_16_BIT", /* "PCM_FORMAT_S16_LE", */
    "AUDIO_FORMAT_PCM_32_BIT", /* "PCM_FORMAT_S32_LE", */
    "AUDIO_FORMAT_PCM_8_BIT", /* "PCM_FORMAT_S8", */
    "AUDIO_FORMAT_PCM_8_24_BIT", /* "PCM_FORMAT_S24_LE", */
    "AUDIO_FORMAT_PCM_24_BIT_PACKED"/* "PCM_FORMAT_S24_3LE" */
};
+
/*
 * Bytes per sample for each tinyalsa PCM format, indexed by enum pcm_format.
 * FIX: the declaration repeated the "const" qualifier ("const unsigned const");
 * a duplicate qualifier is at best a warning and at worst a C90 error.
 */
static const unsigned format_byte_size_map[] = {
    2, /* PCM_FORMAT_S16_LE */
    4, /* PCM_FORMAT_S32_LE */
    1, /* PCM_FORMAT_S8 */
    4, /* PCM_FORMAT_S24_LE */
    3, /* PCM_FORMAT_S24_3LE */
};
+
+extern int8_t const pcm_format_value_map[50];
+
+/* sort these highest -> lowest (to default to best quality) */
+static const unsigned std_sample_rates[] =
+ {48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000};
+
+void profile_init(alsa_device_profile* profile, int direction)
+{
+ profile->card = profile->device = -1;
+ profile->direction = direction;
+
+ /* Fill the attribute arrays with invalid values */
+ size_t index;
+ for (index = 0; index < ARRAY_SIZE(profile->formats); index++) {
+ profile->formats[index] = PCM_FORMAT_INVALID;
+ }
+
+ for (index = 0; index < ARRAY_SIZE(profile->sample_rates); index++) {
+ profile->sample_rates[index] = 0;
+ }
+
+ for (index = 0; index < ARRAY_SIZE(profile->channel_counts); index++) {
+ profile->channel_counts[index] = 0;
+ }
+
+ profile->min_period_size = profile->max_period_size = 0;
+ profile->min_channel_count = profile->max_channel_count = DEFAULT_CHANNEL_COUNT;
+
+ profile->is_valid = false;
+}
+
/* True once the profile has been bound to a real ALSA card/device pair. */
bool profile_is_initialized(alsa_device_profile* profile)
{
    return profile->card >= 0 && profile->device >= 0;
}

/* True once profile_read_device_info() has successfully populated the profile. */
bool profile_is_valid(alsa_device_profile* profile) {
    return profile->is_valid;
}

/* True if the profile currently describes the given ALSA card/device pair. */
bool profile_is_cached_for(alsa_device_profile* profile, int card, int device) {
    return card == profile->card && device == profile->device;
}

/* Forget the cached card/device so the profile reads as uninitialized again. */
void profile_decache(alsa_device_profile* profile) {
    profile->card = profile->device = -1;
}
+
/*
 * Round up to the next multiple of 16 (identity for exact multiples).
 */
static unsigned int round_to_16_mult(unsigned int size)
{
    const unsigned int remainder = size & 15u;
    return remainder != 0 ? size + (16u - remainder) : size;
}
+
+/*
+ * Returns the system defined minimum period size based on the supplied sample rate.
+ */
+unsigned profile_calc_min_period_size(alsa_device_profile* profile, unsigned sample_rate)
+{
+ ALOGV("profile_calc_min_period_size(%p, rate:%d)", profile, sample_rate);
+ if (profile == NULL) {
+ return DEFAULT_PERIOD_SIZE;
+ } else {
+ unsigned num_sample_frames = (sample_rate * BUFF_DURATION_MS) / 1000;
+ if (num_sample_frames < profile->min_period_size) {
+ num_sample_frames = profile->min_period_size;
+ }
+ return round_to_16_mult(num_sample_frames) * 2;
+ }
+}
+
+unsigned int profile_get_period_size(alsa_device_profile* profile, unsigned sample_rate)
+{
+ // return profile->default_config.period_size;
+ unsigned int period_size = profile_calc_min_period_size(profile, sample_rate);
+ ALOGV("profile_get_period_size(rate:%d) = %d", sample_rate, period_size);
+ return period_size;
+}
+
/*
 * Sample Rate
 */
/* Best supported rate (rates are stored highest-first), or DEFAULT_SAMPLE_RATE
 * when the device has not been scanned yet. */
unsigned profile_get_default_sample_rate(alsa_device_profile* profile)
{
    /*
     * TODO this won't be right in general. we should store a preferred rate as we are scanning.
     * But right now it will return the highest rate, which may be correct.
     */
    return profile_is_valid(profile) ? profile->sample_rates[0] : DEFAULT_SAMPLE_RATE;
}
+
+bool profile_is_sample_rate_valid(alsa_device_profile* profile, unsigned rate)
+{
+ if (profile_is_valid(profile)) {
+ size_t index;
+ for (index = 0; profile->sample_rates[index] != 0; index++) {
+ if (profile->sample_rates[index] == rate) {
+ return true;
+ }
+ }
+
+ return false;
+ } else {
+ return rate == DEFAULT_SAMPLE_RATE;
+ }
+}
+
/*
 * Format
 */
/* First (preferred) supported format, or DEFAULT_SAMPLE_FORMAT pre-scan. */
enum pcm_format profile_get_default_format(alsa_device_profile* profile)
{
    /*
     * TODO this won't be right in general. we should store a preferred format as we are scanning.
     */
    return profile_is_valid(profile) ? profile->formats[0] : DEFAULT_SAMPLE_FORMAT;
}
+
+bool profile_is_format_valid(alsa_device_profile* profile, enum pcm_format fmt) {
+ if (profile_is_valid(profile)) {
+ size_t index;
+ for (index = 0; profile->formats[index] != PCM_FORMAT_INVALID; index++) {
+ if (profile->formats[index] == fmt) {
+ return true;
+ }
+ }
+
+ return false;
+ } else {
+ return fmt == DEFAULT_SAMPLE_FORMAT;
+ }
+}
+
/*
 * Channels
 */
/* First (preferred) supported channel count, or DEFAULT_CHANNEL_COUNT pre-scan. */
unsigned profile_get_default_channel_count(alsa_device_profile* profile)
{
    return profile_is_valid(profile) ? profile->channel_counts[0] : DEFAULT_CHANNEL_COUNT;
}

/* True if 'count' lies within the device's [min, max] channel range.
 * NOTE(review): this gates on profile_is_initialized(), while the rate/format
 * checks gate on profile_is_valid() — confirm the asymmetry is intentional. */
bool profile_is_channel_count_valid(alsa_device_profile* profile, unsigned count)
{
    if (profile_is_initialized(profile)) {
        return count >= profile->min_channel_count && count <= profile->max_channel_count;
    } else {
        return count == DEFAULT_CHANNEL_COUNT;
    }
}
+
+static bool profile_test_sample_rate(alsa_device_profile* profile, unsigned rate)
+{
+ struct pcm_config config = profile->default_config;
+ config.rate = rate;
+
+ bool works = false; /* let's be pessimistic */
+ struct pcm * pcm = pcm_open(profile->card, profile->device,
+ profile->direction, &config);
+
+ if (pcm != NULL) {
+ works = pcm_is_ready(pcm);
+ pcm_close(pcm);
+ }
+
+ return works;
+}
+
+static unsigned profile_enum_sample_rates(alsa_device_profile* profile, unsigned min, unsigned max)
+{
+ unsigned num_entries = 0;
+ unsigned index;
+
+ for (index = 0; index < ARRAY_SIZE(std_sample_rates) &&
+ num_entries < ARRAY_SIZE(profile->sample_rates) - 1;
+ index++) {
+ if (std_sample_rates[index] >= min && std_sample_rates[index] <= max
+ && profile_test_sample_rate(profile, std_sample_rates[index])) {
+ profile->sample_rates[num_entries++] = std_sample_rates[index];
+ }
+ }
+
+ return num_entries; /* return # of supported rates */
+}
+
/*
 * Walk the ALSA format bitmask and record each recognized format in
 * profile->formats. pcm_format_value_map (declared extern above; presumably
 * defined in format.c — see format.h) translates a bit position into the
 * corresponding enum pcm_format. Always leaves at least one
 * PCM_FORMAT_INVALID terminator in the array; returns the number stored.
 */
static unsigned profile_enum_sample_formats(alsa_device_profile* profile, struct pcm_mask * mask)
{
    const int num_slots = ARRAY_SIZE(mask->bits);
    const int bits_per_slot = sizeof(mask->bits[0]) * 8;

    const int table_size = ARRAY_SIZE(pcm_format_value_map);

    int slot_index, bit_index, table_index;
    table_index = 0;
    int num_written = 0;
    /* Scan every bit of every mask slot; table_index tracks the absolute bit
     * position so it can index pcm_format_value_map. */
    for (slot_index = 0; slot_index < num_slots && table_index < table_size;
            slot_index++) {
        unsigned bit_mask = 1;
        for (bit_index = 0;
                bit_index < bits_per_slot && table_index < table_size;
                bit_index++) {
            if ((mask->bits[slot_index] & bit_mask) != 0) {
                enum pcm_format format = pcm_format_value_map[table_index];
                /* Never return invalid (unrecognized) or 8-bit */
                if (format != PCM_FORMAT_INVALID && format != PCM_FORMAT_S8) {
                    profile->formats[num_written++] = format;
                    if (num_written == ARRAY_SIZE(profile->formats) - 1) {
                        /* leave at least one PCM_FORMAT_INVALID at the end */
                        return num_written;
                    }
                }
            }
            bit_mask <<= 1;
            table_index++;
        }
    }

    return num_written;
}
+
+static unsigned profile_enum_channel_counts(alsa_device_profile* profile, unsigned min, unsigned max)
+{
+ // TODO: Don't return MONO even if the device supports it. This causes problems
+ // in AudioPolicyManager. Revisit.
+ static const unsigned std_out_channel_counts[] = {8, 4, 2/*, 1*/};
+ static const unsigned std_in_channel_counts[] = {8, 4, 2, 1};
+
+ unsigned * channel_counts =
+ profile->direction == PCM_OUT ? std_out_channel_counts : std_in_channel_counts;
+ unsigned num_channel_counts =
+ profile->direction == PCM_OUT
+ ? ARRAY_SIZE(std_out_channel_counts) : ARRAY_SIZE(std_in_channel_counts);
+
+ unsigned num_counts = 0;
+ unsigned index;
+ /* TODO write a profile_test_channel_count() */
+ /* Ensure there is at least one invalid channel count to terminate the channel counts array */
+ for (index = 0; index < num_channel_counts &&
+ num_counts < ARRAY_SIZE(profile->channel_counts) - 1;
+ index++) {
+ /* TODO Do we want a channel counts test? */
+ if (channel_counts[index] >= min && channel_counts[index] <= max /* &&
+ profile_test_channel_count(profile, channel_counts[index])*/) {
+ profile->channel_counts[num_counts++] = channel_counts[index];
+ }
+ }
+
+ return num_counts; /* return # of supported counts */
+}
+
/*
 * Reads and decodes configuration info from the specified ALSA card/device:
 * fills 'config' with a default (minimum) configuration and caches the
 * device's period/channel limits in the profile.
 * Returns 0 on success; -EINVAL if the card/device is unset, the hardware
 * parameters cannot be read, or no recognizable PCM format is present.
 */
static int read_alsa_device_config(alsa_device_profile * profile, struct pcm_config * config)
{
    ALOGV("usb:audio_hw - read_alsa_device_config(c:%d d:%d t:0x%X)",
          profile->card, profile->device, profile->direction);

    if (profile->card < 0 || profile->device < 0) {
        return -EINVAL;
    }

    struct pcm_params * alsa_hw_params =
        pcm_params_get(profile->card, profile->device, profile->direction);
    if (alsa_hw_params == NULL) {
        return -EINVAL;
    }

    profile->min_period_size = pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_SIZE);
    profile->max_period_size = pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_SIZE);

    profile->min_channel_count = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS);
    profile->max_channel_count = pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS);

    int ret = 0;

    /*
     * This logging will be useful when testing new USB devices.
     */
#ifdef LOG_PCM_PARAMS
    log_pcm_params(alsa_hw_params);
#endif

    /* Default config: the device minimum in each dimension. */
    config->channels = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS);
    config->rate = pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE);
    config->period_size = profile_calc_min_period_size(profile, config->rate);
    config->period_count = pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIODS);
    config->format = get_pcm_format_for_mask(pcm_params_get_mask(alsa_hw_params, PCM_PARAM_FORMAT));
#ifdef LOG_PCM_PARAMS
    log_pcm_config(config, "read_alsa_device_config");
#endif
    if (config->format == PCM_FORMAT_INVALID) {
        ret = -EINVAL;
    }

    pcm_params_free(alsa_hw_params);

    return ret;
}
+
+bool profile_read_device_info(alsa_device_profile* profile)
+{
+ if (!profile_is_initialized(profile)) {
+ return false;
+ }
+
+ /* let's get some defaults */
+ read_alsa_device_config(profile, &profile->default_config);
+ ALOGV("default_config chans:%d rate:%d format:%d count:%d size:%d",
+ profile->default_config.channels, profile->default_config.rate,
+ profile->default_config.format, profile->default_config.period_count,
+ profile->default_config.period_size);
+
+ struct pcm_params * alsa_hw_params = pcm_params_get(profile->card,
+ profile->device,
+ profile->direction);
+ if (alsa_hw_params == NULL) {
+ return false;
+ }
+
+ /* Formats */
+ struct pcm_mask * format_mask = pcm_params_get_mask(alsa_hw_params, PCM_PARAM_FORMAT);
+ profile_enum_sample_formats(profile, format_mask);
+
+ /* Channels */
+ profile_enum_channel_counts(
+ profile, pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS));
+
+ /* Sample Rates */
+ profile_enum_sample_rates(
+ profile, pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE));
+
+ profile->is_valid = true;
+
+ return true;
+}
+
+char * profile_get_sample_rate_strs(alsa_device_profile* profile)
+{
+ char buffer[128];
+ buffer[0] = '\0';
+ int buffSize = ARRAY_SIZE(buffer);
+
+ char numBuffer[32];
+
+ int numEntries = 0;
+ unsigned index;
+ for (index = 0; profile->sample_rates[index] != 0; index++) {
+ if (numEntries++ != 0) {
+ strncat(buffer, "|", buffSize);
+ }
+ snprintf(numBuffer, sizeof(numBuffer), "%u", profile->sample_rates[index]);
+ strncat(buffer, numBuffer, buffSize);
+ }
+
+ return strdup(buffer);
+}
+
+char * profile_get_format_strs(alsa_device_profile* profile)
+{
+ /* TODO remove this hack when we have support for input in non PCM16 formats */
+ if (profile->direction == PCM_IN) {
+ return strdup("AUDIO_FORMAT_PCM_16_BIT");
+ }
+
+ char buffer[128];
+ buffer[0] = '\0';
+ int buffSize = ARRAY_SIZE(buffer);
+
+ int numEntries = 0;
+ unsigned index = 0;
+ for (index = 0; profile->formats[index] != PCM_FORMAT_INVALID; index++) {
+ if (numEntries++ != 0) {
+ strncat(buffer, "|", buffSize);
+ }
+ strncat(buffer, format_string_map[profile->formats[index]], buffSize);
+ }
+
+ return strdup(buffer);
+}
+
+char * profile_get_channel_count_strs(alsa_device_profile* profile)
+{
+ static const char * const out_chans_strs[] = {
+ /* 0 */"AUDIO_CHANNEL_NONE", /* will never be taken as this is a terminator */
+ /* 1 */"AUDIO_CHANNEL_OUT_MONO",
+ /* 2 */"AUDIO_CHANNEL_OUT_STEREO",
+ /* 3 */ /* "AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_FRONT_CENTER" */ NULL,
+ /* 4 */"AUDIO_CHANNEL_OUT_QUAD",
+ /* 5 */ /* "AUDIO_CHANNEL_OUT_QUAD|AUDIO_CHANNEL_OUT_FRONT_CENTER" */ NULL,
+ /* 6 */"AUDIO_CHANNEL_OUT_5POINT1",
+ /* 7 */ /* "AUDIO_CHANNEL_OUT_5POINT1|AUDIO_CHANNEL_OUT_BACK_CENTER" */ NULL,
+ /* 8 */"AUDIO_CHANNEL_OUT_7POINT1",
+ /* channel counts greater than this not considered */
+ };
+
+ static const char * const in_chans_strs[] = {
+ /* 0 */"AUDIO_CHANNEL_NONE", /* will never be taken as this is a terminator */
+ /* 1 */"AUDIO_CHANNEL_IN_MONO",
+ /* 2 */"AUDIO_CHANNEL_IN_STEREO",
+ /* channel counts greater than this not considered */
+ };
+
+ const bool isOutProfile = profile->direction == PCM_OUT;
+ const char * const * const names_array = isOutProfile ? out_chans_strs : in_chans_strs;
+ const size_t names_size = isOutProfile ? ARRAY_SIZE(out_chans_strs)
+ : ARRAY_SIZE(in_chans_strs);
+
+ char buffer[256]; /* caution, may need to be expanded */
+ buffer[0] = '\0';
+ const int buffer_size = ARRAY_SIZE(buffer);
+ int num_entries = 0;
+ bool stereo_allowed = false;
+ unsigned index;
+ unsigned channel_count;
+
+ for (index = 0; (channel_count = profile->channel_counts[index]) != 0; index++) {
+ stereo_allowed = stereo_allowed || channel_count == 2;
+ if (channel_count < names_size && names_array[channel_count] != NULL) {
+ if (num_entries++ != 0) {
+ strncat(buffer, "|", buffer_size);
+ }
+ strncat(buffer, names_array[channel_count], buffer_size);
+ }
+ }
+ /* emulated modes:
+ * always expose stereo as we can emulate it for PCM_OUT
+ */
+ if (!stereo_allowed && isOutProfile) {
+ if (num_entries++ != 0) {
+ strncat(buffer, "|", buffer_size);
+ }
+ strncat(buffer, names_array[2], buffer_size); /* stereo */
+ }
+ return strdup(buffer);
+}
diff --git a/modules/usbaudio/alsa_device_profile.h b/modules/usbaudio/alsa_device_profile.h
new file mode 100644
index 0000000..2c0da39
--- /dev/null
+++ b/modules/usbaudio/alsa_device_profile.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_ALSA_DEVICE_PROFILE_H
#define ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_ALSA_DEVICE_PROFILE_H

#include <stdbool.h>

#include <tinyalsa/asoundlib.h>

/* Capacity of formats[]: 1 more than the 5 standard PCM formats in asound.h,
 * so there is always room for a PCM_FORMAT_INVALID terminator. */
#define MAX_PROFILE_FORMATS 6
/* Capacity of sample_rates[]: 1 more than the number of standard rates in
 * std_sample_rates[] (alsa_device_profile.c), leaving room for a 0 terminator. */
#define MAX_PROFILE_SAMPLE_RATES 10
/* Capacity of channel_counts[]: 1 more than the number of standard channel
 * counts enumerated in alsa_device_profile.c, leaving room for a 0 terminator. */
#define MAX_PROFILE_CHANNEL_COUNTS 5

/* Fallback attributes used before a device has been scanned. */
#define DEFAULT_SAMPLE_RATE 44100
#define DEFAULT_SAMPLE_FORMAT PCM_FORMAT_S16_LE
#define DEFAULT_CHANNEL_COUNT 2

/*
 * Cached capabilities of one ALSA card/device in one direction (PCM_OUT or
 * PCM_IN). Populated by profile_read_device_info().
 */
typedef struct {
    int card;           /* ALSA card number; -1 when decached */
    int device;         /* ALSA device number; -1 when decached */
    int direction;      /* PCM_OUT or PCM_IN */

    /* supported formats, terminated by PCM_FORMAT_INVALID */
    enum pcm_format formats[MAX_PROFILE_FORMATS];

    /* supported rates (highest first), terminated by 0 */
    unsigned sample_rates[MAX_PROFILE_SAMPLE_RATES];

    /* supported channel counts, terminated by 0 */
    unsigned channel_counts[MAX_PROFILE_CHANNEL_COUNTS];

    /* true once profile_read_device_info() has succeeded */
    bool is_valid;

    /* read from the hardware device */
    struct pcm_config default_config;

    unsigned min_period_size;
    unsigned max_period_size;

    unsigned min_channel_count;
    unsigned max_channel_count;
} alsa_device_profile;

void profile_init(alsa_device_profile* profile, int direction);
bool profile_is_initialized(alsa_device_profile* profile);
bool profile_is_valid(alsa_device_profile* profile);
bool profile_is_cached_for(alsa_device_profile* profile, int card, int device);
void profile_decache(alsa_device_profile* profile);

bool profile_read_device_info(alsa_device_profile* profile);

/* Audio Config Strings Methods — each returns a strdup'ed string; caller frees. */
char * profile_get_sample_rate_strs(alsa_device_profile* profile);
char * profile_get_format_strs(alsa_device_profile* profile);
char * profile_get_channel_count_strs(alsa_device_profile* profile);

/* Sample Rate Methods */
unsigned profile_get_default_sample_rate(alsa_device_profile* profile);
bool profile_is_sample_rate_valid(alsa_device_profile* profile, unsigned rate);

/* Format Methods */
enum pcm_format profile_get_default_format(alsa_device_profile* profile);
bool profile_is_format_valid(alsa_device_profile* profile, enum pcm_format fmt);

/* Channel Methods */
unsigned profile_get_default_channel_count(alsa_device_profile* profile);
bool profile_is_channel_count_valid(alsa_device_profile* profile, unsigned count);

/* Utility */
unsigned profile_calc_min_period_size(alsa_device_profile* profile, unsigned sample_rate);
unsigned int profile_get_period_size(alsa_device_profile* profile, unsigned sample_rate);

#endif /* ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_ALSA_DEVICE_PROFILE_H */
diff --git a/modules/usbaudio/alsa_device_proxy.c b/modules/usbaudio/alsa_device_proxy.c
new file mode 100644
index 0000000..081c05b
--- /dev/null
+++ b/modules/usbaudio/alsa_device_proxy.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "alsa_device_proxy"
+/*#define LOG_NDEBUG 0*/
+/*#define LOG_PCM_PARAMS 0*/
+
+#include <log/log.h>
+
+#include "alsa_device_proxy.h"
+
+#include "logging.h"
+
+#define DEFAULT_PERIOD_SIZE 1024
+#define DEFAULT_PERIOD_COUNT 2
+
+void proxy_prepare(alsa_device_proxy * proxy, alsa_device_profile* profile,
+ struct pcm_config * config)
+{
+ ALOGV("proxy_prepare()");
+
+ proxy->profile = profile;
+
+#ifdef LOG_PCM_PARAMS
+ log_pcm_config(config, "proxy_setup()");
+#endif
+
+ proxy->alsa_config.format =
+ config->format != PCM_FORMAT_INVALID && profile_is_format_valid(profile, config->format)
+ ? config->format : profile->default_config.format;
+ proxy->alsa_config.rate =
+ config->rate != 0 && profile_is_sample_rate_valid(profile, config->rate)
+ ? config->rate : profile->default_config.rate;
+ proxy->alsa_config.channels =
+ config->channels != 0 && profile_is_channel_count_valid(profile, config->channels)
+ ? config->channels : profile->default_config.channels;
+
+ proxy->alsa_config.period_count = profile->default_config.period_count;
+ proxy->alsa_config.period_size =
+ profile_get_period_size(proxy->profile, proxy->alsa_config.rate);
+
+ // Hack for USB accessory audio.
+ // Here we set the correct value for period_count if tinyalsa fails to get it from the
+ // f_audio_source driver.
+ if (proxy->alsa_config.period_count == 0) {
+ proxy->alsa_config.period_count = 4;
+ }
+
+ proxy->pcm = NULL;
+}
+
+int proxy_open(alsa_device_proxy * proxy)
+{
+ alsa_device_profile* profile = proxy->profile;
+ ALOGV("proxy_open(card:%d device:%d %s)", profile->card, profile->device,
+ profile->direction == PCM_OUT ? "PCM_OUT" : "PCM_IN");
+
+ proxy->pcm = pcm_open(profile->card, profile->device, profile->direction, &proxy->alsa_config);
+ if (proxy->pcm == NULL) {
+ return -ENOMEM;
+ }
+
+ if (!pcm_is_ready(proxy->pcm)) {
+ ALOGE("[%s] proxy_open() pcm_open() failed: %s", LOG_TAG, pcm_get_error(proxy->pcm));
+#ifdef LOG_PCM_PARAMS
+ log_pcm_config(&proxy->alsa_config, "config");
+#endif
+ pcm_close(proxy->pcm);
+ proxy->pcm = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void proxy_close(alsa_device_proxy * proxy)
+{
+ ALOGV("proxy_close() [pcm:%p]", proxy->pcm);
+
+ if (proxy->pcm != NULL) {
+ pcm_close(proxy->pcm);
+ proxy->pcm = NULL;
+ }
+}
+
/*
 * Sample Rate
 */
/* Rate the proxy will open (or has opened) the PCM with. */
unsigned proxy_get_sample_rate(const alsa_device_proxy * proxy)
{
    return proxy->alsa_config.rate;
}

/*
 * Format
 */
/* tinyalsa format the proxy will open (or has opened) the PCM with. */
enum pcm_format proxy_get_format(const alsa_device_proxy * proxy)
{
    return proxy->alsa_config.format;
}

/*
 * Channel Count
 */
unsigned proxy_get_channel_count(const alsa_device_proxy * proxy)
{
    return proxy->alsa_config.channels;
}

/*
 * Other
 */
/* Period size (frames) of the resolved configuration. */
unsigned int proxy_get_period_size(const alsa_device_proxy * proxy)
{
    return proxy->alsa_config.period_size;
}

/* Number of periods in the resolved configuration. */
unsigned int proxy_get_period_count(const alsa_device_proxy * proxy)
{
    return proxy->alsa_config.period_count;
}
+
+unsigned proxy_get_latency(const alsa_device_proxy * proxy)
+{
+ return (proxy_get_period_size(proxy) * proxy_get_period_count(proxy) * 1000)
+ / proxy_get_sample_rate(proxy);
+}
+
/*
 * I/O
 */
/* Thin pass-through to tinyalsa pcm_write() on the proxied stream;
 * 'count' is in bytes. */
int proxy_write(const alsa_device_proxy * proxy, const void *data, unsigned int count)
{
    return pcm_write(proxy->pcm, data, count);
}

/* Thin pass-through to tinyalsa pcm_read() on the proxied stream;
 * 'count' is in bytes. */
int proxy_read(const alsa_device_proxy * proxy, void *data, unsigned int count)
{
    return pcm_read(proxy->pcm, data, count);
}
diff --git a/modules/usbaudio/alsa_device_proxy.h b/modules/usbaudio/alsa_device_proxy.h
new file mode 100644
index 0000000..f090c56
--- /dev/null
+++ b/modules/usbaudio/alsa_device_proxy.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_ALSA_DEVICE_PROXY_H
#define ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_ALSA_DEVICE_PROXY_H

#include <tinyalsa/asoundlib.h>

#include "alsa_device_profile.h"

/*
 * Wraps one tinyalsa PCM stream plus the configuration resolved for it from
 * the owning device profile.
 */
typedef struct {
    alsa_device_profile* profile;   /* not owned; never freed by proxy code */

    struct pcm_config alsa_config;  /* resolved by proxy_prepare() */

    struct pcm * pcm;               /* NULL while the stream is closed */
} alsa_device_proxy;

/* Resolve 'config' against the profile's capabilities; call before proxy_open(). */
void proxy_prepare(alsa_device_proxy * proxy, alsa_device_profile * profile,
                   struct pcm_config * config);

/* Accessors for the resolved configuration. */
unsigned proxy_get_sample_rate(const alsa_device_proxy * proxy);
enum pcm_format proxy_get_format(const alsa_device_proxy * proxy);
unsigned proxy_get_channel_count(const alsa_device_proxy * proxy);

/* NOTE(review): proxy_get_period_count() is defined in alsa_device_proxy.c
 * but not declared here — consider exporting it alongside this accessor. */
unsigned int proxy_get_period_size(const alsa_device_proxy * proxy);

unsigned proxy_get_latency(const alsa_device_proxy * proxy);

int proxy_open(alsa_device_proxy * proxy);
void proxy_close(alsa_device_proxy * proxy);

int proxy_write(const alsa_device_proxy * proxy, const void *data, unsigned int count);
int proxy_read(const alsa_device_proxy * proxy, void *data, unsigned int count);

#endif /* ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_ALSA_DEVICE_PROXY_H */
diff --git a/modules/usbaudio/audio_hw.c b/modules/usbaudio/audio_hw.c
index 24a2d63..caddf97 100644
--- a/modules/usbaudio/audio_hw.c
+++ b/modules/usbaudio/audio_hw.c
@@ -18,80 +18,229 @@
/*#define LOG_NDEBUG 0*/
#include <errno.h>
+#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
-#include <sys/time.h>
#include <stdlib.h>
+#include <sys/time.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <cutils/str_parms.h>
#include <cutils/properties.h>
-#include <hardware/hardware.h>
-#include <system/audio.h>
#include <hardware/audio.h>
+#include <hardware/audio_alsaops.h>
+#include <hardware/hardware.h>
+
+#include <system/audio.h>
#include <tinyalsa/asoundlib.h>
-struct pcm_config pcm_config = {
- .channels = 2,
- .rate = 44100,
- .period_size = 1024,
- .period_count = 4,
- .format = PCM_FORMAT_S16_LE,
-};
+#include <audio_utils/channels.h>
+
+/* FOR TESTING:
+ * Set k_force_channels to force the number of channels to present to AudioFlinger.
+ * 0 disables (this is default: present the device channels to AudioFlinger).
+ * 2 forces to legacy stereo mode.
+ *
+ * Others values can be tried (up to 8).
+ * TODO: AudioFlinger cannot support more than 8 active output channels
+ * at this time, so limiting logic needs to be put here or communicated from above.
+ */
+static const unsigned k_force_channels = 0;
+
+#include "alsa_device_profile.h"
+#include "alsa_device_proxy.h"
+#include "logging.h"
+
+#define DEFAULT_INPUT_BUFFER_SIZE_MS 20
struct audio_device {
struct audio_hw_device hw_device;
pthread_mutex_t lock; /* see note below on mutex acquisition order */
- int card;
- int device;
+
+ /* output */
+ alsa_device_profile out_profile;
+
+ /* input */
+ alsa_device_profile in_profile;
+
bool standby;
};
struct stream_out {
struct audio_stream_out stream;
- pthread_mutex_t lock; /* see note below on mutex acquisition order */
- struct pcm *pcm;
+ pthread_mutex_t lock; /* see note below on mutex acquisition order */
bool standby;
- struct audio_device *dev;
+ struct audio_device *dev; /* hardware information - only using this for the lock */
+
+ alsa_device_profile * profile;
+ alsa_device_proxy proxy; /* state of the stream */
+
+ unsigned hal_channel_count; /* channel count exposed to AudioFlinger.
+ * This may differ from the device channel count when
+ * the device is not compatible with AudioFlinger
+ * capabilities, e.g. exposes too many channels or
+ * too few channels. */
+ void * conversion_buffer; /* any conversions are put into here
+ * they could come from here too if
+ * there was a previous conversion */
+ size_t conversion_buffer_size; /* in bytes */
};
+struct stream_in {
+ struct audio_stream_in stream;
+
+ pthread_mutex_t lock; /* see note below on mutex acquisition order */
+ bool standby;
+
+ struct audio_device *dev; /* hardware information - only using this for the lock */
+
+ alsa_device_profile * profile;
+ alsa_device_proxy proxy; /* state of the stream */
+
+ // not used?
+ // struct audio_config hal_pcm_config;
+
+ /* We may need to read more data from the device in order to data reduce to 16bit, 4chan */
+ void * conversion_buffer; /* any conversions are put into here
+ * they could come from here too if
+ * there was a previous conversion */
+ size_t conversion_buffer_size; /* in bytes */
+};
+
+/*
+ * Data Conversions
+ */
+/*
+ * Convert a buffer of packed (3-byte) PCM24LE samples to PCM16LE samples.
+ * in_buff points to the buffer of PCM24LE samples
+ * num_in_samples size of input buffer in SAMPLES
+ * out_buff points to the buffer to receive converted PCM16LE samples.
+ * returns
+ * the number of BYTES of output data.
+ * We are doing this since we *always* present to the framework as a PCM16LE device, but need to
+ * support PCM24_3LE (24-bit, packed).
+ * NOTE:
+ * This conversion is safe to do in-place (in_buff == out_buff).
+ * TODO Move this to a utilities module.
+ */
+static size_t convert_24_3_to_16(const unsigned char * in_buff, size_t num_in_samples,
+ short * out_buff)
+{
+ /*
+ * Move from front to back so that the conversion can be done in-place
+ * i.e. in_buff == out_buff
+ */
+ /* we need 2 bytes in the output for every 3 bytes in the input */
+ unsigned char* dst_ptr = (unsigned char*)out_buff;
+ const unsigned char* src_ptr = in_buff;
+ size_t src_smpl_index;
+ for (src_smpl_index = 0; src_smpl_index < num_in_samples; src_smpl_index++) {
+ src_ptr++; /* lowest-(skip)-byte */
+ *dst_ptr++ = *src_ptr++; /* low-byte */
+ *dst_ptr++ = *src_ptr++; /* high-byte */
+ }
+
+ /* return number of *bytes* generated: */
+ return num_in_samples * 2;
+}
+
+/*
+ * Convert a buffer of (4-byte) PCM32 samples to PCM16LE samples.
+ * in_buff points to the buffer of PCM32 samples
+ * num_in_samples size of input buffer in SAMPLES
+ * out_buff points to the buffer to receive converted PCM16LE samples.
+ * returns
+ * the number of BYTES of output data.
+ * We are doing this since we *always* present to The Framework as A PCM16LE device, but need to
+ * support PCM_FORMAT_S32_LE (32-bit).
+ * NOTE:
+ * This conversion is safe to do in-place (in_buff == out_buff).
+ * TODO Move this to a utilities module.
+ */
+static size_t convert_32_to_16(const int32_t * in_buff, size_t num_in_samples, short * out_buff)
+{
+ /*
+ * Move from front to back so that the conversion can be done in-place
+ * i.e. in_buff == out_buff
+ */
+
+ short * dst_ptr = out_buff;
+ const int32_t* src_ptr = in_buff;
+ size_t src_smpl_index;
+ for (src_smpl_index = 0; src_smpl_index < num_in_samples; src_smpl_index++) {
+ *dst_ptr++ = *src_ptr++ >> 16;
+ }
+
+ /* return number of *bytes* generated: */
+ return num_in_samples * 2;
+}
+
+static char * device_get_parameters(alsa_device_profile * profile, const char * keys)
+{
+ ALOGV("usb:audio_hw::device_get_parameters() keys:%s", keys);
+
+ if (profile->card < 0 || profile->device < 0) {
+ return strdup("");
+ }
+
+ struct str_parms *query = str_parms_create_str(keys);
+ struct str_parms *result = str_parms_create();
+
+ /* These keys are from hardware/libhardware/include/audio.h */
+ /* supported sample rates */
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES)) {
+ char* rates_list = profile_get_sample_rate_strs(profile);
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES,
+ rates_list);
+ free(rates_list);
+ }
+
+ /* supported channel counts */
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_CHANNELS)) {
+ char* channels_list = profile_get_channel_count_strs(profile);
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_CHANNELS,
+ channels_list);
+ free(channels_list);
+ }
+
+ /* supported sample formats */
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_FORMATS)) {
+ char * format_params = profile_get_format_strs(profile);
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_FORMATS,
+ format_params);
+ free(format_params);
+ }
+ str_parms_destroy(query);
+
+ char* result_str = str_parms_to_str(result);
+ str_parms_destroy(result);
+
+ ALOGV("usb:audio_hw::device_get_parameters = %s", result_str);
+
+ return result_str;
+}
+
+/*
+ * HAL Functions
+ */
/**
* NOTE: when multiple mutexes have to be acquired, always respect the
* following order: hw device > out stream
*/
-/* Helper functions */
-
-/* must be called with hw device and output stream mutexes locked */
-static int start_output_stream(struct stream_out *out)
-{
- struct audio_device *adev = out->dev;
- int i;
-
- if ((adev->card < 0) || (adev->device < 0))
- return -EINVAL;
-
- out->pcm = pcm_open(adev->card, adev->device, PCM_OUT, &pcm_config);
-
- if (out->pcm && !pcm_is_ready(out->pcm)) {
- ALOGE("pcm_open() failed: %s", pcm_get_error(out->pcm));
- pcm_close(out->pcm);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/* API functions */
-
+/*
+ * OUT functions
+ */
static uint32_t out_get_sample_rate(const struct audio_stream *stream)
{
- return pcm_config.rate;
+ uint32_t rate = proxy_get_sample_rate(&((struct stream_out*)stream)->proxy);
+ ALOGV("out_get_sample_rate() = %d", rate);
+ return rate;
}
static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
@@ -101,18 +250,27 @@
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
- return pcm_config.period_size *
- audio_stream_frame_size((struct audio_stream *)stream);
+ const struct stream_out* out = (const struct stream_out*)stream;
+ size_t buffer_size =
+ proxy_get_period_size(&out->proxy) * audio_stream_out_frame_size(&(out->stream));
+ return buffer_size;
}
static uint32_t out_get_channels(const struct audio_stream *stream)
{
- return AUDIO_CHANNEL_OUT_STEREO;
+ const struct stream_out *out = (const struct stream_out*)stream;
+ return audio_channel_out_mask_from_count(out->hal_channel_count);
}
static audio_format_t out_get_format(const struct audio_stream *stream)
{
- return AUDIO_FORMAT_PCM_16_BIT;
+ /* Note: The HAL doesn't do any FORMAT conversion at this time. It
+ * relies on the framework to provide data in the specified format.
+ * This could change in the future.
+ */
+ alsa_device_proxy * proxy = &((struct stream_out*)stream)->proxy;
+ audio_format_t format = audio_format_from_pcm_format(proxy_get_format(proxy));
+ return format;
}
static int out_set_format(struct audio_stream *stream, audio_format_t format)
@@ -128,8 +286,7 @@
pthread_mutex_lock(&out->lock);
if (!out->standby) {
- pcm_close(out->pcm);
- out->pcm = NULL;
+ proxy_close(&out->proxy);
out->standby = true;
}
@@ -146,49 +303,88 @@
static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
{
+ ALOGV("usb:audio_hw::out out_set_parameters() keys:%s", kvpairs);
+
struct stream_out *out = (struct stream_out *)stream;
- struct audio_device *adev = out->dev;
- struct str_parms *parms;
+
char value[32];
- int ret;
+ int param_val;
int routing = 0;
+ int ret_value = 0;
+ int card = -1;
+ int device = -1;
- parms = str_parms_create_str(kvpairs);
- pthread_mutex_lock(&adev->lock);
+ struct str_parms * parms = str_parms_create_str(kvpairs);
+ pthread_mutex_lock(&out->dev->lock);
+ pthread_mutex_lock(&out->lock);
- ret = str_parms_get_str(parms, "card", value, sizeof(value));
- if (ret >= 0)
- adev->card = atoi(value);
+ param_val = str_parms_get_str(parms, "card", value, sizeof(value));
+ if (param_val >= 0)
+ card = atoi(value);
- ret = str_parms_get_str(parms, "device", value, sizeof(value));
- if (ret >= 0)
- adev->device = atoi(value);
+ param_val = str_parms_get_str(parms, "device", value, sizeof(value));
+ if (param_val >= 0)
+ device = atoi(value);
- pthread_mutex_unlock(&adev->lock);
+ if (card >= 0 && device >= 0 && !profile_is_cached_for(out->profile, card, device)) {
+ /* cannot read pcm device info if playback is active */
+ if (!out->standby)
+ ret_value = -ENOSYS;
+ else {
+ int saved_card = out->profile->card;
+ int saved_device = out->profile->device;
+ out->profile->card = card;
+ out->profile->device = device;
+ ret_value = profile_read_device_info(out->profile) ? 0 : -EINVAL;
+ if (ret_value != 0) {
+ out->profile->card = saved_card;
+ out->profile->device = saved_device;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&out->lock);
+ pthread_mutex_unlock(&out->dev->lock);
str_parms_destroy(parms);
- return 0;
+ return ret_value;
}
static char * out_get_parameters(const struct audio_stream *stream, const char *keys)
{
- return strdup("");
+ struct stream_out *out = (struct stream_out *)stream;
+ pthread_mutex_lock(&out->dev->lock);
+ pthread_mutex_lock(&out->lock);
+
+ char * params_str = device_get_parameters(out->profile, keys);
+
+ pthread_mutex_unlock(&out->lock);
+ pthread_mutex_unlock(&out->dev->lock);
+
+ return params_str;
}
static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
- return (pcm_config.period_size * pcm_config.period_count * 1000) /
- out_get_sample_rate(&stream->common);
+ alsa_device_proxy * proxy = &((struct stream_out*)stream)->proxy;
+ return proxy_get_latency(proxy);
}
-static int out_set_volume(struct audio_stream_out *stream, float left,
- float right)
+static int out_set_volume(struct audio_stream_out *stream, float left, float right)
{
return -ENOSYS;
}
-static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
- size_t bytes)
+/* must be called with hw device and output stream mutexes locked */
+static int start_output_stream(struct stream_out *out)
+{
+ ALOGV("usb:audio_hw::out start_output_stream(card:%d device:%d)",
+ out->profile->card, out->profile->device);
+
+ return proxy_open(&out->proxy);
+}
+
+static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, size_t bytes)
{
int ret;
struct stream_out *out = (struct stream_out *)stream;
@@ -198,35 +394,67 @@
if (out->standby) {
ret = start_output_stream(out);
if (ret != 0) {
+ pthread_mutex_unlock(&out->dev->lock);
goto err;
}
out->standby = false;
}
+ pthread_mutex_unlock(&out->dev->lock);
- pcm_write(out->pcm, (void *)buffer, bytes);
+ alsa_device_proxy* proxy = &out->proxy;
+ const void * write_buff = buffer;
+ int num_write_buff_bytes = bytes;
+ const int num_device_channels = proxy_get_channel_count(proxy); /* what we told alsa */
+ const int num_req_channels = out->hal_channel_count; /* what we told AudioFlinger */
+ if (num_device_channels != num_req_channels) {
+ /* allocate buffer */
+ const size_t required_conversion_buffer_size =
+ bytes * num_device_channels / num_req_channels;
+ if (required_conversion_buffer_size > out->conversion_buffer_size) {
+ out->conversion_buffer_size = required_conversion_buffer_size;
+ out->conversion_buffer = realloc(out->conversion_buffer,
+ out->conversion_buffer_size);
+ }
+ /* convert data */
+ const audio_format_t audio_format = out_get_format(&(out->stream.common));
+ const unsigned sample_size_in_bytes = audio_bytes_per_sample(audio_format);
+ num_write_buff_bytes =
+ adjust_channels(write_buff, num_req_channels,
+ out->conversion_buffer, num_device_channels,
+ sample_size_in_bytes, num_write_buff_bytes);
+ write_buff = out->conversion_buffer;
+ }
+
+ if (write_buff != NULL && num_write_buff_bytes != 0) {
+ proxy_write(&out->proxy, write_buff, num_write_buff_bytes);
+ }
pthread_mutex_unlock(&out->lock);
- pthread_mutex_unlock(&out->dev->lock);
return bytes;
err:
pthread_mutex_unlock(&out->lock);
- pthread_mutex_unlock(&out->dev->lock);
if (ret != 0) {
- usleep(bytes * 1000000 / audio_stream_frame_size(&stream->common) /
+ usleep(bytes * 1000000 / audio_stream_out_frame_size(stream) /
out_get_sample_rate(&stream->common));
}
return bytes;
}
-static int out_get_render_position(const struct audio_stream_out *stream,
- uint32_t *dsp_frames)
+static int out_get_render_position(const struct audio_stream_out *stream, uint32_t *dsp_frames)
{
return -EINVAL;
}
+static int out_get_presentation_position(const struct audio_stream_out *stream,
+ uint64_t *frames, struct timespec *timestamp)
+{
+ /* FIXME - This needs to be implemented */
+ return -EINVAL;
+}
+
static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
{
return 0;
@@ -237,8 +465,7 @@
return 0;
}
-static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
- int64_t *timestamp)
+static int out_get_next_write_timestamp(const struct audio_stream_out *stream, int64_t *timestamp)
{
return -EINVAL;
}
@@ -248,16 +475,21 @@
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
- struct audio_stream_out **stream_out)
+ struct audio_stream_out **stream_out,
+ const char *address __unused)
{
+ ALOGV("usb:audio_hw::out adev_open_output_stream() handle:0x%X, device:0x%X, flags:0x%X",
+ handle, devices, flags);
+
struct audio_device *adev = (struct audio_device *)dev;
+
struct stream_out *out;
- int ret;
out = (struct stream_out *)calloc(1, sizeof(struct stream_out));
if (!out)
return -ENOMEM;
+ /* setup function pointers */
out->stream.common.get_sample_rate = out_get_sample_rate;
out->stream.common.set_sample_rate = out_set_sample_rate;
out->stream.common.get_buffer_size = out_get_buffer_size;
@@ -274,44 +506,535 @@
out->stream.set_volume = out_set_volume;
out->stream.write = out_write;
out->stream.get_render_position = out_get_render_position;
+ out->stream.get_presentation_position = out_get_presentation_position;
out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
out->dev = adev;
- config->format = out_get_format(&out->stream.common);
- config->channel_mask = out_get_channels(&out->stream.common);
- config->sample_rate = out_get_sample_rate(&out->stream.common);
+ out->profile = &adev->out_profile;
+
+ // build this to hand to the alsa_device_proxy
+ struct pcm_config proxy_config;
+ memset(&proxy_config, 0, sizeof(proxy_config));
+
+ int ret = 0;
+
+ /* Rate */
+ if (config->sample_rate == 0) {
+ proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(out->profile);
+ } else if (profile_is_sample_rate_valid(out->profile, config->sample_rate)) {
+ proxy_config.rate = config->sample_rate;
+ } else {
+ proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(out->profile);
+ ret = -EINVAL;
+ }
+
+ /* Format */
+ if (config->format == AUDIO_FORMAT_DEFAULT) {
+ proxy_config.format = profile_get_default_format(out->profile);
+ config->format = audio_format_from_pcm_format(proxy_config.format);
+ } else {
+ enum pcm_format fmt = pcm_format_from_audio_format(config->format);
+ if (profile_is_format_valid(out->profile, fmt)) {
+ proxy_config.format = fmt;
+ } else {
+ proxy_config.format = profile_get_default_format(out->profile);
+ config->format = audio_format_from_pcm_format(proxy_config.format);
+ ret = -EINVAL;
+ }
+ }
+
+ /* Channels */
+ unsigned proposed_channel_count = profile_get_default_channel_count(out->profile);
+ if (k_force_channels) {
+ proposed_channel_count = k_force_channels;
+ } else if (config->channel_mask != AUDIO_CHANNEL_NONE) {
+ proposed_channel_count = audio_channel_count_from_out_mask(config->channel_mask);
+ }
+ /* we can expose any channel count mask, and emulate internally. */
+ config->channel_mask = audio_channel_out_mask_from_count(proposed_channel_count);
+ out->hal_channel_count = proposed_channel_count;
+ /* no validity checks are needed as proxy_prepare() forces channel_count to be valid.
+ * and we emulate any channel count discrepancies in out_write(). */
+ proxy_config.channels = proposed_channel_count;
+
+ proxy_prepare(&out->proxy, out->profile, &proxy_config);
+
+ /* TODO The retry mechanism isn't implemented in AudioPolicyManager/AudioFlinger. */
+ ret = 0;
+
+ out->conversion_buffer = NULL;
+ out->conversion_buffer_size = 0;
out->standby = true;
- adev->card = -1;
- adev->device = -1;
-
*stream_out = &out->stream;
- return 0;
+
+ return ret;
err_open:
free(out);
*stream_out = NULL;
- return ret;
+ return -ENOSYS;
}
static void adev_close_output_stream(struct audio_hw_device *dev,
struct audio_stream_out *stream)
{
+ ALOGV("usb:audio_hw::out adev_close_output_stream()");
struct stream_out *out = (struct stream_out *)stream;
+ /* Close the pcm device */
out_standby(&stream->common);
+
+ free(out->conversion_buffer);
+
+ out->conversion_buffer = NULL;
+ out->conversion_buffer_size = 0;
+
free(stream);
}
-static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
+static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
+ const struct audio_config *config)
+{
+ /* TODO This needs to be calculated based on format/channels/rate */
+ return 320;
+}
+
+/*
+ * IN functions
+ */
+static uint32_t in_get_sample_rate(const struct audio_stream *stream)
+{
+ uint32_t rate = proxy_get_sample_rate(&((const struct stream_in *)stream)->proxy);
+ ALOGV("in_get_sample_rate() = %d", rate);
+ return rate;
+}
+
+static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
+{
+ ALOGV("in_set_sample_rate(%d) - NOPE", rate);
+ return -ENOSYS;
+}
+
+static size_t in_get_buffer_size(const struct audio_stream *stream)
+{
+ const struct stream_in * in = ((const struct stream_in*)stream);
+ size_t buffer_size =
+ proxy_get_period_size(&in->proxy) * audio_stream_in_frame_size(&(in->stream));
+ ALOGV("in_get_buffer_size() = %zd", buffer_size);
+
+ return buffer_size;
+}
+
+static uint32_t in_get_channels(const struct audio_stream *stream)
+{
+ /* TODO Here is the code we need when we support arbitrary channel counts
+ * alsa_device_proxy * proxy = ((struct stream_in*)stream)->proxy;
+ * unsigned channel_count = proxy_get_channel_count(proxy);
+ * uint32_t channel_mask = audio_channel_in_mask_from_count(channel_count);
+ * ALOGV("in_get_channels() = 0x%X count:%d", channel_mask, channel_count);
+ * return channel_mask;
+ */
+ /* TODO When AudioPolicyManager & AudioFlinger supports arbitrary channels
+ rewrite this to return the ACTUAL channel format */
+ return AUDIO_CHANNEL_IN_STEREO;
+}
+
+static audio_format_t in_get_format(const struct audio_stream *stream)
+{
+ /* TODO Here is the code we need when we support arbitrary input formats
+ * alsa_device_proxy * proxy = ((struct stream_in*)stream)->proxy;
+ * audio_format_t format = audio_format_from_pcm_format(proxy_get_format(proxy));
+ * ALOGV("in_get_format() = %d", format);
+ * return format;
+ */
+ /* Input only supports PCM16 */
+ /* TODO When AudioPolicyManager & AudioFlinger supports arbitrary input formats
+ rewrite this to return the ACTUAL channel format (above) */
+ return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+static int in_set_format(struct audio_stream *stream, audio_format_t format)
+{
+ ALOGV("in_set_format(%d) - NOPE", format);
+
+ return -ENOSYS;
+}
+
+static int in_standby(struct audio_stream *stream)
+{
+ struct stream_in *in = (struct stream_in *)stream;
+
+ pthread_mutex_lock(&in->dev->lock);
+ pthread_mutex_lock(&in->lock);
+
+ if (!in->standby) {
+ proxy_close(&in->proxy);
+ in->standby = true;
+ }
+
+ pthread_mutex_unlock(&in->lock);
+ pthread_mutex_unlock(&in->dev->lock);
+
+ return 0;
+}
+
+static int in_dump(const struct audio_stream *stream, int fd)
{
return 0;
}
-static char * adev_get_parameters(const struct audio_hw_device *dev,
- const char *keys)
+static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
+{
+ ALOGV("usb: audio_hw::in in_set_parameters() keys:%s", kvpairs);
+
+ struct stream_in *in = (struct stream_in *)stream;
+
+ char value[32];
+ int param_val;
+ int routing = 0;
+ int ret_value = 0;
+ int card = -1;
+ int device = -1;
+
+ struct str_parms * parms = str_parms_create_str(kvpairs);
+
+ pthread_mutex_lock(&in->dev->lock);
+ pthread_mutex_lock(&in->lock);
+
+ /* Device Connection Message ("card=1,device=0") */
+ param_val = str_parms_get_str(parms, "card", value, sizeof(value));
+ if (param_val >= 0)
+ card = atoi(value);
+
+ param_val = str_parms_get_str(parms, "device", value, sizeof(value));
+ if (param_val >= 0)
+ device = atoi(value);
+
+ if (card >= 0 && device >= 0 && !profile_is_cached_for(in->profile, card, device)) {
+ /* cannot read pcm device info if capture is active */
+ if (!in->standby)
+ ret_value = -ENOSYS;
+ else {
+ int saved_card = in->profile->card;
+ int saved_device = in->profile->device;
+ in->profile->card = card;
+ in->profile->device = device;
+ ret_value = profile_read_device_info(in->profile) ? 0 : -EINVAL;
+ if (ret_value != 0) {
+ in->profile->card = saved_card;
+ in->profile->device = saved_device;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&in->lock);
+ pthread_mutex_unlock(&in->dev->lock);
+
+ str_parms_destroy(parms);
+
+ return ret_value;
+}
+
+static char * in_get_parameters(const struct audio_stream *stream, const char *keys)
+{
+ struct stream_in *in = (struct stream_in *)stream;
+
+ pthread_mutex_lock(&in->dev->lock);
+ pthread_mutex_lock(&in->lock);
+
+ char * params_str = device_get_parameters(in->profile, keys);
+
+ pthread_mutex_unlock(&in->lock);
+ pthread_mutex_unlock(&in->dev->lock);
+
+ return params_str;
+}
+
+static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+ return 0;
+}
+
+static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+ return 0;
+}
+
+static int in_set_gain(struct audio_stream_in *stream, float gain)
+{
+ return 0;
+}
+
+/* must be called with hw device and input stream mutexes locked */
+static int start_input_stream(struct stream_in *in)
+{
+ ALOGV("usb:audio_hw::start_input_stream(card:%d device:%d)",
+ in->profile->card, in->profile->device);
+
+ return proxy_open(&in->proxy);
+}
+
+/* TODO mutex stuff here (see out_write) */
+static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t bytes)
+{
+ size_t num_read_buff_bytes = 0;
+ void * read_buff = buffer;
+ void * out_buff = buffer;
+
+ struct stream_in * in = (struct stream_in *)stream;
+
+ pthread_mutex_lock(&in->dev->lock);
+ pthread_mutex_lock(&in->lock);
+ if (in->standby) {
+ if (start_input_stream(in) != 0) {
+ pthread_mutex_unlock(&in->dev->lock);
+ goto err;
+ }
+ in->standby = false;
+ }
+ pthread_mutex_unlock(&in->dev->lock);
+
+
+ alsa_device_profile * profile = in->profile;
+
+ /*
+ * OK, we need to figure out how much data to read to be able to output the requested
+ * number of bytes in the HAL format (16-bit, stereo).
+ */
+ num_read_buff_bytes = bytes;
+ int num_device_channels = proxy_get_channel_count(&in->proxy);
+ int num_req_channels = 2; /* always, for now */
+
+ if (num_device_channels != num_req_channels) {
+ num_read_buff_bytes = (num_device_channels * num_read_buff_bytes) / num_req_channels;
+ }
+
+ enum pcm_format format = proxy_get_format(&in->proxy);
+ if (format == PCM_FORMAT_S24_3LE) {
+ /* 24-bit USB device */
+ num_read_buff_bytes = (3 * num_read_buff_bytes) / 2;
+ } else if (format == PCM_FORMAT_S32_LE) {
+ /* 32-bit USB device */
+ num_read_buff_bytes = num_read_buff_bytes * 2;
+ }
+
+ /* Setup/Realloc the conversion buffer (if necessary). */
+ if (num_read_buff_bytes != bytes) {
+ if (num_read_buff_bytes > in->conversion_buffer_size) {
+ /*TODO Remove this when AudioPolicyManger/AudioFlinger support arbitrary formats
+ (and do these conversions themselves) */
+ in->conversion_buffer_size = num_read_buff_bytes;
+ in->conversion_buffer = realloc(in->conversion_buffer, in->conversion_buffer_size);
+ }
+ read_buff = in->conversion_buffer;
+ }
+
+ if (proxy_read(&in->proxy, read_buff, num_read_buff_bytes) == 0) {
+ /*
+ * Do any conversions necessary to send the data in the format specified to/by the HAL
+ * (but different from the ALSA format), such as 24bit ->16bit, or 4chan -> 2chan.
+ */
+ if (format != PCM_FORMAT_S16_LE) {
+ /* we need to convert */
+ if (num_device_channels != num_req_channels) {
+ out_buff = read_buff;
+ }
+
+ if (format == PCM_FORMAT_S24_3LE) {
+ num_read_buff_bytes =
+ convert_24_3_to_16(read_buff, num_read_buff_bytes / 3, out_buff);
+ } else if (format == PCM_FORMAT_S32_LE) {
+ num_read_buff_bytes =
+ convert_32_to_16(read_buff, num_read_buff_bytes / 4, out_buff);
+ } else {
+ goto err;
+ }
+ }
+
+ if (num_device_channels != num_req_channels) {
+ // ALOGV("chans dev:%d req:%d", num_device_channels, num_req_channels);
+
+ out_buff = buffer;
+ /* Num Channels conversion */
+ if (num_device_channels != num_req_channels) {
+ audio_format_t audio_format = in_get_format(&(in->stream.common));
+ unsigned sample_size_in_bytes = audio_bytes_per_sample(audio_format);
+
+ num_read_buff_bytes =
+ adjust_channels(read_buff, num_device_channels,
+ out_buff, num_req_channels,
+ sample_size_in_bytes, num_read_buff_bytes);
+ }
+ }
+ }
+
+err:
+ pthread_mutex_unlock(&in->lock);
+
+ return num_read_buff_bytes;
+}
+
+static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
+{
+ return 0;
+}
+
+static int adev_open_input_stream(struct audio_hw_device *dev,
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ struct audio_stream_in **stream_in,
+ audio_input_flags_t flags __unused,
+ const char *address __unused,
+ audio_source_t source __unused)
+{
+ ALOGV("usb: in adev_open_input_stream() rate:%" PRIu32 ", chanMask:0x%" PRIX32 ", fmt:%" PRIu8,
+ config->sample_rate, config->channel_mask, config->format);
+
+ struct stream_in *in = (struct stream_in *)calloc(1, sizeof(struct stream_in));
+ int ret = 0;
+
+ if (in == NULL)
+ return -ENOMEM;
+
+ /* setup function pointers */
+ in->stream.common.get_sample_rate = in_get_sample_rate;
+ in->stream.common.set_sample_rate = in_set_sample_rate;
+ in->stream.common.get_buffer_size = in_get_buffer_size;
+ in->stream.common.get_channels = in_get_channels;
+ in->stream.common.get_format = in_get_format;
+ in->stream.common.set_format = in_set_format;
+ in->stream.common.standby = in_standby;
+ in->stream.common.dump = in_dump;
+ in->stream.common.set_parameters = in_set_parameters;
+ in->stream.common.get_parameters = in_get_parameters;
+ in->stream.common.add_audio_effect = in_add_audio_effect;
+ in->stream.common.remove_audio_effect = in_remove_audio_effect;
+
+ in->stream.set_gain = in_set_gain;
+ in->stream.read = in_read;
+ in->stream.get_input_frames_lost = in_get_input_frames_lost;
+
+ in->dev = (struct audio_device *)dev;
+
+ in->profile = &in->dev->in_profile;
+
+ struct pcm_config proxy_config;
+ memset(&proxy_config, 0, sizeof(proxy_config));
+
+ /* Rate */
+ if (config->sample_rate == 0) {
+ proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(in->profile);
+ } else if (profile_is_sample_rate_valid(in->profile, config->sample_rate)) {
+ proxy_config.rate = config->sample_rate;
+ } else {
+ proxy_config.rate = config->sample_rate = profile_get_default_sample_rate(in->profile);
+ ret = -EINVAL;
+ }
+
+ /* Format */
+ /* until the framework supports format conversion, just take what it asks for
+ * i.e. AUDIO_FORMAT_PCM_16_BIT */
+ if (config->format == AUDIO_FORMAT_DEFAULT) {
+ /* just return AUDIO_FORMAT_PCM_16_BIT until the framework supports other input
+ * formats */
+ config->format = AUDIO_FORMAT_PCM_16_BIT;
+ proxy_config.format = PCM_FORMAT_S16_LE;
+ } else if (config->format == AUDIO_FORMAT_PCM_16_BIT) {
+ /* Always accept AUDIO_FORMAT_PCM_16_BIT until the framework supports other input
+ * formats */
+ proxy_config.format = PCM_FORMAT_S16_LE;
+ } else {
+ /* When the framework support other formats, validate here */
+ config->format = AUDIO_FORMAT_PCM_16_BIT;
+ proxy_config.format = PCM_FORMAT_S16_LE;
+ ret = -EINVAL;
+ }
+
+ if (config->channel_mask == AUDIO_CHANNEL_NONE) {
+ /* just return AUDIO_CHANNEL_IN_STEREO until the framework supports other input
+ * formats */
+ config->channel_mask = AUDIO_CHANNEL_IN_STEREO;
+
+ } else if (config->channel_mask != AUDIO_CHANNEL_IN_STEREO) {
+ /* allow only stereo capture for now */
+ config->channel_mask = AUDIO_CHANNEL_IN_STEREO;
+ ret = -EINVAL;
+ }
+ // proxy_config.channels = 0; /* don't change */
+ proxy_config.channels = profile_get_default_channel_count(in->profile);
+
+ proxy_prepare(&in->proxy, in->profile, &proxy_config);
+
+ in->standby = true;
+
+ in->conversion_buffer = NULL;
+ in->conversion_buffer_size = 0;
+
+ *stream_in = &in->stream;
+
+ return ret;
+}
+
+static void adev_close_input_stream(struct audio_hw_device *dev, struct audio_stream_in *stream)
+{
+ struct stream_in *in = (struct stream_in *)stream;
+
+ /* Close the pcm device */
+ in_standby(&stream->common);
+
+ free(in->conversion_buffer);
+
+ free(stream);
+}
+
+/*
+ * ADEV Functions
+ */
+static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
+{
+ ALOGV("audio_hw:usb adev_set_parameters(%s)", kvpairs);
+
+ struct audio_device * adev = (struct audio_device *)dev;
+
+ char value[32];
+ int param_val;
+
+ struct str_parms * parms = str_parms_create_str(kvpairs);
+
+ /* Check for the "disconnect" message */
+ param_val = str_parms_get_str(parms, "disconnect", value, sizeof(value));
+ if (param_val >= 0) {
+ audio_devices_t device = (audio_devices_t)atoi(value);
+
+ param_val = str_parms_get_str(parms, "card", value, sizeof(value));
+ int alsa_card = param_val >= 0 ? atoi(value) : -1;
+
+ param_val = str_parms_get_str(parms, "device", value, sizeof(value));
+ int alsa_device = param_val >= 0 ? atoi(value) : -1;
+
+ if (alsa_card >= 0 && alsa_device >= 0) {
+ /* "decache" the profile */
+ pthread_mutex_lock(&adev->lock);
+ if (device == AUDIO_DEVICE_OUT_USB_DEVICE &&
+ profile_is_cached_for(&adev->out_profile, alsa_card, alsa_device)) {
+ profile_decache(&adev->out_profile);
+ }
+ if (device == AUDIO_DEVICE_IN_USB_DEVICE &&
+ profile_is_cached_for(&adev->in_profile, alsa_card, alsa_device)) {
+ profile_decache(&adev->in_profile);
+ }
+ pthread_mutex_unlock(&adev->lock);
+ }
+ }
+
+ return 0;
+}
+
+static char * adev_get_parameters(const struct audio_hw_device *dev, const char *keys)
{
return strdup("");
}
@@ -346,26 +1069,6 @@
return -ENOSYS;
}
-static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
- const struct audio_config *config)
-{
- return 0;
-}
-
-static int adev_open_input_stream(struct audio_hw_device *dev,
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- struct audio_stream_in **stream_in)
-{
- return -ENOSYS;
-}
-
-static void adev_close_input_stream(struct audio_hw_device *dev,
- struct audio_stream_in *stream)
-{
-}
-
static int adev_dump(const audio_hw_device_t *device, int fd)
{
return 0;
@@ -374,27 +1077,26 @@
static int adev_close(hw_device_t *device)
{
struct audio_device *adev = (struct audio_device *)device;
-
free(device);
+
return 0;
}
-static int adev_open(const hw_module_t* module, const char* name,
- hw_device_t** device)
+static int adev_open(const hw_module_t* module, const char* name, hw_device_t** device)
{
- struct audio_device *adev;
- int ret;
-
if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
return -EINVAL;
- adev = calloc(1, sizeof(struct audio_device));
+ struct audio_device *adev = calloc(1, sizeof(struct audio_device));
if (!adev)
return -ENOMEM;
+ profile_init(&adev->out_profile, PCM_OUT);
+ profile_init(&adev->in_profile, PCM_IN);
+
adev->hw_device.common.tag = HARDWARE_DEVICE_TAG;
adev->hw_device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
- adev->hw_device.common.module = (struct hw_module_t *) module;
+ adev->hw_device.common.module = (struct hw_module_t *)module;
adev->hw_device.common.close = adev_close;
adev->hw_device.init_check = adev_init_check;
diff --git a/modules/usbaudio/format.c b/modules/usbaudio/format.c
new file mode 100644
index 0000000..6aac1d3
--- /dev/null
+++ b/modules/usbaudio/format.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "usb_profile"
+/*#define LOG_NDEBUG 0*/
+
+#include "format.h"
+
+#include <tinyalsa/asoundlib.h>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Maps from bit position in pcm_mask to AUDIO_ format constants.
+ */
+static audio_format_t const format_value_map[] = {
+ AUDIO_FORMAT_PCM_8_BIT, /* 00 - SNDRV_PCM_FORMAT_S8 */
+ AUDIO_FORMAT_PCM_8_BIT, /* 01 - SNDRV_PCM_FORMAT_U8 */
+ AUDIO_FORMAT_PCM_16_BIT, /* 02 - SNDRV_PCM_FORMAT_S16_LE */
+ AUDIO_FORMAT_INVALID, /* 03 - SNDRV_PCM_FORMAT_S16_BE */
+ AUDIO_FORMAT_INVALID, /* 04 - SNDRV_PCM_FORMAT_U16_LE */
+ AUDIO_FORMAT_INVALID, /* 05 - SNDRV_PCM_FORMAT_U16_BE */
+ AUDIO_FORMAT_INVALID, /* 06 - SNDRV_PCM_FORMAT_S24_LE */
+ AUDIO_FORMAT_INVALID, /* 07 - SNDRV_PCM_FORMAT_S24_BE */
+ AUDIO_FORMAT_INVALID, /* 08 - SNDRV_PCM_FORMAT_U24_LE */
+ AUDIO_FORMAT_INVALID, /* 09 - SNDRV_PCM_FORMAT_U24_BE */
+ AUDIO_FORMAT_PCM_32_BIT, /* 10 - SNDRV_PCM_FORMAT_S32_LE */
+ AUDIO_FORMAT_INVALID, /* 11 - SNDRV_PCM_FORMAT_S32_BE */
+ AUDIO_FORMAT_INVALID, /* 12 - SNDRV_PCM_FORMAT_U32_LE */
+ AUDIO_FORMAT_INVALID, /* 13 - SNDRV_PCM_FORMAT_U32_BE */
+ AUDIO_FORMAT_PCM_FLOAT, /* 14 - SNDRV_PCM_FORMAT_FLOAT_LE */
+ AUDIO_FORMAT_INVALID, /* 15 - SNDRV_PCM_FORMAT_FLOAT_BE */
+ AUDIO_FORMAT_INVALID, /* 16 - SNDRV_PCM_FORMAT_FLOAT64_LE */
+ AUDIO_FORMAT_INVALID, /* 17 - SNDRV_PCM_FORMAT_FLOAT64_BE */
+ AUDIO_FORMAT_INVALID, /* 18 - SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE */
+ AUDIO_FORMAT_INVALID, /* 19 - SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE */
+ AUDIO_FORMAT_INVALID, /* 20 - SNDRV_PCM_FORMAT_MU_LAW */
+ AUDIO_FORMAT_INVALID, /* 21 - SNDRV_PCM_FORMAT_A_LAW */
+ AUDIO_FORMAT_INVALID, /* 22 - SNDRV_PCM_FORMAT_IMA_ADPCM */
+ AUDIO_FORMAT_INVALID, /* 23 - SNDRV_PCM_FORMAT_MPEG */
+ AUDIO_FORMAT_INVALID, /* 24 - SNDRV_PCM_FORMAT_GSM */
+ AUDIO_FORMAT_INVALID, /* 25 -> 30 (not assigned) */
+ AUDIO_FORMAT_INVALID,
+ AUDIO_FORMAT_INVALID,
+ AUDIO_FORMAT_INVALID,
+ AUDIO_FORMAT_INVALID,
+ AUDIO_FORMAT_INVALID,
+ AUDIO_FORMAT_INVALID, /* 31 - SNDRV_PCM_FORMAT_SPECIAL */
+ AUDIO_FORMAT_PCM_24_BIT_PACKED, /* 32 - SNDRV_PCM_FORMAT_S24_3LE */
+ AUDIO_FORMAT_INVALID, /* 33 - SNDRV_PCM_FORMAT_S24_3BE */
+ AUDIO_FORMAT_INVALID, /* 34 - SNDRV_PCM_FORMAT_U24_3LE */
+ AUDIO_FORMAT_INVALID, /* 35 - SNDRV_PCM_FORMAT_U24_3BE */
+ AUDIO_FORMAT_INVALID, /* 36 - SNDRV_PCM_FORMAT_S20_3LE */
+ AUDIO_FORMAT_INVALID, /* 37 - SNDRV_PCM_FORMAT_S20_3BE */
+ AUDIO_FORMAT_INVALID, /* 38 - SNDRV_PCM_FORMAT_U20_3LE */
+ AUDIO_FORMAT_INVALID, /* 39 - SNDRV_PCM_FORMAT_U20_3BE */
+ AUDIO_FORMAT_INVALID, /* 40 - SNDRV_PCM_FORMAT_S18_3LE */
+ AUDIO_FORMAT_INVALID, /* 41 - SNDRV_PCM_FORMAT_S18_3BE */
+ AUDIO_FORMAT_INVALID, /* 42 - SNDRV_PCM_FORMAT_U18_3LE */
+ AUDIO_FORMAT_INVALID, /* 43 - SNDRV_PCM_FORMAT_U18_3BE */
+ AUDIO_FORMAT_INVALID, /* 44 - SNDRV_PCM_FORMAT_G723_24 */
+ AUDIO_FORMAT_INVALID, /* 45 - SNDRV_PCM_FORMAT_G723_24_1B */
+ AUDIO_FORMAT_INVALID, /* 46 - SNDRV_PCM_FORMAT_G723_40 */
+ AUDIO_FORMAT_INVALID, /* 47 - SNDRV_PCM_FORMAT_G723_40_1B */
+ AUDIO_FORMAT_INVALID, /* 48 - SNDRV_PCM_FORMAT_DSD_U8 */
+ AUDIO_FORMAT_INVALID /* 49 - SNDRV_PCM_FORMAT_DSD_U16_LE */
+};
+
+audio_format_t get_format_for_mask(struct pcm_mask* mask)
+{
+ int num_slots = sizeof(mask->bits) / sizeof(mask->bits[0]);
+ int bits_per_slot = sizeof(mask->bits[0]) * 8;
+
+ int table_size = sizeof(format_value_map) / sizeof(format_value_map[0]);
+
+ int slot_index, bit_index, table_index;
+ table_index = 0;
+ int num_written = 0;
+ for (slot_index = 0; slot_index < num_slots; slot_index++) {
+ unsigned bit_mask = 1;
+ for (bit_index = 0; bit_index < bits_per_slot; bit_index++) {
+ /* don't return b-bit formats even if they are supported */
+ if (table_index >= 2 && (mask->bits[slot_index] & bit_mask) != 0) {
+ /* just return the first one */
+ return table_index < table_size
+ ? format_value_map[table_index]
+ : AUDIO_FORMAT_INVALID;
+ }
+ bit_mask <<= 1;
+ table_index++;
+ }
+ }
+
+ return AUDIO_FORMAT_INVALID;
+}
+
+/*
+ * Maps from bit position in pcm_mask to PCM_ format constants.
+ */
+int8_t const pcm_format_value_map[50] = {
+ PCM_FORMAT_S8, /* 00 - SNDRV_PCM_FORMAT_S8 */
+ PCM_FORMAT_INVALID, /* 01 - SNDRV_PCM_FORMAT_U8 */
+ PCM_FORMAT_S16_LE, /* 02 - SNDRV_PCM_FORMAT_S16_LE */
+ PCM_FORMAT_INVALID, /* 03 - SNDRV_PCM_FORMAT_S16_BE */
+ PCM_FORMAT_INVALID, /* 04 - SNDRV_PCM_FORMAT_U16_LE */
+ PCM_FORMAT_INVALID, /* 05 - SNDRV_PCM_FORMAT_U16_BE */
+ PCM_FORMAT_S24_3LE, /* 06 - SNDRV_PCM_FORMAT_S24_LE */
+ PCM_FORMAT_INVALID, /* 07 - SNDRV_PCM_FORMAT_S24_BE */
+ PCM_FORMAT_INVALID, /* 08 - SNDRV_PCM_FORMAT_U24_LE */
+ PCM_FORMAT_INVALID, /* 09 - SNDRV_PCM_FORMAT_U24_BE */
+ PCM_FORMAT_S32_LE, /* 10 - SNDRV_PCM_FORMAT_S32_LE */
+ PCM_FORMAT_INVALID, /* 11 - SNDRV_PCM_FORMAT_S32_BE */
+ PCM_FORMAT_INVALID, /* 12 - SNDRV_PCM_FORMAT_U32_LE */
+ PCM_FORMAT_INVALID, /* 13 - SNDRV_PCM_FORMAT_U32_BE */
+ PCM_FORMAT_INVALID, /* 14 - SNDRV_PCM_FORMAT_FLOAT_LE */
+ PCM_FORMAT_INVALID, /* 15 - SNDRV_PCM_FORMAT_FLOAT_BE */
+ PCM_FORMAT_INVALID, /* 16 - SNDRV_PCM_FORMAT_FLOAT64_LE */
+ PCM_FORMAT_INVALID, /* 17 - SNDRV_PCM_FORMAT_FLOAT64_BE */
+ PCM_FORMAT_INVALID, /* 18 - SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE */
+ PCM_FORMAT_INVALID, /* 19 - SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE */
+ PCM_FORMAT_INVALID, /* 20 - SNDRV_PCM_FORMAT_MU_LAW */
+ PCM_FORMAT_INVALID, /* 21 - SNDRV_PCM_FORMAT_A_LAW */
+ PCM_FORMAT_INVALID, /* 22 - SNDRV_PCM_FORMAT_IMA_ADPCM */
+ PCM_FORMAT_INVALID, /* 23 - SNDRV_PCM_FORMAT_MPEG */
+ PCM_FORMAT_INVALID, /* 24 - SNDRV_PCM_FORMAT_GSM */
+ PCM_FORMAT_INVALID, /* 25 -> 30 (not assigned) */
+ PCM_FORMAT_INVALID,
+ PCM_FORMAT_INVALID,
+ PCM_FORMAT_INVALID,
+ PCM_FORMAT_INVALID,
+ PCM_FORMAT_INVALID,
+ PCM_FORMAT_INVALID, /* 31 - SNDRV_PCM_FORMAT_SPECIAL */
+ PCM_FORMAT_S24_3LE, /* 32 - SNDRV_PCM_FORMAT_S24_3LE */ /* ??? */
+ PCM_FORMAT_INVALID, /* 33 - SNDRV_PCM_FORMAT_S24_3BE */
+ PCM_FORMAT_INVALID, /* 34 - SNDRV_PCM_FORMAT_U24_3LE */
+ PCM_FORMAT_INVALID, /* 35 - SNDRV_PCM_FORMAT_U24_3BE */
+ PCM_FORMAT_INVALID, /* 36 - SNDRV_PCM_FORMAT_S20_3LE */
+ PCM_FORMAT_INVALID, /* 37 - SNDRV_PCM_FORMAT_S20_3BE */
+ PCM_FORMAT_INVALID, /* 38 - SNDRV_PCM_FORMAT_U20_3LE */
+ PCM_FORMAT_INVALID, /* 39 - SNDRV_PCM_FORMAT_U20_3BE */
+ PCM_FORMAT_INVALID, /* 40 - SNDRV_PCM_FORMAT_S18_3LE */
+ PCM_FORMAT_INVALID, /* 41 - SNDRV_PCM_FORMAT_S18_3BE */
+ PCM_FORMAT_INVALID, /* 42 - SNDRV_PCM_FORMAT_U18_3LE */
+ PCM_FORMAT_INVALID, /* 43 - SNDRV_PCM_FORMAT_U18_3BE */
+ PCM_FORMAT_INVALID, /* 44 - SNDRV_PCM_FORMAT_G723_24 */
+ PCM_FORMAT_INVALID, /* 45 - SNDRV_PCM_FORMAT_G723_24_1B */
+ PCM_FORMAT_INVALID, /* 46 - SNDRV_PCM_FORMAT_G723_40 */
+ PCM_FORMAT_INVALID, /* 47 - SNDRV_PCM_FORMAT_G723_40_1B */
+ PCM_FORMAT_INVALID, /* 48 - SNDRV_PCM_FORMAT_DSD_U8 */
+ PCM_FORMAT_INVALID /* 49 - SNDRV_PCM_FORMAT_DSD_U16_LE */
+};
+
+/*
+ * Scans the provided format mask and returns the first non-8 bit sample
+ * format supported by the devices.
+ */
+enum pcm_format get_pcm_format_for_mask(struct pcm_mask* mask)
+{
+ int num_slots = ARRAY_SIZE(mask->bits);
+ int bits_per_slot = sizeof(mask->bits[0]) * 8;
+
+ int table_size = ARRAY_SIZE(pcm_format_value_map);
+
+ int slot_index, bit_index, table_index;
+ table_index = 0;
+ int num_written = 0;
+ for (slot_index = 0; slot_index < num_slots && table_index < table_size; slot_index++) {
+ unsigned bit_mask = 1;
+ for (bit_index = 0; bit_index < bits_per_slot && table_index < table_size; bit_index++) {
+ /* skip any 8-bit formats */
+ if (table_index >= 2 && (mask->bits[slot_index] & bit_mask) != 0) {
+ /* just return the first one which will be at least 16-bit */
+ return (int)pcm_format_value_map[table_index];
+ }
+ bit_mask <<= 1;
+ table_index++;
+ }
+ }
+
+ return PCM_FORMAT_INVALID;
+}
diff --git a/modules/usbaudio/format.h b/modules/usbaudio/format.h
new file mode 100644
index 0000000..e23935e
--- /dev/null
+++ b/modules/usbaudio/format.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_FORMAT_H
+#define ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_FORMAT_H
+
+#include <system/audio.h>
+
+#include <tinyalsa/asoundlib.h>
+
+audio_format_t get_format_for_mask(struct pcm_mask* mask);
+enum pcm_format get_pcm_format_for_mask(struct pcm_mask* mask);
+
+#endif /* ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_FORMAT_H */
diff --git a/modules/usbaudio/logging.c b/modules/usbaudio/logging.c
new file mode 100644
index 0000000..0a05511
--- /dev/null
+++ b/modules/usbaudio/logging.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "usb_logging"
+/*#define LOG_NDEBUG 0*/
+
+#include <string.h>
+
+#include <log/log.h>
+
+#include "logging.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Logging
+ */
+void log_pcm_mask(const char* mask_name, struct pcm_mask* mask)
+{
+ const size_t num_slots = ARRAY_SIZE(mask->bits);
+ const size_t bits_per_slot = (sizeof(mask->bits[0]) * 8);
+ const size_t chars_per_slot = (bits_per_slot + 1); /* comma */
+
+ const size_t BUFF_SIZE =
+ (num_slots * chars_per_slot + 2 + 1); /* brackets and null-terminator */
+ char buff[BUFF_SIZE];
+ buff[0] = '\0';
+
+ size_t slot_index, bit_index;
+ strcat(buff, "[");
+ for (slot_index = 0; slot_index < num_slots; slot_index++) {
+ unsigned bit_mask = 1;
+ for (bit_index = 0; bit_index < bits_per_slot; bit_index++) {
+ strcat(buff, (mask->bits[slot_index] & bit_mask) != 0 ? "1" : "0");
+ bit_mask <<= 1;
+ }
+ if (slot_index < num_slots - 1) {
+ strcat(buff, ",");
+ }
+ }
+ strcat(buff, "]");
+
+ ALOGV("%s: mask:%s", mask_name, buff);
+}
+
+void log_pcm_params(struct pcm_params * alsa_hw_params)
+{
+ ALOGV("usb:audio_hw - PCM_PARAM_SAMPLE_BITS min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_SAMPLE_BITS));
+ ALOGV("usb:audio_hw - PCM_PARAM_FRAME_BITS min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_FRAME_BITS),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_FRAME_BITS));
+ log_pcm_mask("PCM_PARAM_FORMAT",
+ pcm_params_get_mask(alsa_hw_params, PCM_PARAM_FORMAT));
+ log_pcm_mask("PCM_PARAM_SUBFORMAT",
+ pcm_params_get_mask(alsa_hw_params, PCM_PARAM_SUBFORMAT));
+ ALOGV("usb:audio_hw - PCM_PARAM_CHANNELS min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS));
+ ALOGV("usb:audio_hw - PCM_PARAM_RATE min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE));
+ ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_TIME min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_TIME),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_TIME));
+ ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_SIZE min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_SIZE),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_SIZE));
+ ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_BYTES min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_BYTES),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_BYTES));
+ ALOGV("usb:audio_hw - PCM_PARAM_PERIODS min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIODS),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIODS));
+ ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_TIME min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_TIME),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_TIME));
+ ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_SIZE min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_SIZE),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_SIZE));
+ ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_BYTES min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_BYTES),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_BYTES));
+ ALOGV("usb:audio_hw - PCM_PARAM_TICK_TIME min:%u, max:%u",
+ pcm_params_get_min(alsa_hw_params, PCM_PARAM_TICK_TIME),
+ pcm_params_get_max(alsa_hw_params, PCM_PARAM_TICK_TIME));
+}
+
+void log_pcm_config(struct pcm_config * config, const char* label) {
+ ALOGV("log_pcm_config() - %s", label);
+ ALOGV(" channels:%d", config->channels);
+ ALOGV(" rate:%d", config->rate);
+ ALOGV(" period_size:%d", config->period_size);
+ ALOGV(" period_count:%d", config->period_count);
+ ALOGV(" format:%d", config->format);
+#if 0
+ /* Values to use for the ALSA start, stop and silence thresholds. Setting
+ * any one of these values to 0 will cause the default tinyalsa values to be
+ * used instead. Tinyalsa defaults are as follows.
+ *
+ * start_threshold : period_count * period_size
+ * stop_threshold : period_count * period_size
+ * silence_threshold : 0
+ */
+ unsigned int start_threshold;
+ unsigned int stop_threshold;
+ unsigned int silence_threshold;
+
+ /* Minimum number of frames available before pcm_mmap_write() will actually
+ * write into the kernel buffer. Only used if the stream is opened in mmap mode
+ * (pcm_open() called with PCM_MMAP flag set). Use 0 for default.
+ */
+ int avail_min;
+#endif
+}
diff --git a/modules/usbaudio/logging.h b/modules/usbaudio/logging.h
new file mode 100644
index 0000000..b5640ed
--- /dev/null
+++ b/modules/usbaudio/logging.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_LOGGING_H
+#define ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_LOGGING_H
+
+#include <tinyalsa/asoundlib.h>
+
+void log_pcm_mask(const char* mask_name, struct pcm_mask* mask);
+void log_pcm_params(struct pcm_params * alsa_hw_params);
+void log_pcm_config(struct pcm_config * config, const char* label);
+
+#endif /* ANDROID_HARDWARE_LIBHARDWARE_MODULES_USBAUDIO_LOGGING_H */
diff --git a/tests/camera2/Android.mk b/tests/camera2/Android.mk
index 9efac0f..577ba0a 100644
--- a/tests/camera2/Android.mk
+++ b/tests/camera2/Android.mk
@@ -45,6 +45,9 @@
LOCAL_CFLAGS += -Wall -Wextra
LOCAL_MODULE:= camera2_test
+LOCAL_MODULE_STEM_32 := camera2_test
+LOCAL_MODULE_STEM_64 := camera2_test64
+LOCAL_MULTILIB := both
LOCAL_MODULE_TAGS := tests
include $(BUILD_NATIVE_TEST)
diff --git a/tests/camera2/CameraBurstTests.cpp b/tests/camera2/CameraBurstTests.cpp
index 7301fce..58763de 100644
--- a/tests/camera2/CameraBurstTests.cpp
+++ b/tests/camera2/CameraBurstTests.cpp
@@ -15,6 +15,7 @@
*/
#include <gtest/gtest.h>
+#include <inttypes.h>
#define LOG_TAG "CameraBurstTest"
//#define LOG_NDEBUG 0
@@ -218,7 +219,7 @@
CameraMetadata tmpRequest = previewRequest;
ASSERT_EQ(OK, tmpRequest.update(ANDROID_SENSOR_EXPOSURE_TIME,
&exposures[i], 1));
- ALOGV("Submitting capture request %d with exposure %lld", i,
+ ALOGV("Submitting capture request %d with exposure %"PRId64, i,
exposures[i]);
dout << "Capture request " << i << " exposure is "
<< (exposures[i]/1e6f) << std::endl;
@@ -230,11 +231,11 @@
float brightnesses[CAMERA_FRAME_BURST_COUNT];
// Get each frame (metadata) and then the buffer. Calculate brightness.
for (int i = 0; i < CAMERA_FRAME_BURST_COUNT; ++i) {
- ALOGV("Reading capture request %d with exposure %lld", i, exposures[i]);
+ ALOGV("Reading capture request %d with exposure %"PRId64, i, exposures[i]);
ASSERT_EQ(OK, mDevice->waitForNextFrame(CAMERA_FRAME_TIMEOUT));
ALOGV("Reading capture request-1 %d", i);
- CameraMetadata frameMetadata;
- ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata));
+ CaptureResult result;
+ ASSERT_EQ(OK, mDevice->getNextResult(&result));
ALOGV("Reading capture request-2 %d", i);
ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT));
@@ -310,7 +311,8 @@
* $ setenv CAMERA2_TEST_VARIABLE_BURST_DUMP_FRAMES 1
* $ /data/nativetest/camera2_test/camera2_test --gtest_filter="*VariableBurst"
*/
-TEST_F(CameraBurstTest, VariableBurst) {
+// Disable this test for now, as we need cleanup the usage of the deprecated tag quite a bit.
+TEST_F(CameraBurstTest, DISABLED_VariableBurst) {
TEST_EXTENSION_FORKING_INIT;
@@ -613,7 +615,7 @@
&durationList[i], 1));
ASSERT_EQ(OK, tmpRequest.update(ANDROID_SENSOR_SENSITIVITY,
&sensitivityList[i], 1));
- ALOGV("Submitting capture %d with exposure %lld, frame duration %lld, sensitivity %d",
+ ALOGV("Submitting capture %zu with exposure %"PRId64", frame duration %"PRId64", sensitivity %d",
i, expList[i], durationList[i], sensitivityList[i]);
dout << "Capture request " << i <<
": exposure is " << (expList[i]/1e6f) << " ms" <<
@@ -631,7 +633,7 @@
// Get each frame (metadata) and then the buffer. Calculate brightness.
for (size_t i = 0; i < expList.size(); ++i) {
- ALOGV("Reading request %d", i);
+ ALOGV("Reading request %zu", i);
dout << "Waiting for capture " << i << ": " <<
" exposure " << (expList[i]/1e6f) << " ms," <<
" frame duration " << (durationList[i]/1e6f) << " ms," <<
@@ -644,10 +646,10 @@
if (durationList[i] * 2 > waitLimit) waitLimit = durationList[i] * 2;
ASSERT_EQ(OK, mDevice->waitForNextFrame(waitLimit));
- ALOGV("Reading capture request-1 %d", i);
- CameraMetadata frameMetadata;
- ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata));
- ALOGV("Reading capture request-2 %d", i);
+ ALOGV("Reading capture request-1 %zu", i);
+ CaptureResult result;
+ ASSERT_EQ(OK, mDevice->getNextResult(&result));
+ ALOGV("Reading capture request-2 %zu", i);
ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT));
ALOGV("We got the frame now");
@@ -668,7 +670,7 @@
avgBrightness = 255;
}
- ALOGV("Total brightness for frame %d was %lld (underexposed %d, "
+ ALOGV("Total brightness for frame %zu was %lld (underexposed %d, "
"overexposed %d), avg %f", i, brightness, underexposed,
overexposed, avgBrightness);
dout << "Average brightness (frame " << i << ") was " << avgBrightness
@@ -711,7 +713,7 @@
if (dumpFrames) {
String8 dumpName =
- String8::format("/data/local/tmp/camera2_test_variable_burst_frame_%03d.yuv", i);
+ String8::format("/data/local/tmp/camera2_test_variable_burst_frame_%03zu.yuv", i);
dout << " Writing YUV dump to " << dumpName << std::endl;
DumpYuvToFile(dumpName, imgBuffer);
}
diff --git a/tests/camera2/CameraFrameTests.cpp b/tests/camera2/CameraFrameTests.cpp
index e78a862..3c5abf7 100644
--- a/tests/camera2/CameraFrameTests.cpp
+++ b/tests/camera2/CameraFrameTests.cpp
@@ -115,8 +115,8 @@
ALOGV("Reading capture request %d", i);
ASSERT_EQ(OK, mDevice->waitForNextFrame(CAMERA_FRAME_TIMEOUT));
- CameraMetadata frameMetadata;
- ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata));
+ CaptureResult result;
+ ASSERT_EQ(OK, mDevice->getNextResult(&result));
// wait for buffer to be available
ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT));
diff --git a/tests/camera2/CameraModuleFixture.h b/tests/camera2/CameraModuleFixture.h
index acf41e1..0bb0e7d 100644
--- a/tests/camera2/CameraModuleFixture.h
+++ b/tests/camera2/CameraModuleFixture.h
@@ -97,6 +97,8 @@
*device = new Camera2Device(cameraID);
break;
case CAMERA_DEVICE_API_VERSION_3_0:
+ case CAMERA_DEVICE_API_VERSION_3_1:
+ case CAMERA_DEVICE_API_VERSION_3_2:
*device = new Camera3Device(cameraID);
break;
default:
diff --git a/tests/camera2/CameraMultiStreamTests.cpp b/tests/camera2/CameraMultiStreamTests.cpp
index a78950c..2742466 100644
--- a/tests/camera2/CameraMultiStreamTests.cpp
+++ b/tests/camera2/CameraMultiStreamTests.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <inttypes.h>
#define LOG_TAG "CameraMultiStreamTest"
//#define LOG_NDEBUG 0
#include "CameraStreamFixture.h"
@@ -181,11 +182,13 @@
mHeight(height) {
mFormat = param.mFormat;
if (useCpuConsumer) {
- sp<BufferQueue> bq = new BufferQueue();
- mCpuConsumer = new CpuConsumer(bq, param.mHeapCount);
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mCpuConsumer = new CpuConsumer(consumer, param.mHeapCount);
mCpuConsumer->setName(String8(
"CameraMultiStreamTest::mCpuConsumer"));
- mNativeWindow = new Surface(bq);
+ mNativeWindow = new Surface(producer);
} else {
// Render the stream to screen.
mCpuConsumer = NULL;
@@ -205,7 +208,7 @@
void SetUp() {
ASSERT_EQ(OK,
mDevice->createStream(mNativeWindow,
- mWidth, mHeight, mFormat, /*size (for jpegs)*/0,
+ mWidth, mHeight, mFormat,
&mStreamId));
ASSERT_NE(-1, mStreamId);
@@ -353,7 +356,7 @@
ASSERT_EQ(OK, request.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposures[i], 1));
ASSERT_EQ(OK, request.update(ANDROID_SENSOR_SENSITIVITY, &sensitivities[i], 1));
ASSERT_EQ(OK, mDevice->capture(request));
- ALOGV("Submitting request with: id %d with exposure %lld, sensitivity %d",
+ ALOGV("Submitting request with: id %d with exposure %"PRId64", sensitivity %d",
*requestIdStart, exposures[i], sensitivities[i]);
if (CAMERA_MULTI_STREAM_DEBUGGING) {
request.dump(STDOUT_FILENO);
@@ -368,7 +371,7 @@
// Set wait limit based on expected frame duration.
int64_t waitLimit = CAMERA_FRAME_TIMEOUT;
for (size_t i = 0; i < requestCount; i++) {
- ALOGV("Reading request result %d", i);
+ ALOGV("Reading request result %zu", i);
/**
* Raise the timeout to be at least twice as long as the exposure
@@ -378,11 +381,13 @@
waitLimit = exposures[i] * EXP_WAIT_MULTIPLIER;
}
+ CaptureResult result;
CameraMetadata frameMetadata;
int32_t resultRequestId;
do {
ASSERT_EQ(OK, mDevice->waitForNextFrame(waitLimit));
- ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata));
+ ASSERT_EQ(OK, mDevice->getNextResult(&result));
+ frameMetadata = result.mMetadata;
camera_metadata_entry_t resultEntry = frameMetadata.find(ANDROID_REQUEST_ID);
ASSERT_EQ(1u, resultEntry.count);
@@ -392,7 +397,7 @@
}
} while (resultRequestId != targetRequestId);
targetRequestId++;
- ALOGV("Got capture burst result for request %d", i);
+ ALOGV("Got capture burst result for request %zu", i);
// Validate capture result
if (CAMERA_MULTI_STREAM_DEBUGGING) {
@@ -411,7 +416,7 @@
captureBurstTimes.push_back(systemTime());
CpuConsumer::LockedBuffer imgBuffer;
ASSERT_EQ(OK, consumer->lockNextBuffer(&imgBuffer));
- ALOGV("Got capture buffer for request %d", i);
+ ALOGV("Got capture buffer for request %zu", i);
/**
* TODO: Validate capture buffer. Current brightness calculation
@@ -462,7 +467,8 @@
*
* 2. Manual control(gain/exposure) of mutiple burst capture.
*/
-TEST_F(CameraMultiStreamTest, MultiBurst) {
+// Disable this test for now, as we need cleanup the usage of the deprecated tag quite a bit.
+TEST_F(CameraMultiStreamTest, DISABLED_MultiBurst) {
TEST_EXTENSION_FORKING_INIT;
@@ -523,7 +529,7 @@
minFrameDuration = DEFAULT_FRAME_DURATION;
}
- ALOGV("targeted minimal frame duration is: %lldns", minFrameDuration);
+ ALOGV("targeted minimal frame duration is: %"PRId64"ns", minFrameDuration);
data = &(availableJpegSizes.data.i32[0]);
count = availableJpegSizes.count;
@@ -643,7 +649,7 @@
ASSERT_EQ(OK, previewRequest.update(
ANDROID_SENSOR_EXPOSURE_TIME,
&exposures[i], 1));
- ALOGV("Submitting preview request %d with exposure %lld",
+ ALOGV("Submitting preview request %zu with exposure %"PRId64,
i, exposures[i]);
ASSERT_EQ(OK, mDevice->setStreamingRequest(previewRequest));
diff --git a/tests/camera2/CameraStreamFixture.h b/tests/camera2/CameraStreamFixture.h
index a1f3aae..f56daf0 100644
--- a/tests/camera2/CameraStreamFixture.h
+++ b/tests/camera2/CameraStreamFixture.h
@@ -161,17 +161,19 @@
sp<CameraDeviceBase> device = mDevice;
CameraStreamParams p = mParam;
- sp<BufferQueue> bq = new BufferQueue();
- mCpuConsumer = new CpuConsumer(bq, p.mHeapCount);
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mCpuConsumer = new CpuConsumer(consumer, p.mHeapCount);
mCpuConsumer->setName(String8("CameraStreamTest::mCpuConsumer"));
- mNativeWindow = new Surface(bq);
+ mNativeWindow = new Surface(producer);
int format = MapAutoFormat(p.mFormat);
ASSERT_EQ(OK,
device->createStream(mNativeWindow,
- mWidth, mHeight, format, /*size (for jpegs)*/0,
+ mWidth, mHeight, format,
&mStreamId));
ASSERT_NE(-1, mStreamId);
diff --git a/tests/camera2/camera2.cpp b/tests/camera2/camera2.cpp
index 600d440..e3e7d9a 100644
--- a/tests/camera2/camera2.cpp
+++ b/tests/camera2/camera2.cpp
@@ -172,13 +172,6 @@
err = listener.getNotificationsFrom(dev);
if (err != OK) return err;
- vendor_tag_query_ops_t *vendor_metadata_tag_ops;
- err = dev->ops->get_metadata_vendor_tag_ops(dev, &vendor_metadata_tag_ops);
- if (err != OK) return err;
-
- err = set_camera_metadata_vendor_tag_ops(vendor_metadata_tag_ops);
- if (err != OK) return err;
-
return OK;
}
@@ -388,8 +381,10 @@
ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
- sp<BufferQueue> bq = new BufferQueue();
- sp<CpuConsumer> rawConsumer = new CpuConsumer(bq, 1);
+ sp<IGraphicBufferProducer> bqProducer;
+ sp<IGraphicBufferConsumer> bqConsumer;
+ BufferQueue::createBufferQueue(&bqProducer, &bqConsumer);
+ sp<CpuConsumer> rawConsumer = new CpuConsumer(bqConsumer, 1);
sp<FrameWaiter> rawWaiter = new FrameWaiter();
rawConsumer->setFrameAvailableListener(rawWaiter);
@@ -420,7 +415,7 @@
int streamId;
ASSERT_NO_FATAL_FAILURE(
- setUpStream(bq, width, height, format, &streamId) );
+ setUpStream(bqProducer, width, height, format, &streamId) );
camera_metadata_t *request;
request = allocate_camera_metadata(20, 2000);
@@ -522,8 +517,10 @@
ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
- sp<BufferQueue> bq = new BufferQueue();
- sp<CpuConsumer> rawConsumer = new CpuConsumer(bq, 1);
+ sp<IGraphicBufferProducer> bqProducer;
+ sp<IGraphicBufferConsumer> bqConsumer;
+ BufferQueue::createBufferQueue(&bqProducer, &bqConsumer);
+ sp<CpuConsumer> rawConsumer = new CpuConsumer(bqConsumer, 1);
sp<FrameWaiter> rawWaiter = new FrameWaiter();
rawConsumer->setFrameAvailableListener(rawWaiter);
@@ -554,7 +551,7 @@
int streamId;
ASSERT_NO_FATAL_FAILURE(
- setUpStream(bq, width, height, format, &streamId) );
+ setUpStream(bqProducer, width, height, format, &streamId) );
camera_metadata_t *request;
request = allocate_camera_metadata(20, 2000);
@@ -703,8 +700,10 @@
ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
- sp<BufferQueue> bq = new BufferQueue();
- sp<CpuConsumer> jpegConsumer = new CpuConsumer(bq, 1);
+ sp<IGraphicBufferProducer> bqProducer;
+ sp<IGraphicBufferConsumer> bqConsumer;
+ BufferQueue::createBufferQueue(&bqProducer, &bqConsumer);
+ sp<CpuConsumer> jpegConsumer = new CpuConsumer(bqConsumer, 1);
sp<FrameWaiter> jpegWaiter = new FrameWaiter();
jpegConsumer->setFrameAvailableListener(jpegWaiter);
@@ -723,7 +722,7 @@
int streamId;
ASSERT_NO_FATAL_FAILURE(
- setUpStream(bq, width, height, format, &streamId) );
+ setUpStream(bqProducer, width, height, format, &streamId) );
camera_metadata_t *request;
request = allocate_camera_metadata(20, 2000);
diff --git a/tests/fingerprint/Android.mk b/tests/fingerprint/Android.mk
new file mode 100644
index 0000000..4f03c39
--- /dev/null
+++ b/tests/fingerprint/Android.mk
@@ -0,0 +1,19 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ fingerprint_tests.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ liblog \
+ libhardware \
+
+#LOCAL_C_INCLUDES += \
+# system/media/camera/include \
+
+LOCAL_CFLAGS += -Wall -Wextra
+
+LOCAL_MODULE:= fingerprint_tests
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_NATIVE_TEST)
diff --git a/tests/fingerprint/fingerprint_test_fixtures.h b/tests/fingerprint/fingerprint_test_fixtures.h
new file mode 100644
index 0000000..a526203
--- /dev/null
+++ b/tests/fingerprint/fingerprint_test_fixtures.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANDROID_HAL_FINGERPRINT_TEST_COMMON__
+#define __ANDROID_HAL_FINGERPRINT_TEST_COMMON__
+
+#include <gtest/gtest.h>
+#include <hardware/hardware.h>
+#include <hardware/fingerprint.h>
+
+namespace tests {
+
+static const uint16_t kVersion = HARDWARE_MODULE_API_VERSION(1, 0);
+
+class FingerprintModule : public testing::Test {
+ public:
+ FingerprintModule() :
+ fp_module_(NULL) {}
+ ~FingerprintModule() {}
+ protected:
+ virtual void SetUp() {
+ const hw_module_t *hw_module = NULL;
+ ASSERT_EQ(0, hw_get_module(FINGERPRINT_HARDWARE_MODULE_ID, &hw_module))
+ << "Can't get fingerprint module";
+ ASSERT_TRUE(NULL != hw_module)
+ << "hw_get_module didn't return a valid fingerprint module";
+
+ fp_module_ = reinterpret_cast<const fingerprint_module_t*>(hw_module);
+ }
+ const fingerprint_module_t* fp_module() { return fp_module_; }
+ private:
+ const fingerprint_module_t *fp_module_;
+};
+
+class FingerprintDevice : public FingerprintModule {
+ public:
+ FingerprintDevice() :
+ fp_device_(NULL) {}
+ ~FingerprintDevice() {}
+ protected:
+ virtual void SetUp() {
+ FingerprintModule::SetUp();
+ hw_device_t *device = NULL;
+ ASSERT_TRUE(NULL != fp_module()->common.methods->open)
+ << "Fingerprint open() is unimplemented";
+ ASSERT_EQ(0, fp_module()->common.methods->open(
+ (const hw_module_t*)fp_module(), NULL, &device))
+ << "Can't open fingerprint device";
+ ASSERT_TRUE(NULL != device)
+ << "Fingerprint open() returned a NULL device";
+ ASSERT_EQ(kVersion, device->version)
+ << "Unsupported version";
+ fp_device_ = reinterpret_cast<fingerprint_device_t*>(device);
+ }
+ fingerprint_device_t* fp_device() { return fp_device_; }
+ private:
+ fingerprint_device_t *fp_device_;
+};
+
+} // namespace tests
+
+#endif // __ANDROID_HAL_FINGERPRINT_TEST_COMMON__
diff --git a/tests/fingerprint/fingerprint_tests.cpp b/tests/fingerprint/fingerprint_tests.cpp
new file mode 100644
index 0000000..4463751
--- /dev/null
+++ b/tests/fingerprint/fingerprint_tests.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include "fingerprint_test_fixtures.h"
+
+namespace tests {
+
+TEST_F(FingerprintDevice, isThereEnroll) {
+ ASSERT_TRUE(NULL != fp_device()->enroll)
+ << "enroll() function is not implemented";
+}
+
+TEST_F(FingerprintDevice, isThereRemove) {
+ ASSERT_TRUE(NULL != fp_device()->remove)
+ << "remove() function is not implemented";
+}
+
+TEST_F(FingerprintDevice, isThereSetNotify) {
+ ASSERT_TRUE(NULL != fp_device()->set_notify)
+ << "set_notify() function is not implemented";
+}
+
+} // namespace tests
diff --git a/tests/hardware/Android.mk b/tests/hardware/Android.mk
new file mode 100644
index 0000000..02540c9
--- /dev/null
+++ b/tests/hardware/Android.mk
@@ -0,0 +1,12 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := static-hal-check
+LOCAL_SRC_FILES := struct-size.cpp struct-offset.cpp struct-last.cpp
+LOCAL_SHARED_LIBRARIES := libhardware
+LOCAL_CFLAGS := -std=gnu++11 -O0
+
+LOCAL_C_INCLUDES += \
+ system/media/camera/include
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/tests/hardware/struct-last.cpp b/tests/hardware/struct-last.cpp
new file mode 100644
index 0000000..44a7b2d
--- /dev/null
+++ b/tests/hardware/struct-last.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstddef>
+#include <system/window.h>
+#include <hardware/hardware.h>
+#include <hardware/sensors.h>
+#include <hardware/fb.h>
+#include <hardware/hwcomposer.h>
+#include <hardware/gralloc.h>
+#include <hardware/consumerir.h>
+#include <hardware/camera_common.h>
+#include <hardware/camera3.h>
+
+#define GET_PADDING(align, size) (((align) - ((size) % (align))) % (align))
+
+#define CHECK_LAST_MEMBER(type, member) \
+do { \
+static constexpr size_t calc_size = offsetof(type, member) + sizeof(((type *)0)->member); \
+static_assert(sizeof(type) == calc_size + GET_PADDING(alignof(type), calc_size), \
+"" #member " is not the last element of " #type); \
+} while (0)
+
+void CheckLastMembers(void) {
+ //Types defined in hardware.h
+ CHECK_LAST_MEMBER(hw_module_t, reserved);
+ CHECK_LAST_MEMBER(hw_device_t, close);
+
+ //Types defined in sensors.h
+ CHECK_LAST_MEMBER(sensors_vec_t, reserved);
+ CHECK_LAST_MEMBER(sensors_event_t, reserved1);
+ CHECK_LAST_MEMBER(struct sensor_t, reserved);
+ CHECK_LAST_MEMBER(sensors_poll_device_1_t, reserved_procs);
+
+ //Types defined in fb.h
+ CHECK_LAST_MEMBER(framebuffer_device_t, reserved_proc);
+
+ //Types defined in hwcomposer.h
+ CHECK_LAST_MEMBER(hwc_layer_1_t, reserved);
+ CHECK_LAST_MEMBER(hwc_composer_device_1_t, reserved_proc);
+
+ //Types defined in gralloc.h
+ CHECK_LAST_MEMBER(gralloc_module_t, reserved_proc);
+ CHECK_LAST_MEMBER(alloc_device_t, reserved_proc);
+
+ //Types defined in consumerir.h
+ CHECK_LAST_MEMBER(consumerir_device_t, reserved);
+
+ //Types defined in camera_common.h
+ CHECK_LAST_MEMBER(vendor_tag_ops_t, reserved);
+ CHECK_LAST_MEMBER(camera_module_t, reserved);
+
+ //Types defined in camera3.h
+ CHECK_LAST_MEMBER(camera3_device_ops_t, reserved);
+}
+
diff --git a/tests/hardware/struct-offset.cpp b/tests/hardware/struct-offset.cpp
new file mode 100644
index 0000000..a7ff797
--- /dev/null
+++ b/tests/hardware/struct-offset.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstddef>
+#include <system/window.h>
+#include <hardware/hardware.h>
+#include <hardware/sensors.h>
+#include <hardware/fb.h>
+#include <hardware/hwcomposer.h>
+#include <hardware/gralloc.h>
+#include <hardware/consumerir.h>
+#include <hardware/camera_common.h>
+#include <hardware/camera3.h>
+
+//Ideally this would print type.member; instead we need to rely on the line number from the output
+template <size_t actual, size_t expected> void check_member(void) {
+ static_assert(actual == expected, "");
+}
+
+#ifdef __LP64__
+#define CHECK_MEMBER_AT(type, member, off32, off64) \
+ check_member<offsetof(type, member), off64>()
+#else
+#define CHECK_MEMBER_AT(type, member, off32, off64) \
+ check_member<offsetof(type, member), off32>()
+#endif
+
+void CheckOffsets(void) {
+ //Types defined in hardware.h
+ CHECK_MEMBER_AT(hw_module_t, tag, 0, 0);
+ CHECK_MEMBER_AT(hw_module_t, module_api_version, 4, 4);
+ CHECK_MEMBER_AT(hw_module_t, hal_api_version, 6, 6);
+ CHECK_MEMBER_AT(hw_module_t, id, 8, 8);
+ CHECK_MEMBER_AT(hw_module_t, name, 12, 16);
+ CHECK_MEMBER_AT(hw_module_t, author, 16, 24);
+ CHECK_MEMBER_AT(hw_module_t, methods, 20, 32);
+ CHECK_MEMBER_AT(hw_module_t, dso, 24, 40);
+ CHECK_MEMBER_AT(hw_module_t, reserved, 28, 48);
+
+ CHECK_MEMBER_AT(hw_device_t, tag, 0, 0);
+ CHECK_MEMBER_AT(hw_device_t, version, 4, 4);
+ CHECK_MEMBER_AT(hw_device_t, module, 8, 8);
+ CHECK_MEMBER_AT(hw_device_t, reserved, 12, 16);
+ CHECK_MEMBER_AT(hw_device_t, close, 60, 112);
+
+ //Types defined in sensors.h
+ CHECK_MEMBER_AT(sensors_vec_t, v, 0, 0);
+ CHECK_MEMBER_AT(sensors_vec_t, x, 0, 0);
+ CHECK_MEMBER_AT(sensors_vec_t, y, 4, 4);
+ CHECK_MEMBER_AT(sensors_vec_t, z, 8, 8);
+ CHECK_MEMBER_AT(sensors_vec_t, azimuth, 0, 0);
+ CHECK_MEMBER_AT(sensors_vec_t, pitch, 4, 4);
+ CHECK_MEMBER_AT(sensors_vec_t, roll, 8, 8);
+ CHECK_MEMBER_AT(sensors_vec_t, status, 12, 12);
+ CHECK_MEMBER_AT(sensors_vec_t, reserved, 13, 13);
+
+ CHECK_MEMBER_AT(sensors_event_t, version, 0, 0);
+ CHECK_MEMBER_AT(sensors_event_t, sensor, 4, 4);
+ CHECK_MEMBER_AT(sensors_event_t, type, 8, 8);
+ CHECK_MEMBER_AT(sensors_event_t, reserved0, 12, 12);
+ CHECK_MEMBER_AT(sensors_event_t, timestamp, 16, 16);
+ CHECK_MEMBER_AT(sensors_event_t, data, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, acceleration, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, magnetic, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, orientation, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, gyro, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, temperature, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, distance, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, light, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, pressure, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, relative_humidity, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, uncalibrated_gyro, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, uncalibrated_magnetic, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, meta_data, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, u64, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, u64.data, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, u64.step_counter, 24, 24);
+ CHECK_MEMBER_AT(sensors_event_t, flags, 88, 88);
+ CHECK_MEMBER_AT(sensors_event_t, reserved1, 92, 92);
+
+ CHECK_MEMBER_AT(struct sensor_t, name, 0, 0);
+ CHECK_MEMBER_AT(struct sensor_t, vendor, 4, 8);
+ CHECK_MEMBER_AT(struct sensor_t, version, 8, 16);
+ CHECK_MEMBER_AT(struct sensor_t, handle, 12, 20);
+ CHECK_MEMBER_AT(struct sensor_t, type, 16, 24);
+ CHECK_MEMBER_AT(struct sensor_t, maxRange, 20, 28);
+ CHECK_MEMBER_AT(struct sensor_t, resolution, 24, 32);
+ CHECK_MEMBER_AT(struct sensor_t, power, 28, 36);
+ CHECK_MEMBER_AT(struct sensor_t, minDelay, 32, 40);
+ CHECK_MEMBER_AT(struct sensor_t, fifoReservedEventCount, 36, 44);
+ CHECK_MEMBER_AT(struct sensor_t, fifoMaxEventCount, 40, 48);
+ CHECK_MEMBER_AT(struct sensor_t, stringType, 44, 56);
+ CHECK_MEMBER_AT(struct sensor_t, requiredPermission, 48, 64);
+ CHECK_MEMBER_AT(struct sensor_t, maxDelay, 52, 72);
+ CHECK_MEMBER_AT(struct sensor_t, flags, 56, 80);
+ CHECK_MEMBER_AT(struct sensor_t, reserved, 60, 88);
+
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, v0, 0, 0);
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, common, 0, 0);
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, activate, 64, 120);
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, setDelay, 68, 128);
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, poll, 72, 136);
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, batch, 76, 144);
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, flush, 80, 152);
+ CHECK_MEMBER_AT(sensors_poll_device_1_t, reserved_procs, 84, 160);
+
+ //Types defined in fb.h
+ CHECK_MEMBER_AT(framebuffer_device_t, common, 0, 0);
+ CHECK_MEMBER_AT(framebuffer_device_t, flags, 64, 120);
+ CHECK_MEMBER_AT(framebuffer_device_t, width, 68, 124);
+ CHECK_MEMBER_AT(framebuffer_device_t, height, 72, 128);
+ CHECK_MEMBER_AT(framebuffer_device_t, stride, 76, 132);
+ CHECK_MEMBER_AT(framebuffer_device_t, format, 80, 136);
+ CHECK_MEMBER_AT(framebuffer_device_t, xdpi, 84, 140);
+ CHECK_MEMBER_AT(framebuffer_device_t, ydpi, 88, 144);
+ CHECK_MEMBER_AT(framebuffer_device_t, fps, 92, 148);
+ CHECK_MEMBER_AT(framebuffer_device_t, minSwapInterval, 96, 152);
+ CHECK_MEMBER_AT(framebuffer_device_t, maxSwapInterval, 100, 156);
+ CHECK_MEMBER_AT(framebuffer_device_t, numFramebuffers, 104, 160);
+ CHECK_MEMBER_AT(framebuffer_device_t, reserved, 108, 164);
+ CHECK_MEMBER_AT(framebuffer_device_t, setSwapInterval, 136, 192);
+ CHECK_MEMBER_AT(framebuffer_device_t, setUpdateRect, 140, 200);
+ CHECK_MEMBER_AT(framebuffer_device_t, post, 144, 208);
+ CHECK_MEMBER_AT(framebuffer_device_t, compositionComplete, 148, 216);
+ CHECK_MEMBER_AT(framebuffer_device_t, dump, 152, 224);
+ CHECK_MEMBER_AT(framebuffer_device_t, enableScreen, 156, 232);
+ CHECK_MEMBER_AT(framebuffer_device_t, reserved_proc, 160, 240);
+
+ //Types defined in hwcomposer.h
+ CHECK_MEMBER_AT(hwc_layer_1_t, compositionType, 0, 0);
+ CHECK_MEMBER_AT(hwc_layer_1_t, hints, 4, 4);
+ CHECK_MEMBER_AT(hwc_layer_1_t, flags, 8, 8);
+ CHECK_MEMBER_AT(hwc_layer_1_t, backgroundColor, 12, 16);
+ CHECK_MEMBER_AT(hwc_layer_1_t, handle, 12, 16);
+ CHECK_MEMBER_AT(hwc_layer_1_t, transform, 16, 24);
+ CHECK_MEMBER_AT(hwc_layer_1_t, blending, 20, 28);
+ CHECK_MEMBER_AT(hwc_layer_1_t, sourceCropi, 24, 32);
+ CHECK_MEMBER_AT(hwc_layer_1_t, sourceCrop, 24, 32);
+ CHECK_MEMBER_AT(hwc_layer_1_t, sourceCropf, 24, 32);
+ CHECK_MEMBER_AT(hwc_layer_1_t, displayFrame, 40, 48);
+ CHECK_MEMBER_AT(hwc_layer_1_t, visibleRegionScreen, 56, 64);
+ CHECK_MEMBER_AT(hwc_layer_1_t, acquireFenceFd, 64, 80);
+ CHECK_MEMBER_AT(hwc_layer_1_t, releaseFenceFd, 68, 84);
+ CHECK_MEMBER_AT(hwc_layer_1_t, planeAlpha, 72, 88);
+ CHECK_MEMBER_AT(hwc_layer_1_t, _pad, 73, 89);
+
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, common, 0, 0);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, prepare, 64, 120);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, set, 68, 128);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, eventControl, 72, 136);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, blank, 76, 144);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, query, 80, 152);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, registerProcs, 84, 160);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, dump, 88, 168);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, getDisplayConfigs, 92, 176);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, getDisplayAttributes, 96, 184);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, getActiveConfig, 100, 192);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, setActiveConfig, 104, 200);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, setCursorPositionAsync, 108, 208);
+ CHECK_MEMBER_AT(hwc_composer_device_1_t, reserved_proc, 112, 216);
+
+ //Types defined in gralloc.h
+ CHECK_MEMBER_AT(gralloc_module_t, common, 0, 0);
+ CHECK_MEMBER_AT(gralloc_module_t, registerBuffer, 128, 248);
+ CHECK_MEMBER_AT(gralloc_module_t, unregisterBuffer, 132, 256);
+ CHECK_MEMBER_AT(gralloc_module_t, lock, 136, 264);
+ CHECK_MEMBER_AT(gralloc_module_t, unlock, 140, 272);
+ CHECK_MEMBER_AT(gralloc_module_t, perform, 144, 280);
+ CHECK_MEMBER_AT(gralloc_module_t, lock_ycbcr, 148, 288);
+ CHECK_MEMBER_AT(gralloc_module_t, lockAsync, 152, 296);
+ CHECK_MEMBER_AT(gralloc_module_t, unlockAsync, 156, 304);
+ CHECK_MEMBER_AT(gralloc_module_t, lockAsync_ycbcr, 160, 312);
+ CHECK_MEMBER_AT(gralloc_module_t, reserved_proc, 164, 320);
+
+ CHECK_MEMBER_AT(alloc_device_t, common, 0, 0);
+ CHECK_MEMBER_AT(alloc_device_t, alloc, 64, 120);
+ CHECK_MEMBER_AT(alloc_device_t, free, 68, 128);
+ CHECK_MEMBER_AT(alloc_device_t, dump, 72, 136);
+ CHECK_MEMBER_AT(alloc_device_t, reserved_proc, 76, 144);
+
+ //Types defined in consumerir.h
+ CHECK_MEMBER_AT(consumerir_device_t, common, 0, 0);
+ CHECK_MEMBER_AT(consumerir_device_t, transmit, 64, 120);
+ CHECK_MEMBER_AT(consumerir_device_t, get_num_carrier_freqs, 68, 128);
+ CHECK_MEMBER_AT(consumerir_device_t, get_carrier_freqs, 72, 136);
+ CHECK_MEMBER_AT(consumerir_device_t, reserved, 76, 144);
+
+ //Types defined in camera_common.h
+ CHECK_MEMBER_AT(vendor_tag_ops_t, get_tag_count, 0, 0);
+ CHECK_MEMBER_AT(vendor_tag_ops_t, get_all_tags, 4, 8);
+ CHECK_MEMBER_AT(vendor_tag_ops_t, get_section_name, 8, 16);
+ CHECK_MEMBER_AT(vendor_tag_ops_t, get_tag_name, 12, 24);
+ CHECK_MEMBER_AT(vendor_tag_ops_t, get_tag_type, 16, 32);
+ CHECK_MEMBER_AT(vendor_tag_ops_t, reserved, 20, 40);
+
+ CHECK_MEMBER_AT(camera_module_t, common, 0, 0);
+ CHECK_MEMBER_AT(camera_module_t, get_number_of_cameras, 128, 248);
+ CHECK_MEMBER_AT(camera_module_t, get_camera_info, 132, 256);
+ CHECK_MEMBER_AT(camera_module_t, set_callbacks, 136, 264);
+ CHECK_MEMBER_AT(camera_module_t, get_vendor_tag_ops, 140, 272);
+ CHECK_MEMBER_AT(camera_module_t, open_legacy, 144, 280);
+ CHECK_MEMBER_AT(camera_module_t, reserved, 148, 288);
+
+ //Types defined in camera3.h
+ CHECK_MEMBER_AT(camera3_device_ops_t, initialize, 0, 0);
+ CHECK_MEMBER_AT(camera3_device_ops_t, configure_streams, 4, 8);
+ CHECK_MEMBER_AT(camera3_device_ops_t, register_stream_buffers, 8, 16);
+ CHECK_MEMBER_AT(camera3_device_ops_t, construct_default_request_settings, 12, 24);
+ CHECK_MEMBER_AT(camera3_device_ops_t, process_capture_request, 16, 32);
+ CHECK_MEMBER_AT(camera3_device_ops_t, get_metadata_vendor_tag_ops, 20, 40);
+ CHECK_MEMBER_AT(camera3_device_ops_t, dump, 24, 48);
+ CHECK_MEMBER_AT(camera3_device_ops_t, flush, 28, 56);
+ CHECK_MEMBER_AT(camera3_device_ops_t, reserved, 32, 64);
+}
+
diff --git a/tests/hardware/struct-size.cpp b/tests/hardware/struct-size.cpp
new file mode 100644
index 0000000..4207ea8
--- /dev/null
+++ b/tests/hardware/struct-size.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <system/window.h>
+#include <hardware/hardware.h>
+#include <hardware/sensors.h>
+#include <hardware/fb.h>
+#include <hardware/hwcomposer.h>
+#include <hardware/gralloc.h>
+#include <hardware/consumerir.h>
+#include <hardware/camera_common.h>
+#include <hardware/camera3.h>
+
+template<size_t> static constexpr size_t CheckSizeHelper(size_t, size_t);
+
+template<> constexpr size_t CheckSizeHelper<4>(size_t size32, size_t size64) {
+ return size32;
+}
+
+template<> constexpr size_t CheckSizeHelper<8>(size_t size32, size_t size64) {
+ return size64;
+}
+
+template<typename T, size_t size32, size_t size64> static void CheckTypeSize() {
+ const size_t mySize = CheckSizeHelper<sizeof(void *)>(size32, size64);
+
+ static_assert(sizeof(T) == mySize, "struct is the wrong size");
+}
+
+void CheckSizes(void) {
+ //Types defined in hardware.h
+ CheckTypeSize<hw_module_t, 128, 248>();
+ CheckTypeSize<hw_device_t, 64, 120>();
+
+ //Types defined in sensors.h
+ CheckTypeSize<sensors_vec_t, 16, 16>();
+ CheckTypeSize<sensors_event_t, 104, 104>();
+ CheckTypeSize<struct sensor_t, 68, 104>();
+ CheckTypeSize<sensors_poll_device_1_t, 116, 224>();
+
+ //Types defined in fb.h
+ CheckTypeSize<framebuffer_device_t, 184, 288>();
+
+ //Types defined in hwcomposer.h
+ CheckTypeSize<hwc_layer_1_t, 96, 120>();
+ CheckTypeSize<hwc_composer_device_1_t, 116, 224>();
+
+ //Types defined in gralloc.h
+ CheckTypeSize<gralloc_module_t, 176, 344>();
+ CheckTypeSize<alloc_device_t, 104, 200>();
+
+ //Types defined in consumerir.h
+ CheckTypeSize<consumerir_device_t, 96, 184>();
+
+ //Types defined in camera_common.h
+ CheckTypeSize<vendor_tag_ops_t, 52, 104>();
+ CheckTypeSize<camera_module_t, 176, 344>();
+
+ //Types defined in camera3.h
+ CheckTypeSize<camera3_device_ops_t, 64, 128>();
+}
+
diff --git a/tests/hwc/test-arrows.c b/tests/hwc/test-arrows.c
index a35faa7..12e7c8f 100644
--- a/tests/hwc/test-arrows.c
+++ b/tests/hwc/test-arrows.c
@@ -140,7 +140,7 @@
int main(int argc, char **argv) {
EGLDisplay display;
EGLSurface surface;
- int w, h, count;
+ int w, h, count = 0;
if (argc > 1)
count = atoi(argv[1]);