Merge "Remove offset checks for reserved fields in sensor_t and sensors_event_t."
diff --git a/include/hardware/audio_alsaops.h b/include/hardware/audio_alsaops.h
new file mode 100644
index 0000000..0d266ff
--- /dev/null
+++ b/include/hardware/audio_alsaops.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains shared utility functions to handle the tinyalsa
+ * implementation for Android internal audio, generally in the hardware layer.
+ * Some routines may log a fatal error on failure, as noted.
+ */
+
+#ifndef ANDROID_AUDIO_ALSAOPS_H
+#define ANDROID_AUDIO_ALSAOPS_H
+
+#include <cutils/log.h>
+#include <system/audio.h>
+#include <tinyalsa/asoundlib.h>
+
+__BEGIN_DECLS
+
+/* Converts audio_format to pcm_format.
+ * Parameters:
+ * format the audio_format_t to convert
+ *
+ * Logs a fatal error if format is not a valid convertible audio_format_t.
+ */
+static inline enum pcm_format pcm_format_from_audio_format(audio_format_t format)
+{
+ switch (format) {
+#ifdef HAVE_BIG_ENDIAN
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return PCM_FORMAT_S16_BE;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ return PCM_FORMAT_S24_3BE;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ return PCM_FORMAT_S32_BE;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ return PCM_FORMAT_S24_BE;
+#else
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return PCM_FORMAT_S16_LE;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ return PCM_FORMAT_S24_3LE;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ return PCM_FORMAT_S32_LE;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ return PCM_FORMAT_S24_LE;
+#endif
+ case AUDIO_FORMAT_PCM_FLOAT: /* there is no equivalent for float */
+ default:
+ LOG_ALWAYS_FATAL("pcm_format_from_audio_format: invalid audio format %#x", format);
+ return 0;
+ }
+}
+
+/* Converts pcm_format to audio_format.
+ * Parameters:
+ * format the pcm_format to convert
+ *
+ * Logs a fatal error if format is not a valid convertible pcm_format.
+ */
+static inline audio_format_t audio_format_from_pcm_format(enum pcm_format format)
+{
+ switch (format) {
+#ifdef HAVE_BIG_ENDIAN
+ case PCM_FORMAT_S16_BE:
+ return AUDIO_FORMAT_PCM_16_BIT;
+ case PCM_FORMAT_S24_3BE:
+ return AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ case PCM_FORMAT_S24_BE:
+ return AUDIO_FORMAT_PCM_8_24_BIT;
+ case PCM_FORMAT_S32_BE:
+ return AUDIO_FORMAT_PCM_32_BIT;
+#else
+ case PCM_FORMAT_S16_LE:
+ return AUDIO_FORMAT_PCM_16_BIT;
+ case PCM_FORMAT_S24_3LE:
+ return AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ case PCM_FORMAT_S24_LE:
+ return AUDIO_FORMAT_PCM_8_24_BIT;
+ case PCM_FORMAT_S32_LE:
+ return AUDIO_FORMAT_PCM_32_BIT;
+#endif
+ default:
+ LOG_ALWAYS_FATAL("audio_format_from_pcm_format: invalid pcm format %#x", format);
+ return 0;
+ }
+}
+
+__END_DECLS
+
+#endif /* ANDROID_AUDIO_ALSAOPS_H */
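For illustration only (not part of this change): a minimal sketch of how a tinyalsa-based HAL might use the two helpers above when opening a stream. The function and variable names here are hypothetical.

    #include <hardware/audio_alsaops.h>

    /* Hypothetical open path: map the format requested by the framework to a
     * tinyalsa format, then map the granted format back for reporting. */
    static audio_format_t example_pick_format(audio_format_t requested,
                                              struct pcm_config *cfg)
    {
        cfg->format = pcm_format_from_audio_format(requested); /* logs fatal on bad input */
        /* ... pcm_open(card, device, PCM_OUT, cfg) would go here ... */
        return audio_format_from_pcm_format(cfg->format);
    }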
diff --git a/include/hardware/bt_gatt_client.h b/include/hardware/bt_gatt_client.h
index cf4fabe..11b146d 100644
--- a/include/hardware/bt_gatt_client.h
+++ b/include/hardware/bt_gatt_client.h
@@ -156,6 +156,9 @@
*/
typedef void (*listen_callback)(int status, int server_if);
+/** Callback invoked when the MTU for a given connection changes */
+typedef void (*configure_mtu_callback)(int conn_id, int status, int mtu);
+
typedef struct {
register_client_callback register_client_cb;
scan_result_callback scan_result_cb;
@@ -175,6 +178,7 @@
execute_write_callback execute_write_cb;
read_remote_rssi_callback read_remote_rssi_cb;
listen_callback listen_cb;
+ configure_mtu_callback configure_mtu_cb;
} btgatt_client_callbacks_t;
/** Represents the standard BT-GATT client interface. */
@@ -282,6 +286,9 @@
uint16_t service_data_len, char* service_data,
uint16_t service_uuid_len, char* service_uuid);
+ /** Configure the MTU for a given connection */
+ bt_status_t (*configure_mtu)(int conn_id, int mtu);
+
/** Test mode interface */
bt_status_t (*test_command)( int command, btgatt_test_params_t* params);
} btgatt_client_interface_t;
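For illustration only (not part of this change): a hedged sketch of how a Bluetooth stack client might register the new MTU callback and request an MTU update; the callback table is abbreviated and the 517-byte value is just an example.

    #include <hardware/bt_gatt_client.h>

    static void my_configure_mtu_cb(int conn_id, int status, int mtu)
    {
        /* status == 0 indicates the new MTU is in effect for conn_id */
        (void)conn_id; (void)status; (void)mtu;
    }

    /* other callbacks omitted; unset entries stay NULL */
    static const btgatt_client_callbacks_t example_callbacks = {
        .configure_mtu_cb = my_configure_mtu_cb,
    };

    /* later, given a btgatt_client_interface_t *client and an open conn_id:
     *     client->configure_mtu(conn_id, 517);
     */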
diff --git a/include/hardware/bt_pan.h b/include/hardware/bt_pan.h
index c8b36b4..83e7949 100644
--- a/include/hardware/bt_pan.h
+++ b/include/hardware/bt_pan.h
@@ -40,8 +40,8 @@
*/
typedef void (*btpan_connection_state_callback)(btpan_connection_state_t state, bt_status_t error,
const bt_bdaddr_t *bd_addr, int local_role, int remote_role);
-typedef void (*btpan_control_state_callback)(btpan_control_state_t state, bt_status_t error,
- int local_role, const char* ifname);
+typedef void (*btpan_control_state_callback)(btpan_control_state_t state, int local_role,
+ bt_status_t error, const char* ifname);
typedef struct {
size_t size;
diff --git a/include/hardware/camera3.h b/include/hardware/camera3.h
index 9eb52ed..4503885 100644
--- a/include/hardware/camera3.h
+++ b/include/hardware/camera3.h
@@ -52,6 +52,7 @@
* S6. Error management
* S7. Key Performance Indicator (KPI) glossary
* S8. Sample Use Cases
+ * S9. Notes on Controls and Metadata
*/
/**
@@ -1139,6 +1140,32 @@
*
*/
+/**
+ * S9. Notes on Controls and Metadata
+ *
+ * This section contains notes about the interpretation and usage of various metadata tags.
+ *
+ * S9.1 HIGH_QUALITY and FAST modes.
+ *
+ * Many camera post-processing blocks may be listed as having HIGH_QUALITY,
+ * FAST, and OFF operating modes. These blocks will typically also have an
+ * 'available modes' tag representing which of these operating modes are
+ * available on a given device. The general policy regarding implementing
+ * these modes is as follows:
+ *
+ * 1. Operating mode controls of hardware blocks that cannot be disabled
+ * must not list OFF in their corresponding 'available modes' tags.
+ *
+ * 2. OFF will always be included in their corresponding 'available modes'
+ * tag if it is possible to disable that hardware block.
+ *
+ * 3. FAST must always be included in the 'available modes' tags for all
+ * post-processing blocks supported on the device. If a post-processing
+ * block also has a slower and higher quality operating mode that does
+ * not meet the framerate requirements for FAST mode, HIGH_QUALITY should
+ * be included in the 'available modes' tag to represent this operating
+ * mode.
+ */
__BEGIN_DECLS
struct camera3_device;
diff --git a/include/hardware/hdmi_cec.h b/include/hardware/hdmi_cec.h
index 938e9dc..de6c70c 100644
--- a/include/hardware/hdmi_cec.h
+++ b/include/hardware/hdmi_cec.h
@@ -34,6 +34,7 @@
#define HDMI_CEC_HARDWARE_INTERFACE "hdmi_cec_hw_if"
typedef enum cec_device_type {
+ CEC_DEVICE_INACTIVE = -1,
CEC_DEVICE_TV = 0,
CEC_DEVICE_RECORDER = 1,
CEC_DEVICE_RESERVED = 2,
@@ -127,7 +128,20 @@
CEC_MESSAGE_GET_CEC_VERSION = 0x9F,
CEC_MESSAGE_VENDOR_COMMAND_WITH_ID = 0xA0,
CEC_MESSAGE_CLEAR_EXTERNAL_TIMER = 0xA1,
- CEC_MESSAGE_SET_EXTERNAL_TIMER = 0xA2
+ CEC_MESSAGE_SET_EXTERNAL_TIMER = 0xA2,
+ CEC_MESSAGE_ABORT = 0xFF
+};
+
+/*
+ * Operand description [Abort Reason]
+ */
+enum abort_reason {
+ ABORT_UNRECOGNIZED_MODE = 0,
+ ABORT_NOT_IN_CORRECT_MODE = 1,
+ ABORT_CANNOT_PROVIDE_SOURCE = 2,
+ ABORT_INVALID_OPERAND = 3,
+ ABORT_REFUSED = 4,
+ ABORT_UNABLE_TO_DETERMINE = 5
};
/*
@@ -194,7 +208,7 @@
* Callback function type that will be called by HAL implementation.
* Services can not close/open the device in the callback.
*/
-typedef void (*event_callback_t)(const hdmi_event_t* event);
+typedef void (*event_callback_t)(const hdmi_event_t* event, void* arg);
typedef struct hdmi_cec_module {
struct hw_module_t common;
@@ -210,16 +224,32 @@
* (*allocate_logical_address)() allocates a new logical address
* for a given device type. The address is written to addr. The HAL
* implementation is also expected to configure itself to start receiving
- * the messages addressed to the allocated one. If allocation
- * is not successful the addr will be set to CEC_ADDR_UNREGISTERED.
+ * the messages addressed to the allocated address. If the address has already
+ * been allocated, it should simply return that address without attempting
+ * the allocation again. If allocation is not successful, addr will be
+ * set to CEC_ADDR_UNREGISTERED.
*
* Returns 0 on success or -errno on error.
- *
*/
int (*allocate_logical_address)(const struct hdmi_cec_device* dev,
- int device_type, cec_logical_address_t* addr);
+ cec_device_type_t device_type, cec_logical_address_t* addr);
/*
+ * (*get_logical_address)() returns the logical address already allocated
+ * for the device of the given type. It is necessary to call this function
+ * when the HAL implementation has updated the address by itself, without
+ * being triggered by the service. Such a situation happens when an event
+ * like hotplug occurs, since the HDMI network topology or the port to
+ * which the device was connected might have changed while it was
+ * unplugged. In response to such events, the service is required to
+ * call this function to get the updated address. The address is written
+ * to addr.
+ *
+ * Returns 0 on success or -errno on error.
+ */
+ int (*get_logical_address)(const struct hdmi_cec_device* dev,
+ cec_device_type_t device_type, cec_logical_address_t* addr);
+ /*
* (*get_physical_address)() returns the CEC physical address. The
* address is written to addr.
*
@@ -237,15 +267,17 @@
*
* Returns 0 on success or -errno on error.
*/
- int (*send_message)(const struct hdmi_cec_device* dev,
- const cec_message_t *);
+ int (*send_message)(const struct hdmi_cec_device* dev, const cec_message_t*);
/*
* (*register_event_callback)() registers a callback that HDMI-CEC HAL
* can later use for incoming CEC messages or internal HDMI events.
+ * When calling from C++, use the argument arg to pass the calling object.
+ * It will be passed back when the callback is invoked so that the context
+ * can be retrieved.
*/
void (*register_event_callback)(const struct hdmi_cec_device* dev,
- event_callback_t callback);
+ event_callback_t callback, void* arg);
/*
* (*get_version)() returns the CEC version supported by underlying
@@ -262,7 +294,7 @@
void (*get_vendor_id)(const struct hdmi_cec_device* dev, uint32_t* vendor_id);
/* Reserved for future use to maximum 16 functions. Must be NULL. */
- void* reserved[16 - 6];
+ void* reserved[16 - 7];
} hdmi_cec_device_t;
/** convenience API for opening and closing a device */
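For illustration only (not part of this change): a sketch of the context-pointer pattern that the added arg parameter enables, so a service object (e.g. a C++ wrapper) can be recovered inside the callback. The names here are hypothetical.

    #include <hardware/hdmi_cec.h>

    struct cec_service {           /* hypothetical caller-side context */
        int active_logical_address;
    };

    static void service_event_cb(const hdmi_event_t* event, void* arg)
    {
        /* the same pointer passed to register_event_callback() comes back here */
        struct cec_service* svc = (struct cec_service*)arg;
        (void)svc; (void)event;
    }

    static void register_with_hal(const struct hdmi_cec_device* dev,
                                  struct cec_service* svc)
    {
        dev->register_event_callback(dev, service_event_cb, svc);
    }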
diff --git a/include/hardware/hwcomposer.h b/include/hardware/hwcomposer.h
index 82e7671..afb4e99 100644
--- a/include/hardware/hwcomposer.h
+++ b/include/hardware/hwcomposer.h
@@ -121,6 +121,26 @@
* that the layer will be handled by the HWC (ie: it must not be
* composited with OpenGL ES).
*
+ *
+ * HWC_SIDEBAND
+ * Set by the caller before calling (*prepare)(), this value indicates
+ * the contents of this layer come from a sideband video stream.
+ *
+ * The h/w composer is responsible for receiving new image buffers from
+ * the stream at the appropriate time (e.g. synchronized to a separate
+ * audio stream), compositing them with the current contents of other
+ * layers, and displaying the resulting image. This happens
+ * independently of the normal prepare/set cycle. The prepare/set calls
+ * only happen when other layers change, or when properties of the
+ * sideband layer such as position or size change.
+ *
+ * If the h/w composer can't handle the layer as a sideband stream for
+ * some reason (e.g. unsupported scaling/blending/rotation, or too many
+ * sideband layers) it can set compositionType to HWC_FRAMEBUFFER in
+ * (*prepare)(). However, doing so will result in the layer being shown
+ * as a solid color since the platform is not currently able to composite
+ * sideband layers with the GPU. This may be improved in future
+ * versions of the platform.
*/
int32_t compositionType;
@@ -141,13 +161,21 @@
hwc_color_t backgroundColor;
struct {
- /* handle of buffer to compose. This handle is guaranteed to have been
- * allocated from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag. If
- * the layer's handle is unchanged across two consecutive prepare calls and
- * the HWC_GEOMETRY_CHANGED flag is not set for the second call then the
- * HWComposer implementation may assume that the contents of the buffer have
- * not changed. */
- buffer_handle_t handle;
+ union {
+ /* When compositionType is HWC_FRAMEBUFFER, HWC_OVERLAY,
+ * HWC_FRAMEBUFFER_TARGET, this is the handle of the buffer to
+ * compose. This handle is guaranteed to have been allocated
+ * from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag.
+ * If the layer's handle is unchanged across two consecutive
+ * prepare calls and the HWC_GEOMETRY_CHANGED flag is not set
+ * for the second call then the HWComposer implementation may
+ * assume that the contents of the buffer have not changed. */
+ buffer_handle_t handle;
+
+ /* When compositionType is HWC_SIDEBAND, this is the handle
+ * of the sideband video stream to compose. */
+ const native_handle_t* sidebandStream;
+ };
/* transformation to apply to the buffer during composition */
uint32_t transform;
@@ -191,6 +219,10 @@
* reads from them are complete before the framebuffer is ready for
* display.
*
+ * HWC_SIDEBAND layers will never have an acquire fence, since
+ * synchronization is handled through implementation-defined
+ * sideband mechanisms.
+ *
* The HWC takes ownership of the acquireFenceFd and is responsible
* for closing it when no longer needed.
*/
@@ -214,6 +246,10 @@
* produce a release fence for them. The releaseFenceFd will be -1
* for these layers when set() is called.
*
+ * Since HWC_SIDEBAND buffers don't pass through the HWC client,
+ * the HWC shouldn't produce a release fence for them. The
+ * releaseFenceFd will be -1 for these layers when set() is called.
+ *
* The HWC client taks ownership of the releaseFenceFd and is
* responsible for closing it when no longer needed.
*/
@@ -262,7 +298,7 @@
};
/* Allow for expansion w/o breaking binary compatibility.
- * Pad layer to 96 bytes.
+ * Pad layer to 96 bytes, assuming 32-bit pointers.
*/
int32_t reserved[24 - 19];
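For illustration only (not part of this change): a sketch of the sideband fallback rule described above, inside a hypothetical prepare() implementation; can_present_sideband() stands in for whatever capability check the device performs.

    #include <hardware/hwcomposer.h>

    static int can_present_sideband(const hwc_layer_1_t* layer)
    {
        (void)layer;
        return 0;   /* hypothetical: assume the device cannot handle it */
    }

    static void example_prepare_display(hwc_display_contents_1_t* list)
    {
        size_t i;
        for (i = 0; i < list->numHwLayers; i++) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if (layer->compositionType == HWC_SIDEBAND &&
                !can_present_sideband(layer)) {
                /* demote: the layer will then appear as a solid color */
                layer->compositionType = HWC_FRAMEBUFFER;
            }
        }
    }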
diff --git a/include/hardware/hwcomposer_defs.h b/include/hardware/hwcomposer_defs.h
index c69a4bc..242e3f6 100644
--- a/include/hardware/hwcomposer_defs.h
+++ b/include/hardware/hwcomposer_defs.h
@@ -36,6 +36,7 @@
#define HWC_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, HWC_HEADER_VERSION)
#define HWC_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, HWC_HEADER_VERSION)
#define HWC_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, HWC_HEADER_VERSION)
+#define HWC_DEVICE_API_VERSION_1_4 HARDWARE_DEVICE_API_VERSION_2(1, 4, HWC_HEADER_VERSION)
enum {
/* hwc_composer_device_t::set failed in EGL */
@@ -95,6 +96,10 @@
/* this layer holds the result of compositing the HWC_FRAMEBUFFER layers.
* Added in HWC_DEVICE_API_VERSION_1_1. */
HWC_FRAMEBUFFER_TARGET = 3,
+
+ /* this layer's contents are taken from a sideband buffer stream.
+ * Added in HWC_DEVICE_API_VERSION_1_4. */
+ HWC_SIDEBAND = 4,
};
/*
diff --git a/include/hardware/tv_input.h b/include/hardware/tv_input.h
new file mode 100644
index 0000000..e04ad2f
--- /dev/null
+++ b/include/hardware/tv_input.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TV_INPUT_INTERFACE_H
+#define ANDROID_TV_INPUT_INTERFACE_H
+
+#include <stdint.h>
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#include <hardware/hardware.h>
+
+__BEGIN_DECLS
+
+/*
+ * Module versioning information for the TV input hardware module, based on
+ * tv_input_module_t.common.module_api_version.
+ *
+ * Version History:
+ *
+ * TV_INPUT_MODULE_API_VERSION_0_1:
+ * Initial TV input hardware module API.
+ *
+ */
+
+#define TV_INPUT_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
+
+#define TV_INPUT_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION(0, 1)
+
+/*
+ * The id of this module
+ */
+#define TV_INPUT_HARDWARE_MODULE_ID "tv_input"
+
+#define TV_INPUT_DEFAULT_DEVICE "default"
+
+/*****************************************************************************/
+
+/*
+ * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
+ * and the fields of this data structure must begin with hw_module_t
+ * followed by module specific information.
+ */
+typedef struct tv_input_module {
+ struct hw_module_t common;
+} tv_input_module_t;
+
+/*****************************************************************************/
+
+typedef enum tv_input_type {
+ /* HDMI */
+ TV_INPUT_TYPE_HDMI = 1,
+
+ /* Built-in tuners. */
+ TV_INPUT_TYPE_BUILT_IN_TUNER = 2,
+
+ /* Passthrough */
+ TV_INPUT_TYPE_PASSTHROUGH = 3,
+} tv_input_type_t;
+
+typedef struct tv_input_device_info {
+ /* Device ID */
+ int device_id;
+
+ /* Type of physical TV input. */
+ tv_input_type_t type;
+
+ /*
+ * TODO: A union of type specific information. For example, HDMI port
+ * identifier that HDMI hardware understands.
+ */
+
+ /* TODO: Add capability if necessary. */
+
+ /* TODO: Audio info */
+} tv_input_device_info_t;
+
+typedef enum {
+ /*
+ * Hardware notifies the framework that a device is available.
+ */
+ TV_INPUT_EVENT_DEVICE_AVAILABLE = 1,
+ /*
+ * Hardware notifies the framework that a device is unavailable.
+ */
+ TV_INPUT_EVENT_DEVICE_UNAVAILABLE = 2,
+ /*
+ * Stream configurations have changed. The client should regard all open
+ * streams at the specified device as closed, and should call
+ * get_stream_configurations() again, reopening some of them if necessary.
+ */
+ TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED = 3,
+ /* TODO: Buffer notifications, etc. */
+} tv_input_event_type_t;
+
+typedef struct tv_input_event {
+ tv_input_event_type_t type;
+
+ union {
+ /*
+ * TV_INPUT_EVENT_DEVICE_AVAILABLE: all fields are relevant
+ * TV_INPUT_EVENT_DEVICE_UNAVAILABLE: only device_id is relevant
+ * TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED: only device_id is
+ * relevant
+ */
+ tv_input_device_info_t device_info;
+ };
+} tv_input_event_t;
+
+typedef struct tv_input_callback_ops {
+ /*
+ * event contains the type of the event and additional data if necessary.
+ * The event object is guaranteed to be valid only for the duration of the
+ * call.
+ *
+ * data is an object supplied at device initialization, opaque to the
+ * hardware.
+ */
+ void (*notify)(struct tv_input_device* dev,
+ tv_input_event_t* event, void* data);
+} tv_input_callback_ops_t;
+
+typedef enum {
+ TV_STREAM_TYPE_INDEPENDENT_VIDEO_SOURCE = 1,
+ /* TODO: TV_STREAM_TYPE_BUFFER_PRODUCER = 2, */
+} tv_stream_type_t;
+
+typedef struct tv_stream_config {
+ /*
+ * ID number of the stream. This value is used to identify the whole stream
+ * configuration.
+ */
+ int stream_id;
+
+ /* Type of the stream */
+ tv_stream_type_t type;
+
+ /* Max width/height of the stream. */
+ uint32_t max_video_width;
+ uint32_t max_video_height;
+} tv_stream_config_t;
+
+typedef struct tv_stream {
+ /* IN: ID in the stream configuration. */
+ int stream_id;
+
+ /* OUT: Type of the stream (for convenience) */
+ tv_stream_type_t type;
+
+ /* OUT: Data associated with the stream for client's use */
+ union {
+ native_handle_t* sideband_stream_source_handle;
+ /* TODO: buffer_producer_stream_t buffer_producer; */
+ };
+} tv_stream_t;
+
+/*
+ * Every device data structure must begin with hw_device_t
+ * followed by module specific public methods and attributes.
+ */
+typedef struct tv_input_device {
+ struct hw_device_t common;
+
+ /*
+ * initialize:
+ *
+ * Provide callbacks to the device and start operation. At first, no device
+ * is available; after initialize() completes, currently available devices,
+ * including static devices, should be reported via the callback.
+ *
+ * The framework owns the callbacks object.
+ *
+ * data is a framework-owned object which will be passed back to the
+ * framework with each callback notification.
+ *
+ * Return 0 on success.
+ */
+ int (*initialize)(struct tv_input_device* dev,
+ const tv_input_callback_ops_t* callback, void* data);
+
+ /*
+ * get_stream_configurations:
+ *
+ * Get stream configurations for a specific device. An input device may have
+ * multiple configurations.
+ *
+ * The configs object is guaranteed to be valid only until the next call to
+ * get_stream_configurations() or STREAM_CONFIGURATIONS_CHANGED event.
+ *
+ * Return 0 on success.
+ */
+ int (*get_stream_configurations)(const struct tv_input_device* dev,
+ int device_id, int* num_configurations,
+ const tv_stream_config_t** configs);
+
+ /*
+ * open_stream:
+ *
+ * Open a stream with given stream ID. Caller owns stream object, and the
+ * populated data is only valid until the stream is closed.
+ *
+ * Return 0 on success; -EBUSY if the client should close other streams to
+ * open the stream; -EEXIST if the stream with the given ID is already open;
+ * -EINVAL if device_id and/or stream_id are invalid; other non-zero value
+ * denotes unknown error.
+ */
+ int (*open_stream)(struct tv_input_device* dev, int device_id,
+ tv_stream_t* stream);
+
+ /*
+ * close_stream:
+ *
+ * Close a stream to a device. data in tv_stream_t* object associated with
+ * the stream_id is obsolete once this call finishes.
+ *
+ * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if
+ * device_id and/or stream_id are invalid.
+ */
+ int (*close_stream)(struct tv_input_device* dev, int device_id,
+ int stream_id);
+
+ /*
+ * TODO: Add more APIs such as buffer operations in case of buffer producer
+ * profile.
+ */
+
+ void* reserved[16];
+} tv_input_device_t;
+
+__END_DECLS
+
+#endif // ANDROID_TV_INPUT_INTERFACE_H
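For illustration only (not part of this change): a hedged sketch of the call sequence a client of this HAL might follow for a device reported via TV_INPUT_EVENT_DEVICE_AVAILABLE; error handling and the surrounding service code are omitted.

    #include <hardware/tv_input.h>

    static void try_first_stream(tv_input_device_t* dev, int device_id)
    {
        int num_configs = 0;
        const tv_stream_config_t* configs = NULL;

        if (dev->get_stream_configurations(dev, device_id, &num_configs, &configs) != 0
                || num_configs <= 0)
            return;

        tv_stream_t stream;                       /* caller-owned, per open_stream() */
        stream.stream_id = configs[0].stream_id;
        if (dev->open_stream(dev, device_id, &stream) == 0) {
            /* stream.sideband_stream_source_handle is valid until close_stream() */
            dev->close_stream(dev, device_id, stream.stream_id);
        }
    }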
diff --git a/modules/Android.mk b/modules/Android.mk
index f1a6c1c..5f1de32 100644
--- a/modules/Android.mk
+++ b/modules/Android.mk
@@ -1,4 +1,4 @@
hardware_modules := gralloc hwcomposer audio nfc nfc-nci local_time \
power usbaudio audio_remote_submix camera consumerir sensors vibrator \
- mcu
+ mcu tv_input
include $(call all-named-subdir-makefiles,$(hardware_modules))
diff --git a/modules/audio/Android.mk b/modules/audio/Android.mk
index 49ed312..84d8203 100644
--- a/modules/audio/Android.mk
+++ b/modules/audio/Android.mk
@@ -27,6 +27,7 @@
LOCAL_SRC_FILES := audio_hw.c
LOCAL_SHARED_LIBRARIES := liblog libcutils
LOCAL_MODULE_TAGS := optional
+LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
@@ -39,5 +40,6 @@
LOCAL_SRC_FILES := audio_policy.c
LOCAL_SHARED_LIBRARIES := liblog libcutils
LOCAL_MODULE_TAGS := optional
+LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
diff --git a/modules/audio_remote_submix/Android.mk b/modules/audio_remote_submix/Android.mk
index 50c8cb2..d718c76 100644
--- a/modules/audio_remote_submix/Android.mk
+++ b/modules/audio_remote_submix/Android.mk
@@ -26,5 +26,6 @@
LOCAL_SHARED_LIBRARIES := liblog libcutils libutils libnbaio
LOCAL_STATIC_LIBRARIES := libmedia_helper
LOCAL_MODULE_TAGS := optional
+LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
diff --git a/modules/sensors/multihal.cpp b/modules/sensors/multihal.cpp
index a145c37..36345f9 100644
--- a/modules/sensors/multihal.cpp
+++ b/modules/sensors/multihal.cpp
@@ -214,9 +214,11 @@
}
int sensors_poll_context_t::activate(int handle, int enabled) {
+ int retval = -EINVAL;
ALOGV("activate");
sensors_poll_device_t* v0 = this->get_v0_device_by_handle(handle);
- int retval = v0->activate(v0, get_local_handle(handle), enabled);
+ if (v0)
+ retval = v0->activate(v0, get_local_handle(handle), enabled);
ALOGV("retval %d", retval);
return retval;
}
@@ -576,7 +578,8 @@
sensors_module_t *sensors_module = (sensors_module_t*) *it;
struct hw_device_t* sub_hw_device;
int sub_open_result = sensors_module->common.methods->open(*it, name, &sub_hw_device);
- dev->addSubHwDevice(sub_hw_device);
+ if (!sub_open_result)
+ dev->addSubHwDevice(sub_hw_device);
}
// Prepare the output param and return
diff --git a/modules/tv_input/Android.mk b/modules/tv_input/Android.mk
new file mode 100644
index 0000000..e8aa7fc
--- /dev/null
+++ b/modules/tv_input/Android.mk
@@ -0,0 +1,24 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_SHARED_LIBRARIES := libcutils liblog
+LOCAL_SRC_FILES := tv_input.cpp
+LOCAL_MODULE := tv_input.default
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_SHARED_LIBRARY)
diff --git a/modules/tv_input/tv_input.cpp b/modules/tv_input/tv_input.cpp
new file mode 100644
index 0000000..bdb47ca
--- /dev/null
+++ b/modules/tv_input/tv_input.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <errno.h>
+
+#include <cutils/log.h>
+#include <cutils/native_handle.h>
+
+#include <hardware/tv_input.h>
+
+/*****************************************************************************/
+
+typedef struct tv_input_private {
+ tv_input_device_t device;
+
+ // Callback related data
+ const tv_input_callback_ops_t* callback;
+ void* callback_data;
+} tv_input_private_t;
+
+static int tv_input_device_open(const struct hw_module_t* module,
+ const char* name, struct hw_device_t** device);
+
+static struct hw_module_methods_t tv_input_module_methods = {
+ open: tv_input_device_open
+};
+
+tv_input_module_t HAL_MODULE_INFO_SYM = {
+ common: {
+ tag: HARDWARE_MODULE_TAG,
+ version_major: 0,
+ version_minor: 1,
+ id: TV_INPUT_HARDWARE_MODULE_ID,
+ name: "Sample TV input module",
+ author: "The Android Open Source Project",
+ methods: &tv_input_module_methods,
+ }
+};
+
+/*****************************************************************************/
+
+static int tv_input_initialize(struct tv_input_device* dev,
+ const tv_input_callback_ops_t* callback, void* data)
+{
+ if (dev == NULL || callback == NULL) {
+ return -EINVAL;
+ }
+ tv_input_private_t* priv = (tv_input_private_t*)dev;
+ if (priv->callback != NULL) {
+ return -EEXIST;
+ }
+
+ priv->callback = callback;
+ priv->callback_data = data;
+
+ return 0;
+}
+
+static int tv_input_get_stream_configurations(
+ const struct tv_input_device*, int, int*, const tv_stream_config_t**)
+{
+ return -EINVAL;
+}
+
+static int tv_input_open_stream(struct tv_input_device*, int, tv_stream_t*)
+{
+ return -EINVAL;
+}
+
+static int tv_input_close_stream(struct tv_input_device*, int, int)
+{
+ return -EINVAL;
+}
+
+/*****************************************************************************/
+
+static int tv_input_device_close(struct hw_device_t *dev)
+{
+ tv_input_private_t* priv = (tv_input_private_t*)dev;
+ if (priv) {
+ free(priv);
+ }
+ return 0;
+}
+
+/*****************************************************************************/
+
+static int tv_input_device_open(const struct hw_module_t* module,
+ const char* name, struct hw_device_t** device)
+{
+ int status = -EINVAL;
+ if (!strcmp(name, TV_INPUT_DEFAULT_DEVICE)) {
+ tv_input_private_t* dev = (tv_input_private_t*)malloc(sizeof(*dev));
+
+ /* initialize our state here */
+ memset(dev, 0, sizeof(*dev));
+
+ /* initialize the procs */
+ dev->device.common.tag = HARDWARE_DEVICE_TAG;
+ dev->device.common.version = TV_INPUT_DEVICE_API_VERSION_0_1;
+ dev->device.common.module = const_cast<hw_module_t*>(module);
+ dev->device.common.close = tv_input_device_close;
+
+ dev->device.initialize = tv_input_initialize;
+ dev->device.get_stream_configurations =
+ tv_input_get_stream_configurations;
+ dev->device.open_stream = tv_input_open_stream;
+ dev->device.close_stream = tv_input_close_stream;
+
+ *device = &dev->device.common;
+ status = 0;
+ }
+ return status;
+}
diff --git a/modules/usbaudio/Android.mk b/modules/usbaudio/Android.mk
index 199eb09..2acd171 100644
--- a/modules/usbaudio/Android.mk
+++ b/modules/usbaudio/Android.mk
@@ -24,6 +24,7 @@
external/tinyalsa/include
LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa
LOCAL_MODULE_TAGS := optional
+LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
diff --git a/modules/usbaudio/audio_hw.c b/modules/usbaudio/audio_hw.c
index 24a2d63..afe56b2 100644
--- a/modules/usbaudio/audio_hw.c
+++ b/modules/usbaudio/audio_hw.c
@@ -33,65 +33,270 @@
#include <tinyalsa/asoundlib.h>
-struct pcm_config pcm_config = {
+/* This is the default configuration to hand to The Framework on the initial
+ * adev_open_output_stream(). Actual device attributes will be used on the subsequent
+ * adev_open_output_stream() after the card and device number have been set in out_set_parameters()
+ */
+#define OUT_PERIOD_SIZE 1024
+#define OUT_PERIOD_COUNT 4
+#define OUT_SAMPLING_RATE 44100
+
+struct pcm_config default_alsa_out_config = {
.channels = 2,
- .rate = 44100,
- .period_size = 1024,
- .period_count = 4,
+ .rate = OUT_SAMPLING_RATE,
+ .period_size = OUT_PERIOD_SIZE,
+ .period_count = OUT_PERIOD_COUNT,
.format = PCM_FORMAT_S16_LE,
};
+/*
+ * Input defaults. See comment above.
+ */
+#define IN_PERIOD_SIZE 1024
+#define IN_PERIOD_COUNT 4
+#define IN_SAMPLING_RATE 44100
+
+struct pcm_config default_alsa_in_config = {
+ .channels = 2,
+ .rate = IN_SAMPLING_RATE,
+ .period_size = IN_PERIOD_SIZE,
+ .period_count = IN_PERIOD_COUNT,
+ .format = PCM_FORMAT_S16_LE,
+ .start_threshold = 1,
+ .stop_threshold = (IN_PERIOD_SIZE * IN_PERIOD_COUNT),
+};
+
struct audio_device {
struct audio_hw_device hw_device;
pthread_mutex_t lock; /* see note below on mutex acquisition order */
- int card;
- int device;
+
+ /* output */
+ int out_card;
+ int out_device;
+
+ /* input */
+ int in_card;
+ int in_device;
+
bool standby;
};
struct stream_out {
struct audio_stream_out stream;
+ pthread_mutex_t lock; /* see note below on mutex acquisition order */
+ struct pcm *pcm; /* state of the stream */
+ bool standby;
+
+ struct audio_device *dev; /* hardware information */
+
+ void * conversion_buffer; /* any conversions are put into here
+ * they could come from here too if
+ * there was a previous conversion */
+ size_t conversion_buffer_size; /* in bytes */
+};
+
+/*
+ * Output Configuration Cache
+ * FIXME(pmclean) This is not reentrant. It should probably be moved into the stream structure
+ * but that will involve changes in The Framework.
+ */
+static struct pcm_config cached_output_hardware_config;
+static bool output_hardware_config_is_cached = false;
+
+struct stream_in {
+ struct audio_stream_in stream;
+
pthread_mutex_t lock; /* see note below on mutex acquisition order */
struct pcm *pcm;
bool standby;
+ struct pcm_config alsa_pcm_config;
+
struct audio_device *dev;
+
+ struct audio_config hal_pcm_config;
+
+ unsigned int requested_rate;
+// struct resampler_itfe *resampler;
+// struct resampler_buffer_provider buf_provider;
+ int16_t *buffer;
+ size_t buffer_size;
+ size_t frames_in;
+ int read_status;
};
+/*
+ * Utility
+ */
+/*
+ * Translates from ALSA format ID to ANDROID_AUDIO_CORE format ID
+ * (see master/system/core/include/core/audio.h)
+ * TODO(pmclean) Replace with audio_format_from_pcm_format() (in hardware/audio_alsaops.h).
+ * post-integration.
+ */
+static audio_format_t alsa_to_fw_format_id(int alsa_fmt_id)
+{
+ switch (alsa_fmt_id) {
+ case PCM_FORMAT_S8:
+ return AUDIO_FORMAT_PCM_8_BIT;
+
+ case PCM_FORMAT_S24_3LE:
+ //TODO(pmclean) make sure this is the 'right' sort of 24-bit
+ return AUDIO_FORMAT_PCM_8_24_BIT;
+
+ case PCM_FORMAT_S32_LE:
+ case PCM_FORMAT_S24_LE:
+ return AUDIO_FORMAT_PCM_32_BIT;
+ }
+
+ return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+/*
+ * Data Conversions
+ */
+/*
+ * Convert a buffer of PCM16LE samples to packed (3-byte) PCM24LE samples.
+ * in_buff points to the buffer of PCM16 samples
+ * num_in_samples size of input buffer in SAMPLES
+ * out_buff points to the buffer to receive converted PCM24 LE samples.
+ * returns the number of BYTES of output data.
+ * We are doing this since we *always* present to The Framework as a PCM16LE device, but need to
+ * support PCM24_3LE (24-bit, packed).
+ * NOTE: we're just filling the low-order byte of the PCM24LE samples with 0.
+ * TODO(pmclean, hung) Move this to a utilities module.
+ */
+static size_t convert_16_to_24_3(unsigned short * in_buff,
+ size_t num_in_samples,
+ unsigned char * out_buff) {
+ /*
+ * Move from back to front so that the conversion can be done in-place
+ * i.e. in_buff == out_buff
+ */
+ int in_buff_size_in_bytes = num_in_samples * 2;
+ /* we need 3 bytes in the output for every 2 bytes in the input */
+ int out_buff_size_in_bytes = ((3 * in_buff_size_in_bytes) / 2);
+ unsigned char* dst_ptr = out_buff + out_buff_size_in_bytes - 1;
+ int src_smpl_index;
+ unsigned char* src_ptr = ((unsigned char *)in_buff) + in_buff_size_in_bytes - 1;
+ for (src_smpl_index = 0; src_smpl_index < num_in_samples; src_smpl_index++) {
+ *dst_ptr-- = *src_ptr--; /* hi-byte */
+ *dst_ptr-- = *src_ptr--; /* low-byte */
+ *dst_ptr-- = 0; /* zero-byte */
+ }
+
+ /* return number of *bytes* generated */
+ return out_buff_size_in_bytes;
+}
+
+/*
+ * Convert a buffer of 2-channel PCM16 samples to 4-channel PCM16 samples.
+ * in_buff points to the buffer of PCM16 samples
+ * num_in_samples size of input buffer in SAMPLES
+ * out_buff points to the buffer to receive converted PCM16 samples.
+ * returns the number of BYTES of output data.
+ * NOTE channels 3 & 4 are filled with silence.
+ * We are doing this since we *always* present to The Framework as a STEREO device, but need to
+ * support 4-channel devices.
+ * TODO(pmclean, hung) Move this to a utilities module.
+ */
+static size_t convert_2chan16_to_4chan16(unsigned short* in_buff,
+ size_t num_in_samples,
+ unsigned short* out_buff) {
+ /*
+ * Move from back to front so that the conversion can be done in-place
+ * i.e. in_buff == out_buff
+ */
+ int out_buff_size = num_in_samples * 2;
+ unsigned short* dst_ptr = out_buff + out_buff_size - 1;
+ int src_index;
+ unsigned short* src_ptr = in_buff + num_in_samples - 1;
+ for (src_index = 0; src_index < num_in_samples; src_index += 2) {
+ *dst_ptr-- = 0; /* chan 4 */
+ *dst_ptr-- = 0; /* chan 3 */
+ *dst_ptr-- = *src_ptr--; /* chan 2 */
+ *dst_ptr-- = *src_ptr--; /* chan 1 */
+ }
+
+ /* return number of *bytes* generated */
+ return out_buff_size * 2;
+}
+
+/*
+ * ALSA Utilities
+ */
+/*
+ * gets the ALSA bit-format flag from a bits-per-sample value.
+ * TODO(pmclean, hung) Move this to a utilities module.
+ */
+static int bits_to_alsa_format(int bits_per_sample, int default_format)
+{
+ enum pcm_format format;
+ for (format = PCM_FORMAT_S16_LE; format < PCM_FORMAT_MAX; format++) {
+ if (pcm_format_to_bits(format) == bits_per_sample) {
+ return format;
+ }
+ }
+ return default_format;
+}
+
+/*
+ * Reads and decodes configuration info from the specified ALSA card/device
+ */
+static int read_alsa_device_config(int card, int device, int io_type, struct pcm_config * config)
+{
+ ALOGV("usb:audio_hw - read_alsa_device_config(card:%d device:%d)", card, device);
+
+ if (card < 0 || device < 0) {
+ return -EINVAL;
+ }
+
+ struct pcm_params * alsa_hw_params = pcm_params_get(card, device, io_type);
+ if (alsa_hw_params == NULL) {
+ return -EINVAL;
+ }
+
+ /*
+ * This Logging will be useful when testing new USB devices.
+ */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_SAMPLE_BITS min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS), pcm_params_get_max(alsa_hw_params, PCM_PARAM_SAMPLE_BITS)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_FRAME_BITS min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_FRAME_BITS), pcm_params_get_max(alsa_hw_params, PCM_PARAM_FRAME_BITS)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_CHANNELS min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS), pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_RATE min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE), pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_TIME min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_TIME), pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_TIME)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_SIZE min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_SIZE), pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_SIZE)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_BYTES min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_BYTES), pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_BYTES)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_PERIODS min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIODS), pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIODS)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_TIME min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_TIME), pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_TIME)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_SIZE min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_SIZE), pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_SIZE)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_BYTES min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_BYTES), pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_BYTES)); */
+ /* ALOGV("usb:audio_hw - PCM_PARAM_TICK_TIME min:%d, max:%d", pcm_params_get_min(alsa_hw_params, PCM_PARAM_TICK_TIME), pcm_params_get_max(alsa_hw_params, PCM_PARAM_TICK_TIME)); */
+
+ config->channels = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS);
+ config->rate = pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE);
+ config->period_size = pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIODS);
+ config->period_count = pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIODS);
+
+ int bits_per_sample = pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS);
+ config->format = bits_to_alsa_format(bits_per_sample, PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+/*
+ * HAL Functions
+ */
/**
* NOTE: when multiple mutexes have to be acquired, always respect the
* following order: hw device > out stream
*/
/* Helper functions */
-
-/* must be called with hw device and output stream mutexes locked */
-static int start_output_stream(struct stream_out *out)
-{
- struct audio_device *adev = out->dev;
- int i;
-
- if ((adev->card < 0) || (adev->device < 0))
- return -EINVAL;
-
- out->pcm = pcm_open(adev->card, adev->device, PCM_OUT, &pcm_config);
-
- if (out->pcm && !pcm_is_ready(out->pcm)) {
- ALOGE("pcm_open() failed: %s", pcm_get_error(out->pcm));
- pcm_close(out->pcm);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/* API functions */
-
static uint32_t out_get_sample_rate(const struct audio_stream *stream)
{
- return pcm_config.rate;
+ return cached_output_hardware_config.rate;
}
static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
@@ -101,17 +306,22 @@
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
- return pcm_config.period_size *
- audio_stream_frame_size((struct audio_stream *)stream);
+ return cached_output_hardware_config.period_size * audio_stream_frame_size(stream);
}
static uint32_t out_get_channels(const struct audio_stream *stream)
{
+ // Always stereo for now. We will do *some* conversions in this HAL.
+ // TODO(pmclean) When AudioPolicyManager & AudioFlinger support arbitrary channel masks,
+ // rewrite this to return the ACTUAL channel format
return AUDIO_CHANNEL_OUT_STEREO;
}
static audio_format_t out_get_format(const struct audio_stream *stream)
{
+ // Always return 16-bit PCM. We will do *some* conversions in this HAL.
+ // TODO(pmclean) When AudioPolicyManager & AudioFlinger support arbitrary PCM formats
+ // rewrite this to return the ACTUAL data format
return AUDIO_FORMAT_PCM_16_BIT;
}
@@ -146,39 +356,122 @@
static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
{
+ ALOGV("usb:audio_hw::out out_set_parameters() keys:%s", kvpairs);
+
struct stream_out *out = (struct stream_out *)stream;
struct audio_device *adev = out->dev;
struct str_parms *parms;
char value[32];
- int ret;
+ int param_val;
int routing = 0;
+ int ret_value = 0;
parms = str_parms_create_str(kvpairs);
pthread_mutex_lock(&adev->lock);
- ret = str_parms_get_str(parms, "card", value, sizeof(value));
- if (ret >= 0)
- adev->card = atoi(value);
+ bool recache_device_params = false;
+ param_val = str_parms_get_str(parms, "card", value, sizeof(value));
+ if (param_val >= 0) {
+ adev->out_card = atoi(value);
+ recache_device_params = true;
+ }
- ret = str_parms_get_str(parms, "device", value, sizeof(value));
- if (ret >= 0)
- adev->device = atoi(value);
+ param_val = str_parms_get_str(parms, "device", value, sizeof(value));
+ if (param_val >= 0) {
+ adev->out_device = atoi(value);
+ recache_device_params = true;
+ }
+
+ if (recache_device_params && adev->out_card >= 0 && adev->out_device >= 0) {
+ ret_value = read_alsa_device_config(adev->out_card, adev->out_device, PCM_OUT,
+ &(cached_output_hardware_config));
+ output_hardware_config_is_cached = (ret_value == 0);
+ }
pthread_mutex_unlock(&adev->lock);
str_parms_destroy(parms);
- return 0;
+ return ret_value;
}
-static char * out_get_parameters(const struct audio_stream *stream, const char *keys)
-{
- return strdup("");
+//TODO(pmclean) it seems like both out_get_parameters() and in_get_parameters()
+// could be written in terms of a get_device_parameters(io_type)
+
+static char * out_get_parameters(const struct audio_stream *stream, const char *keys) {
+ struct stream_out *out = (struct stream_out *) stream;
+ struct audio_device *adev = out->dev;
+
+ unsigned min, max;
+
+ struct str_parms *query = str_parms_create_str(keys);
+ struct str_parms *result = str_parms_create();
+
+ int num_written = 0;
+ char buffer[256];
+ int buffer_size = sizeof(buffer) / sizeof(buffer[0]);
+ char* result_str = NULL;
+
+ struct pcm_params * alsa_hw_params = pcm_params_get(adev->out_card, adev->out_device, PCM_OUT);
+
+ // These keys are from hardware/libhardware/include/audio.h
+ // supported sample rates
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES)) {
+ // pcm_hw_params doesn't have a list of supported sample rates, just a min and a max, so
+ // if they are different, return a list containing those two values, otherwise just the one.
+ min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE);
+ max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE);
+ num_written = snprintf(buffer, buffer_size, "%d", min);
+ if (min != max) {
+ snprintf(buffer + num_written, buffer_size - num_written, "|%d",
+ max);
+ }
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES,
+ buffer);
+ } // AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES
+
+ // supported channel counts
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_CHANNELS)) {
+ // Similarly for the output channel count
+ min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS);
+ max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS);
+ num_written = snprintf(buffer, buffer_size, "%d", min);
+ if (min != max) {
+ snprintf(buffer + num_written, buffer_size - num_written, "|%d", max);
+ }
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_CHANNELS, buffer);
+ } // AUDIO_PARAMETER_STREAM_SUP_CHANNELS
+
+ // supported sample formats
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_FORMATS)) {
+ // Similarly for the supported sample formats (bits per sample)
+ //TODO(pmclean): this is wrong.
+ min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS);
+ max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_SAMPLE_BITS);
+ num_written = snprintf(buffer, buffer_size, "%d", min);
+ if (min != max) {
+ snprintf(buffer + num_written, buffer_size - num_written, "|%d", max);
+ }
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_FORMATS, buffer);
+ } // AUDIO_PARAMETER_STREAM_SUP_FORMATS
+
+ result_str = str_parms_to_str(result);
+
+ // done with these...
+ str_parms_destroy(query);
+ str_parms_destroy(result);
+
+ return result_str;
}
static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
- return (pcm_config.period_size * pcm_config.period_count * 1000) /
- out_get_sample_rate(&stream->common);
+ struct stream_out *out = (struct stream_out *)stream;
+
+ //TODO(pmclean): Do we need a term here for the USB latency
+ // (as reported in the USB descriptors)?
+ uint32_t latency = (cached_output_hardware_config.period_size *
+ cached_output_hardware_config.period_count * 1000) / out_get_sample_rate(&stream->common);
+ return latency;
}
static int out_set_volume(struct audio_stream_out *stream, float left,
@@ -187,8 +480,41 @@
return -ENOSYS;
}
-static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
- size_t bytes)
+/* must be called with hw device and output stream mutexes locked */
+static int start_output_stream(struct stream_out *out)
+{
+ struct audio_device *adev = out->dev;
+ int return_val = 0;
+
+ ALOGV("usb:audio_hw::out start_output_stream(card:%d device:%d)",
+ adev->out_card, adev->out_device);
+
+ out->pcm = pcm_open(adev->out_card, adev->out_device, PCM_OUT, &cached_output_hardware_config);
+ if (out->pcm == NULL) {
+ return -ENOMEM;
+ }
+
+ if (out->pcm && !pcm_is_ready(out->pcm)) {
+ ALOGE("audio_hw audio_hw pcm_open() failed: %s", pcm_get_error(out->pcm));
+ pcm_close(out->pcm);
+ return -ENOMEM;
+ }
+
+ // Setup conversion buffer
+ size_t buffer_size = out_get_buffer_size(&(out->stream.common));
+
+ // compute the maximum potential buffer size.
+ // * 2 for stereo -> quad conversion
+ // * 3/2 for 16bit -> 24 bit conversion
+ //TODO(pmclean) - remove this when AudioPolicyManager/AudioFlinger support arbitrary formats
+ // (and do these conversions themselves)
+ out->conversion_buffer_size = (buffer_size * 3 * 2) / 2;
+ out->conversion_buffer = realloc(out->conversion_buffer, out->conversion_buffer_size);
+
+ return 0;
+}
+
+static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, size_t bytes)
{
int ret;
struct stream_out *out = (struct stream_out *)stream;
@@ -203,7 +529,45 @@
out->standby = false;
}
- pcm_write(out->pcm, (void *)buffer, bytes);
+ void * write_buff = buffer;
+ int num_write_buff_bytes = bytes;
+
+ /*
+ * Num Channels conversion
+ */
+ int num_device_channels = cached_output_hardware_config.channels;
+ int num_req_channels = 2; /* always, for now */
+ if (num_device_channels != num_req_channels && num_device_channels == 4) {
+ num_write_buff_bytes =
+ convert_2chan16_to_4chan16(write_buff, num_write_buff_bytes / 2,
+ out->conversion_buffer);
+ write_buff = out->conversion_buffer;
+ }
+
+ /*
+ * 16 vs 24-bit logic here
+ */
+ switch (cached_output_hardware_config.format) {
+ case PCM_FORMAT_S16_LE:
+ // the output format is the same as the input format, so just write it out
+ break;
+
+ case PCM_FORMAT_S24_3LE:
+ // 16-bit LE (2 bytes/sample) -> packed 24-bit LE (3 bytes/sample)
+ num_write_buff_bytes =
+ convert_16_to_24_3(write_buff, num_write_buff_bytes / 2, out->conversion_buffer);
+ write_buff = out->conversion_buffer;
+ break;
+
+ default:
+ // unexpected device format; pass the buffer through unchanged
+ ALOGV("usb:Unknown Format!!!");
+ break;
+ }
+
+ if (write_buff != NULL && num_write_buff_bytes != 0) {
+ pcm_write(out->pcm, write_buff, num_write_buff_bytes);
+ }
pthread_mutex_unlock(&out->lock);
pthread_mutex_unlock(&out->dev->lock);
@@ -250,14 +614,18 @@
struct audio_config *config,
struct audio_stream_out **stream_out)
{
+ ALOGV("usb:audio_hw::out adev_open_output_stream() handle:0x%X, devices:0x%X, flags:0x%X",
+ handle, devices, flags);
+
struct audio_device *adev = (struct audio_device *)dev;
+
struct stream_out *out;
- int ret;
out = (struct stream_out *)calloc(1, sizeof(struct stream_out));
if (!out)
return -ENOMEM;
+ // setup function pointers
out->stream.common.get_sample_rate = out_get_sample_rate;
out->stream.common.set_sample_rate = out_set_sample_rate;
out->stream.common.get_buffer_size = out_get_buffer_size;
@@ -278,30 +646,64 @@
out->dev = adev;
- config->format = out_get_format(&out->stream.common);
- config->channel_mask = out_get_channels(&out->stream.common);
- config->sample_rate = out_get_sample_rate(&out->stream.common);
+ if (output_hardware_config_is_cached) {
+ config->sample_rate = cached_output_hardware_config.rate;
+
+ config->format = alsa_to_fw_format_id(cached_output_hardware_config.format);
+ if (config->format != AUDIO_FORMAT_PCM_16_BIT) {
+ // Always report PCM16 for now. AudioPolicyManagerBase/AudioFlinger don't understand
+ // other formats, so we wouldn't get chosen otherwise (say with a 24-bit DAC).
+ //TODO(pmclean) remove this when the above restriction is removed.
+ config->format = AUDIO_FORMAT_PCM_16_BIT;
+ }
+
+ config->channel_mask =
+ audio_channel_out_mask_from_count(cached_output_hardware_config.channels);
+ if (config->channel_mask != AUDIO_CHANNEL_OUT_STEREO) {
+ // Always report STEREO for now. AudioPolicyManagerBase/AudioFlinger don't understand
+ // formats with more channels, so we won't get chosen (say with a 4-channel DAC).
+ //TODO(pmclean) remove this when the above restriction is removed.
+ config->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ }
+ } else {
+ cached_output_hardware_config = default_alsa_out_config;
+
+ config->format = out_get_format(&out->stream.common);
+ config->channel_mask = out_get_channels(&out->stream.common);
+ config->sample_rate = out_get_sample_rate(&out->stream.common);
+ }
+ ALOGV("usb:audio_hw config->sample_rate:%d", config->sample_rate);
+ ALOGV("usb:audio_hw config->format:0x%X", config->format);
+ ALOGV("usb:audio_hw config->channel_mask:0x%X", config->channel_mask);
+
+ out->conversion_buffer = NULL;
+ out->conversion_buffer_size = 0;
out->standby = true;
- adev->card = -1;
- adev->device = -1;
-
*stream_out = &out->stream;
return 0;
err_open:
free(out);
*stream_out = NULL;
- return ret;
+ return -ENOSYS;
}
static void adev_close_output_stream(struct audio_hw_device *dev,
struct audio_stream_out *stream)
{
+ ALOGV("usb:audio_hw::out adev_close_output_stream()");
struct stream_out *out = (struct stream_out *)stream;
+ //TODO(pmclean) why are we doing this when the stream gets freed at the end?
+ // because it closes the pcm device
out_standby(&stream->common);
+
+ free(out->conversion_buffer);
+ out->conversion_buffer = NULL;
+ out->conversion_buffer_size = 0;
+
free(stream);
}
@@ -352,13 +754,264 @@
return 0;
}
+/* Helper functions */
+static uint32_t in_get_sample_rate(const struct audio_stream *stream)
+{
+ struct stream_in *in = (struct stream_in *)stream;
+ return in->alsa_pcm_config.rate;
+}
+
+static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
+{
+ return -ENOSYS;
+}
+
+static size_t in_get_buffer_size(const struct audio_stream *stream)
+{
+ struct stream_in *in = (struct stream_in *)stream;
+ size_t buff_size =
+ in->alsa_pcm_config.period_size
+ * audio_stream_frame_size((struct audio_stream *)stream);
+ return buff_size;
+}
+
+static uint32_t in_get_channels(const struct audio_stream *stream)
+{
+ struct stream_in *in = (struct stream_in *)stream;
+ //TODO(pmclean) this should be done with a num_channels_to_alsa_channels()
+ return in->alsa_pcm_config.channels == 2
+ ? AUDIO_CHANNEL_IN_STEREO : AUDIO_CHANNEL_IN_MONO;
+}
+
+static audio_format_t in_get_format(const struct audio_stream *stream)
+{
+ // just report 16-bit PCM for now.
+ return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+static int in_set_format(struct audio_stream *stream, audio_format_t format)
+{
+ return -ENOSYS;
+}
+
+static int in_standby(struct audio_stream *stream)
+{
+ ALOGV("-pcm-audio_hw::in in_standby() [Not Implemented]");
+ return 0;
+}
+
+static int in_dump(const struct audio_stream *stream, int fd)
+{
+ return 0;
+}
+
+static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
+{
+ ALOGV("Vaudio_hw::in in_set_parameters() keys:%s", kvpairs);
+
+ struct stream_in *in = (struct stream_in *)stream;
+ struct audio_device *adev = in->dev;
+ struct str_parms *parms;
+ char value[32];
+ int param_val;
+ int routing = 0;
+ int ret_value = 0;
+
+ parms = str_parms_create_str(kvpairs);
+ pthread_mutex_lock(&adev->lock);
+
+ // Card/Device
+ param_val = str_parms_get_str(parms, "card", value, sizeof(value));
+ if (param_val >= 0) {
+ adev->in_card = atoi(value);
+ }
+
+ param_val = str_parms_get_str(parms, "device", value, sizeof(value));
+ if (param_val >= 0) {
+ adev->in_device = atoi(value);
+ }
+
+ if (adev->in_card >= 0 && adev->in_device >= 0) {
+ ret_value = read_alsa_device_config(adev->in_card, adev->in_device, PCM_IN, &(in->alsa_pcm_config));
+ }
+
+ pthread_mutex_unlock(&adev->lock);
+ str_parms_destroy(parms);
+
+ return ret_value;
+}
+
+//TODO(pmclean) it seems like both out_get_parameters() and in_get_parameters()
+// could be written in terms of a get_device_parameters(io_type)
+
+static char * in_get_parameters(const struct audio_stream *stream, const char *keys)
+{
+ ALOGV("usb:audio_hw::in in_get_parameters() keys:%s", keys);
+
+ struct stream_in *in = (struct stream_in *)stream;
+ struct audio_device *adev = in->dev;
+
+ struct pcm_params * alsa_hw_params = pcm_params_get(adev->in_card, adev->in_device, PCM_IN);
+ if (alsa_hw_params == NULL)
+ return strdup("");
+
+ struct str_parms *query = str_parms_create_str(keys);
+ struct str_parms *result = str_parms_create();
+
+ int num_written = 0;
+ char buffer[256];
+ int buffer_size = sizeof(buffer)/sizeof(buffer[0]);
+ char* result_str = NULL;
+
+ unsigned min, max;
+
+ // These keys are from hardware/libhardware/include/audio.h
+ // supported sample rates
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES)) {
+ // pcm_hw_params doesn't have a list of supported sample rates, just a min and a max, so
+ // if they are different, return a list containing those two values, otherwise just the one.
+ min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE);
+ max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE);
+ num_written = snprintf(buffer, buffer_size, "%d", min);
+ if (min != max) {
+ snprintf(buffer + num_written, buffer_size - num_written, "|%d", max);
+ }
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SAMPLING_RATE, buffer);
+ } // AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES
+
+ // supported channel counts
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_CHANNELS)) {
+ // Similarly for the input channel count
+ min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS);
+ max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS);
+ num_written = snprintf(buffer, buffer_size, "%d", min);
+ if (min != max) {
+ snprintf(buffer + num_written, buffer_size - num_written, "|%d", max);
+ }
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_CHANNELS, buffer);
+ } // AUDIO_PARAMETER_STREAM_SUP_CHANNELS
+
+ // supported sample formats
+ if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_FORMATS)) {
+ //TODO(pmclean): this is wrong.
+ min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS);
+ max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_SAMPLE_BITS);
+ num_written = snprintf(buffer, buffer_size, "%d", min);
+ if (min != max) {
+ snprintf(buffer + num_written, buffer_size - num_written, "|%d", max);
+ }
+ str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_FORMATS, buffer);
+ } // AUDIO_PARAMETER_STREAM_SUP_FORMATS
+
+ result_str = str_parms_to_str(result);
+
+ // done with these...
+ str_parms_destroy(query);
+ str_parms_destroy(result);
+
+ return result_str;
+}
+
+static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+ return 0;
+}
+
+static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+ return 0;
+}
+
+static int in_set_gain(struct audio_stream_in *stream, float gain) {
+ return 0;
+}
+
+static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t bytes) {
+ struct stream_in * in = (struct stream_in *)stream;
+
+ int err = pcm_read(in->pcm, buffer, bytes);
+
+ return err == 0 ? bytes : 0;
+}
+
+static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream) {
+ return 0;
+}
+
static int adev_open_input_stream(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
- struct audio_config *config,
+ struct audio_config *hal_config,
struct audio_stream_in **stream_in)
{
- return -ENOSYS;
+ ALOGV("usb:audio_hw::in adev_open_input_stream() rate:%d, chanMask:0x%X, fmt:%d",
+ hal_config->sample_rate,
+ hal_config->channel_mask,
+ hal_config->format);
+
+ struct stream_in *in = (struct stream_in *)calloc(1, sizeof(struct stream_in));
+ if (in == NULL)
+ return -ENOMEM;
+
+ // setup function pointers
+ in->stream.common.get_sample_rate = in_get_sample_rate;
+ in->stream.common.set_sample_rate = in_set_sample_rate;
+ in->stream.common.get_buffer_size = in_get_buffer_size;
+ in->stream.common.get_channels = in_get_channels;
+ in->stream.common.get_format = in_get_format;
+ in->stream.common.set_format = in_set_format;
+ in->stream.common.standby = in_standby;
+ in->stream.common.dump = in_dump;
+ in->stream.common.set_parameters = in_set_parameters;
+ in->stream.common.get_parameters = in_get_parameters;
+ in->stream.common.add_audio_effect = in_add_audio_effect;
+ in->stream.common.remove_audio_effect = in_remove_audio_effect;
+
+ in->stream.set_gain = in_set_gain;
+ in->stream.read = in_read;
+ in->stream.get_input_frames_lost = in_get_input_frames_lost;
+
+ struct audio_device *adev = (struct audio_device *)dev;
+ in->dev = adev;
+
+ in->standby = true;
+ in->requested_rate = hal_config->sample_rate;
+ in->alsa_pcm_config = default_alsa_in_config;
+
+ if (hal_config->sample_rate != 0)
+ in->alsa_pcm_config.rate = hal_config->sample_rate;
+
+ //TODO(pmclean) is this correct, or do we need to map from ALSA format?
+ // hal_config->format is an audio_format_t, so the "logical"
+ // hal_config->format = default_alsa_in_config.format;
+ // would assign an ALSA pcm_format value without mapping it first.
+ //TODO(pmclean) use audio_format_from_pcm_format() (in hardware/audio_alsaops.h)
+ switch (default_alsa_in_config.format) {
+ case PCM_FORMAT_S32_LE:
+ hal_config->format = AUDIO_FORMAT_PCM_32_BIT;
+ break;
+
+ case PCM_FORMAT_S8:
+ hal_config->format = AUDIO_FORMAT_PCM_8_BIT;
+ break;
+
+ case PCM_FORMAT_S24_LE:
+ hal_config->format = AUDIO_FORMAT_PCM_8_24_BIT;
+ break;
+
+ case PCM_FORMAT_S24_3LE:
+ hal_config->format = AUDIO_FORMAT_PCM_8_24_BIT;
+ break;
+
+ default:
+ case PCM_FORMAT_S16_LE:
+ hal_config->format = AUDIO_FORMAT_PCM_16_BIT;
+ break;
+ }
+
+ *stream_in = &in->stream;
+
+ return 0;
}
static void adev_close_input_stream(struct audio_hw_device *dev,
@@ -373,22 +1026,25 @@
static int adev_close(hw_device_t *device)
{
- struct audio_device *adev = (struct audio_device *)device;
+ ALOGV("usb:audio_hw::adev_close()");
+ struct audio_device *adev = (struct audio_device *)device;
free(device);
+
+ output_hardware_config_is_cached = false;
+
return 0;
}
static int adev_open(const hw_module_t* module, const char* name,
hw_device_t** device)
{
- struct audio_device *adev;
- int ret;
+ // ALOGV("usb:audio_hw::adev_open(%s)", name);
if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
return -EINVAL;
- adev = calloc(1, sizeof(struct audio_device));
+ struct audio_device *adev = calloc(1, sizeof(struct audio_device));
if (!adev)
return -ENOMEM;
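For illustration only (not part of this change): the 16-bit to packed 24-bit conversion above works in place, back to front; since the helper is static to audio_hw.c, this standalone sketch restates the same technique on two sample values so the byte layout is visible.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* two PCM16LE samples (0x1234, 0xABCD) in a buffer already sized for
         * the packed 24-bit result (3 bytes per sample) */
        uint8_t buff[6] = { 0x34, 0x12, 0xCD, 0xAB, 0x00, 0x00 };
        const int num_samples = 2;

        uint8_t* dst = buff + num_samples * 3 - 1;
        const uint8_t* src = buff + num_samples * 2 - 1;
        int i;
        for (i = 0; i < num_samples; i++) {
            *dst-- = *src--;   /* hi byte   */
            *dst-- = *src--;   /* low byte  */
            *dst-- = 0;        /* zero fill */
        }
        /* buff now holds 00 34 12 00 CD AB: each sample is 24-bit LE, low byte 0 */
        for (i = 0; i < 6; i++)
            printf("%02X ", buff[i]);
        printf("\n");
        return 0;
    }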
diff --git a/tests/camera2/CameraMultiStreamTests.cpp b/tests/camera2/CameraMultiStreamTests.cpp
index de1cfd6..a78950c 100644
--- a/tests/camera2/CameraMultiStreamTests.cpp
+++ b/tests/camera2/CameraMultiStreamTests.cpp
@@ -35,7 +35,10 @@
#define CAMERA_MULTI_STREAM_DEBUGGING 0
#define CAMERA_FRAME_TIMEOUT 1000000000LL // nsecs (1 secs)
#define PREVIEW_RENDERING_TIME_INTERVAL 200000 // in unit of us, 200ms
-#define TOLERANCE_MARGIN 0.01 // 1% tolerance margin for exposure sanity check.
+// 1% tolerance margin for exposure sanity check against metadata
+#define TOLERANCE_MARGIN_METADATA 0.01
+// 5% tolerance margin for exposure sanity check against capture times
+#define TOLERANCE_MARGIN_CAPTURE 0.05
/* constants for display */
#define DISPLAY_BUFFER_HEIGHT 1024
#define DISPLAY_BUFFER_WIDTH 1024
@@ -399,10 +402,10 @@
// TODO: Need revisit it to figure out an accurate margin.
int64_t resultExposure = GetExposureValue(frameMetadata);
int32_t resultSensitivity = GetSensitivity(frameMetadata);
- EXPECT_LE(sensitivities[i] * (1.0 - TOLERANCE_MARGIN), resultSensitivity);
- EXPECT_GE(sensitivities[i] * (1.0 + TOLERANCE_MARGIN), resultSensitivity);
- EXPECT_LE(exposures[i] * (1.0 - TOLERANCE_MARGIN), resultExposure);
- EXPECT_GE(exposures[i] * (1.0 + TOLERANCE_MARGIN), resultExposure);
+ EXPECT_LE(sensitivities[i] * (1.0 - TOLERANCE_MARGIN_METADATA), resultSensitivity);
+ EXPECT_GE(sensitivities[i] * (1.0 + TOLERANCE_MARGIN_METADATA), resultSensitivity);
+ EXPECT_LE(exposures[i] * (1.0 - TOLERANCE_MARGIN_METADATA), resultExposure);
+ EXPECT_GE(exposures[i] * (1.0 + TOLERANCE_MARGIN_METADATA), resultExposure);
ASSERT_EQ(OK, listener->waitForFrame(waitLimit));
captureBurstTimes.push_back(systemTime());
@@ -422,7 +425,7 @@
if (i > 0) {
nsecs_t timeDelta =
captureBurstTimes[i] - captureBurstTimes[i-1];
- EXPECT_GE(timeDelta, exposures[i]);
+ EXPECT_GE(timeDelta * (1 + TOLERANCE_MARGIN_CAPTURE), exposures[i]);
}
}
}
diff --git a/tests/camera2/camera2.cpp b/tests/camera2/camera2.cpp
index 600d440..73f8c61 100644
--- a/tests/camera2/camera2.cpp
+++ b/tests/camera2/camera2.cpp
@@ -172,13 +172,6 @@
err = listener.getNotificationsFrom(dev);
if (err != OK) return err;
- vendor_tag_query_ops_t *vendor_metadata_tag_ops;
- err = dev->ops->get_metadata_vendor_tag_ops(dev, &vendor_metadata_tag_ops);
- if (err != OK) return err;
-
- err = set_camera_metadata_vendor_tag_ops(vendor_metadata_tag_ops);
- if (err != OK) return err;
-
return OK;
}