am 42a70b67: Merge "Enhance keymaster tests"

* commit '42a70b67abf0fce9c0f898c92a6e3c36e92773e9':
  Enhance keymaster tests
diff --git a/include/hardware/audio.h b/include/hardware/audio.h
index 01d79b2..26e9ea9 100644
--- a/include/hardware/audio.h
+++ b/include/hardware/audio.h
@@ -291,7 +291,7 @@
 /**
  * return the frame size (number of bytes per frame).
  */
-static inline size_t audio_stream_frame_size(struct audio_stream *s)
+static inline size_t audio_stream_frame_size(const struct audio_stream *s)
 {
     size_t chan_samp_sz;
 
@@ -352,7 +352,7 @@
      * master volume control.  AudioFlinger will query this value from the
      * primary audio HAL when the service starts and use the value for setting
      * the initial master volume across all HALs.  HALs which do not support
-     * this method should may leave it set to NULL.
+     * this method may leave it set to NULL.
      */
     int (*get_master_volume)(struct audio_hw_device *dev, float *volume);
 
@@ -407,6 +407,21 @@
 
     /** This method dumps the state of the audio hardware */
     int (*dump)(const struct audio_hw_device *dev, int fd);
+
+    /**
+     * set the audio mute status for all audio activities.  If any value other
+     * than 0 is returned, the software mixer will emulate this capability.
+     */
+    int (*set_master_mute)(struct audio_hw_device *dev, bool mute);
+
+    /**
+     * Get the current master mute status for the HAL, if the HAL supports
+     * master mute control.  AudioFlinger will query this value from the primary
+     * audio HAL when the service starts and use the value for setting the
+     * initial master mute across all HALs.  HALs which do not support this
+     * method may leave it set to NULL.
+     */
+    int (*get_master_mute)(struct audio_hw_device *dev, bool *mute);
 };
 typedef struct audio_hw_device audio_hw_device_t;
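The master mute hooks above mirror the master volume pair: both are optional, and a HAL that cannot mute in hardware can either leave set_master_mute NULL or return a non-zero value, in which case the software mixer emulates the capability. A minimal sketch of a HAL-side implementation, assuming a hypothetical wrapper struct that caches the state:

    struct my_audio_device {
        struct audio_hw_device device;
        bool master_mute;  /* hypothetical cached state */
    };

    static int my_set_master_mute(struct audio_hw_device *dev, bool mute)
    {
        struct my_audio_device *adev = (struct my_audio_device *)dev;
        adev->master_mute = mute;
        /* program the hardware mute here; returning non-zero instead would
         * ask the software mixer to emulate mute */
        return 0;
    }

    static int my_get_master_mute(struct audio_hw_device *dev, bool *mute)
    {
        struct my_audio_device *adev = (struct my_audio_device *)dev;
        *mute = adev->master_mute;
        return 0;
    }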
 
diff --git a/include/hardware/audio_effect.h b/include/hardware/audio_effect.h
index 65eba36..4037bbb 100644
--- a/include/hardware/audio_effect.h
+++ b/include/hardware/audio_effect.h
@@ -784,8 +784,8 @@
 // EFFECT_FEATURE_AUX_CHANNELS feature configuration descriptor. Describes a combination
 // of main and auxiliary channels supported
 typedef struct channel_config_s {
-    uint32_t   main_channels;   // channel mask for main channels
-    uint32_t   aux_channels;    // channel mask for auxiliary channels
+    audio_channel_mask_t main_channels; // channel mask for main channels
+    audio_channel_mask_t aux_channels;  // channel mask for auxiliary channels
 } channel_config_t;
 
 
diff --git a/include/hardware/audio_policy.h b/include/hardware/audio_policy.h
index 78a1a62..6dd5fd7 100644
--- a/include/hardware/audio_policy.h
+++ b/include/hardware/audio_policy.h
@@ -132,7 +132,7 @@
                                     audio_stream_type_t stream,
                                     uint32_t samplingRate,
                                     audio_format_t format,
-                                    uint32_t channels,
+                                    audio_channel_mask_t channelMask,
                                     audio_output_flags_t flags);
 
     /* indicates to the audio policy manager that the output starts being used
@@ -157,7 +157,7 @@
     audio_io_handle_t (*get_input)(struct audio_policy *pol, audio_source_t inputSource,
                                    uint32_t samplingRate,
                                    audio_format_t format,
-                                   uint32_t channels,
+                                   audio_channel_mask_t channelMask,
                                    audio_in_acoustics_t acoustics);
 
     /* indicates to the audio policy manager that the input starts being used */
@@ -215,10 +215,10 @@
 
     /* Audio effect management */
     audio_io_handle_t (*get_output_for_effect)(struct audio_policy *pol,
-                                            struct effect_descriptor_s *desc);
+                                            const struct effect_descriptor_s *desc);
 
     int (*register_effect)(struct audio_policy *pol,
-                           struct effect_descriptor_s *desc,
+                           const struct effect_descriptor_s *desc,
                            audio_io_handle_t output,
                            uint32_t strategy,
                            int session,
@@ -291,7 +291,10 @@
     /* Audio input Control functions */
     /* */
 
-    /* opens an audio input */
+    /* opens an audio input
+     * deprecated - new implementations should use open_input_on_module,
+     * and the acoustics parameter is ignored
+     */
     audio_io_handle_t (*open_input)(void *service,
                                     audio_devices_t *pDevices,
                                     uint32_t *pSamplingRate,
diff --git a/include/hardware/camera2.h b/include/hardware/camera2.h
index 36f2a9e..518130b 100644
--- a/include/hardware/camera2.h
+++ b/include/hardware/camera2.h
@@ -18,6 +18,7 @@
 #define ANDROID_INCLUDE_CAMERA2_H
 
 #include "camera_common.h"
+#include "system/camera_metadata.h"
 
 /**
  * Camera device HAL 2.0 [ CAMERA_DEVICE_API_VERSION_2_0 ]
@@ -35,7 +36,7 @@
  * version 2.0 of the camera module interface (as defined by
  * camera_module_t.common.module_api_version).
  *
- * See camera_common.h for more details.
+ * See camera_common.h for more versioning details.
  *
  */
 
@@ -43,105 +44,226 @@
 
 struct camera2_device;
 
-/**
- * Output image stream queue management
+/**********************************************************************
+ *
+ * Input/output stream buffer queue interface definitions
+ *
  */
 
+/**
+ * Output image stream queue interface. A set of these methods is provided to
+ * the HAL device in allocate_stream(), and are used to interact with the
+ * gralloc buffer queue for that stream. They may not be called until after
+ * allocate_stream returns.
+ */
 typedef struct camera2_stream_ops {
-    int (*dequeue_buffer)(struct camera2_stream_ops* w,
-                          buffer_handle_t** buffer, int *stride);
-    int (*enqueue_buffer)(struct camera2_stream_ops* w,
-                buffer_handle_t* buffer);
-    int (*cancel_buffer)(struct camera2_stream_ops* w,
-                buffer_handle_t* buffer);
-    int (*set_buffer_count)(struct camera2_stream_ops* w, int count);
-    int (*set_buffers_geometry)(struct camera2_stream_ops* pw,
-                int w, int h, int format);
-    int (*set_crop)(struct camera2_stream_ops *w,
-                int left, int top, int right, int bottom);
-    // Timestamps are measured in nanoseconds, and must be comparable
-    // and monotonically increasing between two frames in the same
-    // preview stream. They do not need to be comparable between
-    // consecutive or parallel preview streams, cameras, or app runs.
-    // The timestamp must be the time at the start of image exposure.
-    int (*set_timestamp)(struct camera2_stream_ops *w, int64_t timestamp);
-    int (*set_usage)(struct camera2_stream_ops* w, int usage);
-    int (*get_min_undequeued_buffer_count)(const struct camera2_stream_ops *w,
-                int *count);
-    int (*lock_buffer)(struct camera2_stream_ops* w,
-                buffer_handle_t* buffer);
+    /**
+     * Get a buffer to fill from the queue. The size and format of the buffer
+     * are fixed for a given stream (defined in allocate_stream), and the stride
+     * should be queried from the platform gralloc module. The gralloc buffer
+     * will have been allocated based on the usage flags provided by
+     * allocate_stream, and will be locked for use.
+     */
+    int (*dequeue_buffer)(const struct camera2_stream_ops* w,
+            buffer_handle_t** buffer);
+
+    /**
+     * Push a filled buffer to the stream to be used by the consumer.
+     *
+     * The timestamp represents the time at start of exposure of the first row
+     * of the image; it must be from a monotonic clock, and is measured in
+     * nanoseconds. The timestamps do not need to be comparable between
+     * different cameras, or consecutive instances of the same camera. However,
+     * they must be comparable between streams from the same camera. If one
+     * capture produces buffers for multiple streams, each stream must have the
+     * same timestamp for that buffer, and that timestamp must match the
+     * timestamp in the output frame metadata.
+     */
+    int (*enqueue_buffer)(const struct camera2_stream_ops* w,
+            int64_t timestamp,
+            buffer_handle_t* buffer);
+    /**
+     * Return a buffer to the queue without marking it as filled.
+     */
+    int (*cancel_buffer)(const struct camera2_stream_ops* w,
+            buffer_handle_t* buffer);
+    /**
+     * Set the crop window for subsequently enqueued buffers. The parameters are
+     * measured in pixels relative to the buffer width and height.
+     */
+    int (*set_crop)(const struct camera2_stream_ops *w,
+            int left, int top, int right, int bottom);
+
 } camera2_stream_ops_t;
 
+enum {
+    /**
+     * Special pixel format value used to indicate that the framework does not care
+     * what exact pixel format is to be used for an output stream. The device HAL is
+     * free to select any pixel format, platform-specific and otherwise, and this
+     * opaque value will be passed on to the platform gralloc module when buffers
+     * need to be allocated for the stream.
+     */
+    CAMERA2_HAL_PIXEL_FORMAT_OPAQUE     = -1,
+    /**
+     * Special pixel format value used to indicate that the framework will use
+     * the output buffers for zero-shutter-lag mode; these buffers should be
+     * efficient to produce at full sensor resolution, and efficient to send
+     * into a reprocess stream for final output processing.
+     */
+    CAMERA2_HAL_PIXEL_FORMAT_ZSL = -2
+};
+
 /**
+ * Input reprocess stream queue management. A set of these methods is provided
+ * to the HAL device in allocate_reprocess_stream(); they are used to interact
+ * with the reprocess stream's input gralloc buffer queue.
+ */
+typedef struct camera2_stream_in_ops {
+    /**
+     * Get the next buffer of image data to reprocess. The width, height, and
+     * format of the buffer is fixed in allocate_reprocess_stream(), and the
+     * stride and other details should be queried from the platform gralloc
+     * module as needed. The buffer will already be locked for use.
+     */
+    int (*acquire_buffer)(const struct camera2_stream_in_ops *w,
+            buffer_handle_t** buffer);
+    /**
+     * Return a used buffer to the buffer queue for reuse.
+     */
+    int (*release_buffer)(const struct camera2_stream_in_ops *w,
+            buffer_handle_t* buffer);
+
+} camera2_stream_in_ops_t;
+
+/**********************************************************************
+ *
  * Metadata queue management, used for requests sent to HAL module, and for
  * frames produced by the HAL.
  *
- * Queue protocol:
- *
- * The source holds the queue and its contents. At start, the queue is empty.
- *
- * 1. When the first metadata buffer is placed into the queue, the source must
- *    signal the destination by calling notify_queue_not_empty().
- *
- * 2. After receiving notify_queue_not_empty, the destination must call
- *    dequeue() once it's ready to handle the next buffer.
- *
- * 3. Once the destination has processed a buffer, it should try to dequeue
- *    another buffer. If there are no more buffers available, dequeue() will
- *    return NULL. In this case, when a buffer becomes available, the source
- *    must call notify_queue_not_empty() again. If the destination receives a
- *    NULL return from dequeue, it does not need to query the queue again until
- *    a notify_queue_not_empty() call is received from the source.
- *
- * 4. If the destination calls buffer_count() and receives 0, this does not mean
- *    that the source will provide a notify_queue_not_empty() call. The source
- *    must only provide such a call after the destination has received a NULL
- *    from dequeue, or on initial startup.
- *
- * 5. The dequeue() call in response to notify_queue_not_empty() may be on the
- *    same thread as the notify_queue_not_empty() call. The source must not
- *    deadlock in that case.
  */
 
-typedef struct camera2_metadata_queue_src_ops {
+enum {
+    CAMERA2_REQUEST_QUEUE_IS_BOTTOMLESS = -1
+};
+
+/**
+ * Request input queue protocol:
+ *
+ * The framework holds the queue and its contents. At start, the queue is empty.
+ *
+ * 1. When the first metadata buffer is placed into the queue, the framework
+ *    signals the device by calling notify_request_queue_not_empty().
+ *
+ * 2. After receiving notify_request_queue_not_empty, the device must call
+ *    dequeue() once it's ready to handle the next buffer.
+ *
+ * 3. Once the device has processed a buffer, and is ready for the next buffer,
+ *    it must call dequeue() again instead of waiting for a notification. If
+ *    there are no more buffers available, dequeue() will return NULL. After
+ *    this point, when a buffer becomes available, the framework must call
+ *    notify_request_queue_not_empty() again. If the device receives a NULL
+ *    return from dequeue, it does not need to query the queue again until a
+ *    notify_request_queue_not_empty() call is received from the framework.
+ *
+ * 4. If the device calls request_count() and receives 0, this does not mean that
+ *    the framework will provide a notify_request_queue_not_empty() call. The
+ *    framework will only provide such a notification after the device has
+ *    received a NULL from dequeue, or on initial startup.
+ *
+ * 5. The dequeue() call in response to notify_request_queue_not_empty() may be
+ *    on the same thread as the notify_request_queue_not_empty() call, and may
+ *    be performed from within the notify call.
+ *
+ * 6. All dequeued request buffers must be returned to the framework by calling
+ *    free_request, including when errors occur, a device flush is requested, or
+ *    when the device is shutting down.
+ */
+typedef struct camera2_request_queue_src_ops {
     /**
-     * Get count of buffers in queue
+     * Get the count of request buffers pending in the queue. May return
+     * CAMERA2_REQUEST_QUEUE_IS_BOTTOMLESS if a repeating request (stream
+     * request) is currently configured. Calling this method has no effect on
+     * whether the notify_request_queue_not_empty() method will be called by the
+     * framework.
      */
-    int (*buffer_count)(camera2_metadata_queue_src_ops *q);
+    int (*request_count)(const struct camera2_request_queue_src_ops *q);
 
     /**
-     * Get a metadata buffer from the source. Returns OK if a request is
-     * available, placing a pointer to it in next_request.
+     * Get a metadata buffer from the framework. Returns OK if there is no
+     * error. If the queue is empty, returns NULL in buffer. In that case, the
+     * device must wait for a notify_request_queue_not_empty() message before
+     * attempting to dequeue again. Buffers obtained in this way must be
+     * returned to the framework with free_request().
      */
-    int (*dequeue)(camera2_metadata_queue_src_ops *q,
+    int (*dequeue_request)(const struct camera2_request_queue_src_ops *q,
             camera_metadata_t **buffer);
     /**
-     * Return a metadata buffer to the source once it has been used
+     * Return a metadata buffer to the framework once it has been used, or if
+     * an error or shutdown occurs.
      */
-    int (*free)(camera2_metadata_queue_src_ops *q,
+    int (*free_request)(const struct camera2_request_queue_src_ops *q,
             camera_metadata_t *old_buffer);
 
-} camera2_metadata_queue_src_ops_t;
+} camera2_request_queue_src_ops_t;
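As a hedged illustration of steps 2, 3, and 6 of the protocol above, a device-side drain loop might look like the following; process_request() is a placeholder for the HAL's own work:

    static void drain_request_queue(const camera2_request_queue_src_ops_t *ops)
    {
        camera_metadata_t *request;
        /* Step 3: keep dequeuing until the framework hands back NULL. */
        while (ops->dequeue_request(ops, &request) == 0 && request != NULL) {
            process_request(request);          /* placeholder */
            ops->free_request(ops, request);   /* step 6: always return buffers */
        }
        /* NULL received: do not poll again until the framework calls
         * notify_request_queue_not_empty(). */
    }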
 
-typedef struct camera2_metadata_queue_dst_ops {
+/**
+ * Frame output queue protocol:
+ *
+ * The framework holds the queue and its contents. At start, the queue is empty.
+ *
+ * 1. When the device is ready to fill an output metadata frame, it must dequeue
+ *    a metadata buffer of the required size.
+ *
+ * 2. It should then fill the metadata buffer, and place it on the frame queue
+ *    using enqueue_frame. The framework takes ownership of the frame.
+ *
+ * 3. In case of an error, a request to flush the pipeline, or shutdown, the
+ *    device must return any affected dequeued frames to the framework by
+ *    calling cancel_frame.
+ */
+typedef struct camera2_frame_queue_dst_ops {
     /**
-     * Notify destination that the queue is no longer empty
+     * Get an empty metadata buffer to fill from the framework. The new metadata
+     * buffer will have room for entries number of metadata entries, plus
+     * data_bytes worth of extra storage. Frames dequeued here must be returned
+     * to the framework with either cancel_frame or enqueue_frame.
      */
-    int (*notify_queue_not_empty)(struct camera2_metadata_queue_dst_ops *);
+    int (*dequeue_frame)(const struct camera2_frame_queue_dst_ops *q,
+            size_t entries, size_t data_bytes,
+            camera_metadata_t **buffer);
 
-} camera2_metadata_queue_dst_ops_t;
+    /**
+     * Return a dequeued metadata buffer to the framework for reuse; do not mark it as
+     * filled. Use when encountering errors, or flushing the internal request queue.
+     */
+    int (*cancel_frame)(const struct camera2_frame_queue_dst_ops *q,
+            camera_metadata_t *buffer);
 
-/* Defined in camera_metadata.h */
-typedef struct vendor_tag_query_ops vendor_tag_query_ops_t;
+    /**
+     * Place a completed metadata frame on the frame output queue.
+     */
+    int (*enqueue_frame)(const struct camera2_frame_queue_dst_ops *q,
+            camera_metadata_t *buffer);
+
+} camera2_frame_queue_dst_ops_t;
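A sketch of the corresponding device-side usage, under the protocol above; fill_result_metadata() is a placeholder for the HAL's own logic:

    static int emit_frame(const camera2_frame_queue_dst_ops_t *ops,
            size_t entries, size_t data_bytes)
    {
        camera_metadata_t *frame;
        int res = ops->dequeue_frame(ops, entries, data_bytes, &frame);
        if (res != 0)
            return res;
        if (fill_result_metadata(frame) != 0)      /* placeholder */
            return ops->cancel_frame(ops, frame);  /* step 3: return unfilled */
        return ops->enqueue_frame(ops, frame);     /* step 2: framework owns it */
    }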
+
+/**********************************************************************
+ *
+ * Notification callback and message definition, and trigger definitions
+ *
+ */
 
 /**
  * Asynchronous notification callback from the HAL, fired for various
 * reasons. Only for information independent of frame capture, or that requires
- * specific timing.
+ * specific timing. The user pointer must be the same one that was passed to the
+ * device in set_notify_callback().
  */
 typedef void (*camera2_notify_callback)(int32_t msg_type,
         int32_t ext1,
         int32_t ext2,
+        int32_t ext3,
         void *user);
 
 /**
@@ -149,15 +271,42 @@
  */
 enum {
     /**
-     * A serious error has occurred. Argument ext1 contains the error code, and
-     * ext2 and user contain any error-specific information.
+     * An error has occurred. Argument ext1 contains the error code, and
+     * ext2 and ext3 contain any error-specific information.
      */
     CAMERA2_MSG_ERROR   = 0x0001,
     /**
      * The exposure of a given request has begun. Argument ext1 contains the
-     * request id.
+     * frame number, and ext2 and ext3 contain the low-order and high-order
+     * 32 bits of the timestamp for when exposure began.
+     * (timestamp = (ext3 << 32 | ext2))
      */
-    CAMERA2_MSG_SHUTTER = 0x0002
+    CAMERA2_MSG_SHUTTER = 0x0010,
+    /**
+     * The autofocus routine has changed state. Argument ext1 contains the new
+     * state; the values are the same as those for the metadata field
+     * android.control.afState. Ext2 contains the latest trigger ID passed to
+     * trigger_action(CAMERA2_TRIGGER_AUTOFOCUS) or
+     * trigger_action(CAMERA2_TRIGGER_CANCEL_AUTOFOCUS), or 0 if trigger has not
+     * been called with either of those actions.
+     */
+    CAMERA2_MSG_AUTOFOCUS = 0x0020,
+    /**
+     * The autoexposure routine has changed state. Argument ext1 contains the
+     * new state; the values are the same as those for the metadata field
+     * android.control.aeState. Ext2 contains the latest trigger ID value passed to
+     * trigger_action(CAMERA2_TRIGGER_PRECAPTURE_METERING), or 0 if that method
+     * has not been called.
+     */
+    CAMERA2_MSG_AUTOEXPOSURE = 0x0021,
+    /**
+     * The auto-whitebalance routine has changed state. Argument ext1 contains
+     * the new state; the values are the same as those for the metadata field
+     * android.control.awbState. Ext2 contains the latest trigger ID passed to
+     * trigger_action(CAMERA2_TRIGGER_PRECAPTURE_METERING), or 0 if that method
+     * has not been called.
+     */
+    CAMERA2_MSG_AUTOWB = 0x0022
 };
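For example, a framework-side notify handler might reassemble the shutter timestamp like this (a sketch; the handler name is hypothetical, and the unsigned cast keeps ext2's low 32 bits from being sign-extended):

    static void on_notify(int32_t msg_type, int32_t ext1,
            int32_t ext2, int32_t ext3, void *user)
    {
        if (msg_type == CAMERA2_MSG_SHUTTER) {
            int32_t frame_number = ext1;
            /* timestamp = (ext3 << 32 | ext2), per the definition above */
            int64_t timestamp = ((int64_t)ext3 << 32) | (uint32_t)ext2;
            /* ... record exposure start for frame_number ... */
        }
    }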
 
 /**
@@ -169,133 +318,420 @@
      * no further frames or buffer streams will be produced by the
      * device. Device should be treated as closed.
      */
-    CAMERA2_MSG_ERROR_HARDWARE_FAULT = 0x0001,
+    CAMERA2_MSG_ERROR_HARDWARE = 0x0001,
     /**
     * A serious failure occurred. No further frames or buffer streams will be
      * produced by the device. Device should be treated as closed. The client
      * must reopen the device to use it again.
      */
-    CAMERA2_MSG_ERROR_DEVICE_FAULT =   0x0002,
+    CAMERA2_MSG_ERROR_DEVICE,
     /**
-     * The camera service has failed. Device should be treated as released. The client
-     * must reopen the device to use it again.
+     * An error has occurred in processing a request. No output (metadata or
+     * buffers) will be produced for this request. ext2 contains the frame
+     * number of the request. Subsequent requests are unaffected, and the device
+     * remains operational.
      */
-    CAMERA2_MSG_ERROR_SERVER_FAULT =   0x0003
+    CAMERA2_MSG_ERROR_REQUEST,
+    /**
+     * An error has occurred in producing an output frame metadata buffer for a
+     * request, but image buffers for it will still be available. Subsequent
+     * requests are unaffected, and the device remains operational. ext2
+     * contains the frame number of the request.
+     */
+    CAMERA2_MSG_ERROR_FRAME,
+    /**
+     * An error has occurred in placing an output buffer into a stream for a
+     * request. The frame metadata and other buffers may still be
+     * available. Subsequent requests are unaffected, and the device remains
+     * operational. ext2 contains the frame number of the request, and ext3
+     * contains the stream id.
+     */
+    CAMERA2_MSG_ERROR_STREAM,
+    /**
+     * Number of error types
+     */
+    CAMERA2_MSG_NUM_ERRORS
 };
 
+/**
+ * Possible trigger ids for trigger_action()
+ */
+enum {
+    /**
+     * Trigger an autofocus cycle. The effect of the trigger depends on the
+     * autofocus mode in effect when the trigger is received, which is the mode
+     * listed in the latest capture request to be dequeued by the HAL. If the
+     * mode is OFF, EDOF, or FIXED, the trigger has no effect. In AUTO, MACRO,
+     * or CONTINUOUS_* modes, see below for the expected behavior. The state of
+     * the autofocus cycle can be tracked in android.control.afState and the
+     * corresponding notifications.
+     *
+     **
+     * In AUTO or MACRO mode, the AF state transitions (and notifications)
+     * when calling with trigger ID = N with the previous ID being K are:
+     *
+     * Initial state       Transitions
+     * INACTIVE (K)         -> ACTIVE_SCAN (N) -> AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * AF_FOCUSED (K)       -> ACTIVE_SCAN (N) -> AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * AF_NOT_FOCUSED (K)   -> ACTIVE_SCAN (N) -> AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * ACTIVE_SCAN (K)      -> AF_FOCUSED(N) or AF_NOT_FOCUSED(N)
+     * PASSIVE_SCAN (K)      Not used in AUTO/MACRO mode
+     * PASSIVE_FOCUSED (K)   Not used in AUTO/MACRO mode
+     *
+     **
+     * In CONTINUOUS_PICTURE mode, triggering AF must lock the AF to the current
+     * lens position and transition the AF state to either AF_FOCUSED or
+     * AF_NOT_FOCUSED. If a passive scan is underway, that scan must complete and
+     * then lock the lens position and change AF state. TRIGGER_CANCEL_AUTOFOCUS
+     * will allow the AF to restart its operation.
+     *
+     * Initial state      Transitions
+     * INACTIVE (K)        -> immediate AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * PASSIVE_FOCUSED (K) -> immediate AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * PASSIVE_SCAN (K)    -> AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * AF_FOCUSED (K)      no effect except to change next notification ID to N
+     * AF_NOT_FOCUSED (K)  no effect except to change next notification ID to N
+     *
+     **
+     * In CONTINUOUS_VIDEO mode, triggering AF must lock the AF to the current
+     * lens position and transition the AF state to either AF_FOCUSED or
+     * AF_NOT_FOCUSED. If a passive scan is underway, it must immediately halt, in
+     * contrast with CONTINUOUS_PICTURE mode. TRIGGER_CANCEL_AUTOFOCUS will
+     * allow the AF to restart its operation.
+     *
+     * Initial state      Transitions
+     * INACTIVE (K)        -> immediate AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * PASSIVE_FOCUSED (K) -> immediate AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * PASSIVE_SCAN (K)    -> immediate AF_FOCUSED (N) or AF_NOT_FOCUSED (N)
+     * AF_FOCUSED (K)      no effect except to change next notification ID to N
+     * AF_NOT_FOCUSED (K)  no effect except to change next notification ID to N
+     *
+     * Ext1 is an ID that must be returned in subsequent auto-focus state change
+     * notifications through camera2_notify_callback() and stored in
+     * android.control.afTriggerId.
+     */
+    CAMERA2_TRIGGER_AUTOFOCUS = 0x0001,
+    /**
+     * Send a cancel message to the autofocus algorithm. The effect of the
+     * cancellation depends on the autofocus mode in effect when the trigger is
+     * received, which is the mode listed in the latest capture request to be
+     * dequeued by the HAL. If the AF mode is OFF or EDOF, the cancel has no
+     * effect.  For other modes, the lens should return to its default position,
+     * any current autofocus scan must be canceled, and the AF state should be
+     * set to INACTIVE.
+     *
+     * The state of the autofocus cycle can be tracked in android.control.afState
+     * and the corresponding notification. Continuous autofocus modes may resume
+     * focusing operations thereafter exactly as if the camera had just been set
+     * to a continuous AF mode.
+     *
+     * Ext1 is an ID that must be returned in subsequent auto-focus state change
+     * notifications through camera2_notify_callback() and stored in
+     * android.control.afTriggerId.
+     */
+    CAMERA2_TRIGGER_CANCEL_AUTOFOCUS,
+    /**
+     * Trigger a pre-capture metering cycle, which may include firing the flash
+     * to determine proper capture parameters. Typically, this trigger would be
+     * fired for a half-depress of a camera shutter key, or before a snapshot
+     * capture in general. The state of the metering cycle can be tracked in
+     * android.control.aeMode and the corresponding notification.  If the
+     * auto-exposure mode is OFF, the trigger does nothing.
+     *
+     * Ext1 is an ID that must be returned in subsequent
+     * auto-exposure/auto-white balance state change notifications through
+     * camera2_notify_callback() and stored in android.control.aePrecaptureId.
+     */
+     CAMERA2_TRIGGER_PRECAPTURE_METERING
+};
+
+/**
+ * Possible template types for construct_default_request()
+ */
+enum {
+    /**
+     * Standard camera preview operation with 3A on auto.
+     */
+    CAMERA2_TEMPLATE_PREVIEW = 1,
+    /**
+     * Standard camera high-quality still capture with 3A and flash on auto.
+     */
+    CAMERA2_TEMPLATE_STILL_CAPTURE,
+    /**
+     * Standard video recording plus preview with 3A on auto, torch off.
+     */
+    CAMERA2_TEMPLATE_VIDEO_RECORD,
+    /**
+     * High-quality still capture while recording video. Application will
+     * include preview, video record, and full-resolution YUV or JPEG streams in
+     * request. Must not cause stuttering on video stream. 3A on auto.
+     */
+    CAMERA2_TEMPLATE_VIDEO_SNAPSHOT,
+    /**
+     * Zero-shutter-lag mode. Application will request preview and
+     * full-resolution data for each frame, and reprocess it to JPEG when a
+     * still image is requested by user. Settings should provide highest-quality
+     * full-resolution images without compromising preview frame rate. 3A on
+     * auto.
+     */
+    CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG,
+
+    /* Total number of templates */
+    CAMERA2_TEMPLATE_COUNT
+};
+
+
+/**********************************************************************
+ *
+ * Camera device operations
+ *
+ */
 typedef struct camera2_device_ops {
-    /**
-     * Input request queue methods
-     */
-    int (*set_request_queue_src_ops)(struct camera2_device *,
-            camera2_metadata_queue_src_ops *queue_src_ops);
 
-    int (*get_request_queue_dst_ops)(struct camera2_device *,
-            camera2_metadata_queue_dst_ops **queue_dst_ops);
+    /**********************************************************************
+     * Request and frame queue setup and management methods
+     */
 
     /**
-     * Input reprocessing queue methods
+     * Pass in input request queue interface methods.
      */
-    int (*set_reprocess_queue_ops)(struct camera2_device *,
-            camera2_metadata_queue_src_ops *queue_src_ops);
-
-    int (*get_reprocess_queue_dst_ops)(struct camera2_device *,
-            camera2_metadata_queue_dst_ops **queue_dst_ops);
+    int (*set_request_queue_src_ops)(const struct camera2_device *,
+            const camera2_request_queue_src_ops_t *request_src_ops);
 
     /**
-     * Output frame queue methods
+     * Notify device that the request queue is no longer empty. Must only be
+     * called when the first buffer is added to a new queue, or after the source
+     * has returned NULL in response to a dequeue call.
      */
-    int (*set_frame_queue_dst_ops)(struct camera2_device *,
-            camera2_metadata_queue_dst_ops *queue_dst_ops);
-
-    int (*get_frame_queue_src_ops)(struct camera2_device *,
-            camera2_metadata_queue_src_ops **queue_dst_ops);
+    int (*notify_request_queue_not_empty)(const struct camera2_device *);
 
     /**
-     * Pass in notification methods
+     * Pass in output frame queue interface methods
      */
-    int (*set_notify_callback)(struct camera2_device *,
-            camera2_notify_callback notify_cb);
+    int (*set_frame_queue_dst_ops)(const struct camera2_device *,
+            const camera2_frame_queue_dst_ops_t *frame_dst_ops);
 
     /**
-     * Number of camera frames being processed by the device
-     * at the moment (frames that have had their request dequeued,
-     * but have not yet been enqueued onto output pipeline(s) )
+     * Number of camera requests being processed by the device at the moment
+     * (captures/reprocesses that have had their request dequeued, but have not
+     * yet been enqueued onto output pipeline(s) ). No streams may be released
+     * by the framework until the in-progress count is 0.
      */
-    int (*get_in_progress_count)(struct camera2_device *);
+    int (*get_in_progress_count)(const struct camera2_device *);
 
     /**
      * Flush all in-progress captures. This includes all dequeued requests
      * (regular or reprocessing) that have not yet placed any outputs into a
      * stream or the frame queue. Partially completed captures must be completed
-     * normally. No new requests may be dequeued from the request or
-     * reprocessing queues until the flush completes.
+     * normally. No new requests may be dequeued from the request queue until
+     * the flush completes.
      */
-    int (*flush_captures_in_progress)(struct camera2_device *);
+    int (*flush_captures_in_progress)(const struct camera2_device *);
 
     /**
-     * Camera stream management
+     * Create a filled-in default request for standard camera use cases.
+     *
+     * The device must return a complete request that is configured to meet the
+     * requested use case, which must be one of the CAMERA2_TEMPLATE_*
+     * enums. All request control fields must be included, except for
+     * android.request.outputStreams.
+     *
+     * The metadata buffer returned must be allocated with
+     * allocate_camera_metadata. The framework takes ownership of the buffer.
+     */
+    int (*construct_default_request)(const struct camera2_device *,
+            int request_template,
+            camera_metadata_t **request);
+
+    /**********************************************************************
+     * Stream management
      */
 
     /**
-     * Operations on the input reprocessing stream
-     */
-    int (*get_reprocess_stream_ops)(struct camera2_device *,
-            camera2_stream_ops_t **stream_ops);
-
-    /**
-     * Get the number of streams that can be simultaneously allocated.
-     * A request may include any allocated pipeline for its output, without
-     * causing a substantial delay in frame production.
-     */
-    int (*get_stream_slot_count)(struct camera2_device *);
-
-    /**
-     * Allocate a new stream for use. Requires specifying which pipeline slot
-     * to use. Specifies the buffer width, height, and format.
-     * Error conditions:
-     *  - Allocating an already-allocated slot without first releasing it
-     *  - Requesting a width/height/format combination not listed as supported
-     *  - Requesting a pipeline slot >= pipeline slot count.
+     * allocate_stream:
+     *
+     * Allocate a new output stream for use, defined by the output buffer width,
+     * height, target, and possibly the pixel format.  Returns the new stream's
+     * ID, gralloc usage flags, minimum queue buffer count, and possibly the
+     * pixel format, on success. Error conditions:
+     *
+     *  - Requesting a width/height/format combination not listed as
+     *    supported by the sensor's static characteristics
+     *
+     *  - Asking for too many streams of a given format type (2 bayer raw
+     *    streams, for example).
+     *
+     * Input parameters:
+     *
+     * - width, height, format: Specification for the buffers to be sent through
+     *   this stream. Format is a value from the HAL_PIXEL_FORMAT_* list, or
+     *   CAMERA2_HAL_PIXEL_FORMAT_OPAQUE. In the latter case, the camera device
+     *   must select an appropriate (possibly platform-specific) HAL pixel
+     *   format to return in format_actual. In the former case, format_actual
+     *   must be set to match format.
+     *
+     * - stream_ops: A structure of function pointers for obtaining and queuing
+     *   up buffers for this stream. The underlying stream will be configured
+     *   based on the usage and max_buffers outputs. The methods in this
+     *   structure may not be called until after allocate_stream returns.
+     *
+     * Output parameters:
+     *
+     * - stream_id: An unsigned integer identifying this stream. This value is
+     *   used in incoming requests to identify the stream, and in releasing the
+     *   stream.
+     *
+     * - format_actual: If the input format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE,
+     *   then the device must select an appropriate (possibly platform-specific)
+     *   pixel format and return it in *format_actual. It will be treated as an
+     *   opaque value by the framework, and simply passed to the gralloc module
+     *   when new buffers need to be allocated. If the input format is one of
+     *   the values from HAL_PIXEL_FORMAT_* list, then *format_actual must be
+     *   set equal to format. In the latter case, format_actual may also be
+     *   NULL, in which case it can be ignored as an output.
+     *
+     * - usage: The gralloc usage mask needed by the HAL device for producing
+     *   the requested type of data. This is used in allocating new gralloc
+     *   buffers for the stream buffer queue.
+     *
+     * - max_buffers: The maximum number of buffers the HAL device may need to
+     *   have dequeued at the same time. The device may not dequeue more buffers
+     *   than this value at the same time.
+     *
      */
     int (*allocate_stream)(
-        struct camera2_device *,
-        uint32_t stream_slot,
-        uint32_t width,
-        uint32_t height,
-        uint32_t format,
-        camera2_stream_ops_t *camera2_stream_ops);
+            const struct camera2_device *,
+            // inputs
+            uint32_t width,
+            uint32_t height,
+            int      format,
+            const camera2_stream_ops_t *stream_ops,
+            // outputs
+            uint32_t *stream_id,
+            uint32_t *format_actual,
+            uint32_t *usage,
+            uint32_t *max_buffers);
 
     /**
-     * Release a stream. Returns an error if called when
-     * get_in_progress_count is non-zero, or if the pipeline slot is not
-     * allocated.
+     * Register buffers for a given stream. This is called after a successful
+     * allocate_stream call, and before the first request referencing the stream
+     * is enqueued. This method is intended to allow the HAL device to map or
+     * otherwise prepare the buffers for later use. num_buffers is guaranteed to
+     * be at least max_buffers (from allocate_stream), but may be larger. The
+     * buffers will already be locked for use. At the end of the call, all the
+     * buffers must be ready to be returned to the queue.
+     */
+    int (*register_stream_buffers)(
+            const struct camera2_device *,
+            uint32_t stream_id,
+            int num_buffers,
+            buffer_handle_t *buffers);
+
+    /**
+     * Release a stream. Returns an error if called when get_in_progress_count
+     * is non-zero, or if the stream id is invalid.
      */
     int (*release_stream)(
-        struct camera2_device *,
-        uint32_t stream_slot);
+            const struct camera2_device *,
+            uint32_t stream_id);
+
+    /**
+     * allocate_reprocess_stream:
+     *
+     * Allocate a new input stream for use, defined by the output buffer width,
+     * height, and the pixel format.  Returns the new stream's ID, gralloc usage
+     * flags, and required simultaneously acquirable buffer count, on
+     * success. Error conditions:
+     *
+     *  - Requesting a width/height/format combination not listed as
+     *    supported by the sensor's static characteristics
+     *
+     *  - Asking for too many reprocessing streams to be configured at once.
+     *
+     * Input parameters:
+     *
+     * - width, height, format: Specification for the buffers to be sent through
+     *   this stream. Format must be a value from the HAL_PIXEL_FORMAT_* list.
+     *
+     * - reprocess_stream_ops: A structure of function pointers for acquiring
+     *   and releasing buffers for this stream. The underlying stream will be
+     *   configured based on the usage and max_buffers outputs.
+     *
+     * Output parameters:
+     *
+     * - stream_id: An unsigned integer identifying this stream. This value is
+     *   used in incoming requests to identify the stream, and in releasing the
+     *   stream. These ids are numbered separately from the input stream ids.
+     *
+     * - consumer_usage: The gralloc usage mask needed by the HAL device for
+     *   consuming the requested type of data. This is used in allocating new
+     *   gralloc buffers for the stream buffer queue.
+     *
+     * - max_buffers: The maximum number of buffers the HAL device may need to
+     *   have acquired at the same time. The device may not have more than
+     *   this many buffers acquired at the same time.
+     *
+     */
+    int (*allocate_reprocess_stream)(const struct camera2_device *,
+            uint32_t width,
+            uint32_t height,
+            uint32_t format,
+            const camera2_stream_in_ops_t *reprocess_stream_ops,
+            // outputs
+            uint32_t *stream_id,
+            uint32_t *consumer_usage,
+            uint32_t *max_buffers);
+
+    /**
+     * Release a reprocessing stream. Returns an error if called when
+     * get_in_progress_count is non-zero, or if the stream id is not
+     * valid.
+     */
+    int (*release_reprocess_stream)(
+            const struct camera2_device *,
+            uint32_t stream_id);
+
+    /**********************************************************************
+     * Miscellaneous methods
+     */
+
+    /**
+     * Trigger asynchronous activity. This is used for triggering special
+     * behaviors of the camera 3A routines when they are in use. See the
+     * documentation for CAMERA2_TRIGGER_* above for details of the trigger ids
+     * and their arguments.
+     */
+    int (*trigger_action)(const struct camera2_device *,
+            uint32_t trigger_id,
+            int32_t ext1,
+            int32_t ext2);
+
+    /**
+     * Notification callback setup
+     */
+    int (*set_notify_callback)(const struct camera2_device *,
+            camera2_notify_callback notify_cb,
+            void *user);
 
     /**
     * Get methods to query for vendor extension metadata tag information. May
      * set ops to NULL if no vendor extension tags are defined.
      */
-    int (*get_metadata_vendor_tag_ops)(struct camera2_device*,
+    int (*get_metadata_vendor_tag_ops)(const struct camera2_device*,
             vendor_tag_query_ops_t **ops);
 
     /**
-     * Release the camera hardware.  Requests that are in flight will be
-     * canceled. No further buffers will be pushed into any allocated pipelines
-     * once this call returns.
-     */
-    void (*release)(struct camera2_device *);
-
-    /**
      * Dump state of the camera hardware
      */
-    int (*dump)(struct camera2_device *, int fd);
+    int (*dump)(const struct camera2_device *, int fd);
 
 } camera2_device_ops_t;
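A hedged framework-side sketch of the allocate_stream() contract above, assuming the device exposes this ops table as dev->ops (as in the device struct below) and that a stream_ops structure has already been prepared:

    uint32_t stream_id, format_actual, usage, max_buffers;
    int res = dev->ops->allocate_stream(dev,
            640, 480,                           /* hypothetical preview size */
            CAMERA2_HAL_PIXEL_FORMAT_OPAQUE,    /* let the HAL pick a format */
            &stream_ops,
            &stream_id, &format_actual, &usage, &max_buffers);
    if (res == 0) {
        /* format_actual and usage are passed through to gralloc when
         * allocating the stream's buffers; at least max_buffers buffers
         * must then be handed over via register_stream_buffers(). */
    }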
 
+/**********************************************************************
+ *
+ * Camera device definition
+ *
+ */
 typedef struct camera2_device {
     /**
      * common.version must equal CAMERA_DEVICE_API_VERSION_2_0 to identify
diff --git a/include/hardware/fb.h b/include/hardware/fb.h
index ba2f286..135e4aa 100644
--- a/include/hardware/fb.h
+++ b/include/hardware/fb.h
@@ -64,7 +64,10 @@
     /* max swap interval supported by this framebuffer */
     const int       maxSwapInterval;
 
-    int reserved[8];
+    /* Number of framebuffers supported */
+    const int       numFramebuffers;
+
+    int reserved[7];
 
     /*
      * requests a specific swap-interval (same definition than EGL)
diff --git a/include/hardware/gralloc.h b/include/hardware/gralloc.h
index 2dbd1fa..86ed95c 100644
--- a/include/hardware/gralloc.h
+++ b/include/hardware/gralloc.h
@@ -76,8 +76,12 @@
     GRALLOC_USAGE_HW_FB                 = 0x00001000,
     /* buffer will be used with the HW video encoder */
     GRALLOC_USAGE_HW_VIDEO_ENCODER      = 0x00010000,
+    /* buffer will be written by the HW camera pipeline */
+    GRALLOC_USAGE_HW_CAMERA_WRITE       = 0x00020000,
+    /* buffer will be read by the HW camera pipeline */
+    GRALLOC_USAGE_HW_CAMERA_READ        = 0x00040000,
     /* mask for the hardware usage bit-mask */
-    GRALLOC_USAGE_HW_MASK               = 0x00011F00,
+    GRALLOC_USAGE_HW_MASK               = 0x00071F00,
 
     /* buffer should be displayed full-screen on an external display when
      * possible
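Note the updated mask arithmetic: 0x00011F00 | GRALLOC_USAGE_HW_CAMERA_WRITE (0x00020000) | GRALLOC_USAGE_HW_CAMERA_READ (0x00040000) = 0x00071F00, so the camera bits are treated as hardware usage. A trivial check a gralloc implementation might make, as a sketch:

    static int is_camera_buffer(int usage)
    {
        /* produced or consumed by the HW camera pipeline */
        return (usage & (GRALLOC_USAGE_HW_CAMERA_WRITE |
                         GRALLOC_USAGE_HW_CAMERA_READ)) != 0;
    }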
diff --git a/include/hardware/hwcomposer.h b/include/hardware/hwcomposer.h
index 98e665c..895c695 100644
--- a/include/hardware/hwcomposer.h
+++ b/include/hardware/hwcomposer.h
@@ -30,11 +30,25 @@
 
 /*****************************************************************************/
 
-// for compatibility
+/* for compatibility */
 #define HWC_MODULE_API_VERSION      HWC_MODULE_API_VERSION_0_1
 #define HWC_DEVICE_API_VERSION      HWC_DEVICE_API_VERSION_0_1
 #define HWC_API_VERSION             HWC_DEVICE_API_VERSION
 
+/* Users of this header can define HWC_REMOVE_DEPRECATED_VERSIONS to test that
+ * they still work with just the current version declared, before the
+ * deprecated versions are actually removed.
+ *
+ * To find code that still depends on the old versions, set the #define to 1
+ * here. Code that explicitly sets it to zero (rather than simply not defining
+ * it) will still see the old versions.
+ */
+#if !defined(HWC_REMOVE_DEPRECATED_VERSIONS)
+#define HWC_REMOVE_DEPRECATED_VERSIONS 0
+#endif
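For instance, a client can opt out of the legacy interfaces by setting the define before including the header; the hwcomposer_v0.h compatibility include at the bottom of this file is then skipped:

    #define HWC_REMOVE_DEPRECATED_VERSIONS 1
    #include <hardware/hwcomposer.h>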
+
+/*****************************************************************************/
+
 /**
  * The id of this module
  */
@@ -45,25 +59,12 @@
  */
 #define HWC_HARDWARE_COMPOSER   "composer"
 
-
-struct hwc_composer_device;
-
-/*
- * availability: HWC_DEVICE_API_VERSION_0_3
- *
- * struct hwc_methods cannot be embedded in other structures as
- * sizeof(struct hwc_methods) cannot be relied upon.
- *
- */
-typedef struct hwc_methods {
-
-    /*************************************************************************
-     * HWC_DEVICE_API_VERSION_0_3
-     *************************************************************************/
+struct hwc_composer_device_1;
+typedef struct hwc_methods_1 {
 
     /*
      * eventControl(..., event, enabled)
-     * Enables or disables h/w composer events.
+     * Enables or disables h/w composer events for a display.
      *
      * eventControl can be called from any thread and takes effect
      * immediately.
@@ -74,11 +75,22 @@
     * returns -EINVAL if the "event" parameter is not one of the values above
      * or if the "enabled" parameter is not 0 or 1.
      */
-
     int (*eventControl)(
-            struct hwc_composer_device* dev, int event, int enabled);
+            struct hwc_composer_device_1* dev, int dpy,
+            int event, int enabled);
 
-} hwc_methods_t;
+    /*
+     * blank(..., blank)
+     * Blanks or unblanks a display's screen.
+     *
+     * Turns the screen off when blank is nonzero, on when blank is zero.
+     * Multiple sequential calls with the same blank value must be supported.
+     *
+     * returns 0 on success, negative on error.
+     */
+    int (*blank)(struct hwc_composer_device_1* dev, int dpy, int blank);
+
+} hwc_methods_1_t;
 
 typedef struct hwc_rect {
     int left;
@@ -99,7 +111,7 @@
     uint8_t a;
 } hwc_color_t;
 
-typedef struct hwc_layer {
+typedef struct hwc_layer_1 {
     /*
      * initially set to HWC_FRAMEBUFFER or HWC_BACKGROUND.
      * HWC_FRAMEBUFFER
@@ -158,31 +170,46 @@
              * The visible region INCLUDES areas overlapped by a translucent layer.
              */
             hwc_region_t visibleRegionScreen;
+
+            /* Sync fence object that will be signaled when the buffer's
+             * contents are available. May be -1 if the contents are already
+             * available. This field is only valid during set(), and should be
+             * ignored during prepare(). The set() call must not wait for the
+             * fence to be signaled before returning, but the HWC must wait for
+             * all buffers to be signaled before reading from them.
+             *
+             * The HWC takes ownership of the acquireFenceFd and is responsible
+             * for closing it when no longer needed.
+             */
+            int acquireFenceFd;
+
+            /* During set() the HWC must set this field to a file descriptor for
+             * a sync fence object that will signal after the HWC has finished
+             * reading from the buffer. The field is ignored by prepare(). Each
+             * layer should have a unique file descriptor, even if more than one
+             * refer to the same underlying fence object; this allows each to be
+             * closed independently.
+             *
+             * If buffer reads can complete at significantly different times,
+             * then using independent fences is preferred. For example, if the
+             * HWC handles some layers with a blit engine and others with
+             * overlays, then the blit layers can be reused immediately after
+             * the blit completes, but the overlay layers can't be reused until
+             * a subsequent frame has been displayed.
+             *
+             * The HWC client takes ownership of the releaseFenceFd and is
+             * responsible for closing it when no longer needed.
+             */
+            int releaseFenceFd;
         };
     };
-} hwc_layer_t;
 
-
-/*
- * hwc_layer_list_t::flags values
- */
-enum {
-    /*
-     * HWC_GEOMETRY_CHANGED is set by SurfaceFlinger to indicate that the list
-     * passed to (*prepare)() has changed by more than just the buffer handles.
+    /* Allow for expansion w/o breaking binary compatibility.
+     * Pad layer to 96 bytes, assuming 32-bit pointers.
      */
-    HWC_GEOMETRY_CHANGED = 0x00000001,
-};
+    int32_t reserved[24 - 18];
 
-/*
- * List of layers.
- * The handle members of hwLayers elements must be unique.
- */
-typedef struct hwc_layer_list {
-    uint32_t flags;
-    size_t numHwLayers;
-    hwc_layer_t hwLayers[0];
-} hwc_layer_list_t;
+} hwc_layer_1_t;
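A hedged sketch of how an HWC might honor the fence fields above, assuming libsync's sync_wait() is available; per the contract, set() must return without waiting on acquire fences, but the HWC must wait before actually reading a buffer:

    #include <unistd.h>     /* close() */
    #include <sync/sync.h>  /* sync_wait(), assuming libsync */

    static void read_layer(hwc_layer_1_t *layer)
    {
        if (layer->acquireFenceFd >= 0) {
            sync_wait(layer->acquireFenceFd, -1);  /* block until producer done */
            close(layer->acquireFenceFd);          /* HWC owns this fd */
            layer->acquireFenceFd = -1;
        }
        /* ... read from the layer's buffer ... */
        /* Before set() returns, publish a fence that signals when reading
         * is finished; -1 if the contents were consumed synchronously. */
        layer->releaseFenceFd = -1;
    }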
 
 /* This represents a display, typically an EGLDisplay object */
 typedef void* hwc_display_t;
@@ -190,6 +217,49 @@
 /* This represents a surface, typically an EGLSurface object  */
 typedef void* hwc_surface_t;
 
+/*
+ * hwc_display_contents_1_t::flags values
+ */
+enum {
+    /*
+     * HWC_GEOMETRY_CHANGED is set by SurfaceFlinger to indicate that the list
+     * passed to (*prepare)() has changed by more than just the buffer handles
+     * and acquire fences.
+     */
+    HWC_GEOMETRY_CHANGED = 0x00000001,
+};
+
+/*
+ * Description of the contents to output on a display.
+ *
+ * This is the top-level structure passed to the prepare and set calls to
+ * negotiate and commit the composition of a display image.
+ */
+typedef struct hwc_display_contents_1 {
+    /* File descriptor referring to a Sync HAL fence object which will signal
+     * when this display image is no longer visible, i.e. when the following
+     * set() takes effect. The fence object is created and returned by the set
+     * call; this field will be -1 on entry to prepare and set. SurfaceFlinger
+     * will close the returned file descriptor.
+     */
+    int flipFenceFd;
+
+    /* (dpy, sur) is the target of SurfaceFlinger's OpenGL ES composition.
+     * They aren't relevant to prepare. The set call should commit this surface
+     * atomically to the display along with any overlay layers.
+     */
+    hwc_display_t dpy;
+    hwc_surface_t sur;
+
+    /* List of layers that will be composed on the display. The buffer handles
+     * in the list will be unique. If numHwLayers is 0, all composition will be
+     * performed by SurfaceFlinger.
+     */
+    uint32_t flags;
+    size_t numHwLayers;
+    hwc_layer_1_t hwLayers[0];
+
+} hwc_display_contents_1_t;
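A minimal prepare() sketch over this structure, iterating every display as the updated contract below requires (HWC_FRAMEBUFFER and HWC_OVERLAY come from hwcomposer_defs.h; this version claims no overlays):

    static int my_prepare(struct hwc_composer_device_1 *dev,
            size_t numDisplays, hwc_display_contents_1_t **displays)
    {
        for (size_t d = 0; d < numDisplays; d++) {
            hwc_display_contents_1_t *disp = displays[d];
            if (!(disp->flags & HWC_GEOMETRY_CHANGED))
                continue;  /* previous composition assignments still hold */
            for (size_t i = 0; i < disp->numHwLayers; i++) {
                hwc_layer_1_t *layer = &disp->hwLayers[i];
                if (layer->compositionType == HWC_FRAMEBUFFER) {
                    /* a real HWC would test whether an overlay can handle
                     * this layer and set HWC_OVERLAY if so; this sketch
                     * defers all composition to SurfaceFlinger/GLES */
                }
            }
        }
        return 0;
    }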
 
 /* see hwc_composer_device::registerProcs()
  * Any of the callbacks can be NULL, in which case the corresponding
@@ -209,9 +279,10 @@
 
     /*
      * (*vsync)() is called by the h/w composer HAL when a vsync event is
-     * received and HWC_EVENT_VSYNC is enabled (see: hwc_event_control).
+     * received and HWC_EVENT_VSYNC is enabled on a display
+     * (see: hwc_event_control).
      *
-     * the "zero" parameter must always be 0.
+     * the "dpy" parameter indicates which display the vsync event is for.
      * the "timestamp" parameter is the system monotonic clock timestamp in
      *   nanosecond of when the vsync event happened.
      *
@@ -225,9 +296,8 @@
      * hwc_composer_device.set(..., 0, 0, 0) (screen off). The implementation
      * can either stop or continue to process VSYNC events, but must not
      * crash or cause other problems.
-     *
      */
-    void (*vsync)(struct hwc_procs* procs, int zero, int64_t timestamp);
+    void (*vsync)(struct hwc_procs* procs, int dpy, int64_t timestamp);
 } hwc_procs_t;
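A sketch of the HAL side of the updated callback, assuming the procs pointer was saved from registerProcs() and the timestamp comes from CLOCK_MONOTONIC; the cast discards the const qualifier the way existing implementations do:

    static hwc_procs_t const *g_procs;  /* saved in registerProcs() */

    static void on_vsync_irq(int dpy, int64_t timestamp_ns)
    {
        if (g_procs && g_procs->vsync)
            g_procs->vsync((hwc_procs_t *)g_procs, dpy, timestamp_ns);
    }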
 
 
@@ -237,8 +307,7 @@
     struct hw_module_t common;
 } hwc_module_t;
 
-
-typedef struct hwc_composer_device {
+typedef struct hwc_composer_device_1 {
     struct hw_device_t common;
 
     /*
@@ -247,78 +316,63 @@
      *
      * (*prepare)() can be called more than once, the last call prevails.
      *
-     * The HWC responds by setting the compositionType field to either
-     * HWC_FRAMEBUFFER or HWC_OVERLAY. In the former case, the composition for
-     * this layer is handled by SurfaceFlinger with OpenGL ES, in the later
-     * case, the HWC will have to handle this layer's composition.
+     * The HWC responds by setting the compositionType field in each layer to
+     * either HWC_FRAMEBUFFER or HWC_OVERLAY. In the former case, the
+     * composition for the layer is handled by SurfaceFlinger with OpenGL ES,
+     * in the latter case, the HWC will have to handle the layer's composition.
      *
      * (*prepare)() is called with HWC_GEOMETRY_CHANGED to indicate that the
      * list's geometry has changed, that is, when more than just the buffer's
      * handles have been updated. Typically this happens (but is not limited to)
      * when a window is added, removed, resized or moved.
      *
-     * a NULL list parameter or a numHwLayers of zero indicates that the
-     * entire composition will be handled by SurfaceFlinger with OpenGL ES.
+     * The numDisplays parameter will always be greater than zero, displays
+     * will be non-NULL, and the array entries will be non-NULL.
      *
     * returns: 0 on success. A negative error code on error. If an error is
     * returned, SurfaceFlinger will assume that none of the layers will be
      * handled by the HWC.
      */
-    int (*prepare)(struct hwc_composer_device *dev, hwc_layer_list_t* list);
-
+    int (*prepare)(struct hwc_composer_device_1 *dev,
+                    size_t numDisplays, hwc_display_contents_1_t** displays);
 
     /*
      * (*set)() is used in place of eglSwapBuffers(), and assumes the same
      * functionality, except it also commits the work list atomically with
      * the actual eglSwapBuffers().
      *
-     * The list parameter is guaranteed to be the same as the one returned
-     * from the last call to (*prepare)().
+     * The layer lists are guaranteed to be the same as the ones returned from
+     * the last call to (*prepare)().
      *
-     * When this call returns the caller assumes that:
+     * When this call returns the caller assumes that the displays will be
+     * updated in the near future with the content of their work lists, without
+     * artifacts during the transition from the previous frame.
      *
-     * - the display will be updated in the near future with the content
-     *   of the work list, without artifacts during the transition from the
-     *   previous frame.
+     * A display with zero layers indicates that the entire composition has
+     * been handled by SurfaceFlinger with OpenGL ES. In this case, (*set)()
+     * behaves just like eglSwapBuffers().
      *
-     * - all objects are available for immediate access or destruction, in
-     *   particular, hwc_region_t::rects data and hwc_layer_t::layer's buffer.
-     *   Note that this means that immediately accessing (potentially from a
-     *   different process) a buffer used in this call will not result in
-     *   screen corruption, the driver must apply proper synchronization or
-     *   scheduling (eg: block the caller, such as gralloc_module_t::lock(),
-     *   OpenGL ES, Camera, Codecs, etc..., or schedule the caller's work
-     *   after the buffer is freed from the actual composition).
-     *
-     * a NULL list parameter or a numHwLayers of zero indicates that the
-     * entire composition has been handled by SurfaceFlinger with OpenGL ES.
-     * In this case, (*set)() behaves just like eglSwapBuffers().
-     *
-     * dpy, sur, and list are set to NULL to indicate that the screen is
-     * turning off. This happens WITHOUT prepare() being called first.
-     * This is a good time to free h/w resources and/or power
-     * the relevant h/w blocks down.
+     * The numDisplays parameter will always be greater than zero, displays
+     * will be non-NULL, and the array entries will be non-NULL.
      *
      * IMPORTANT NOTE: there is an implicit layer containing opaque black
-     * pixels behind all the layers in the list.
-     * It is the responsibility of the hwcomposer module to make
-     * sure black pixels are output (or blended from).
+     * pixels behind all the layers in the list. It is the responsibility of
+     * the hwcomposer module to make sure black pixels are output (or blended
+     * from).
      *
     * returns: 0 on success. A negative error code on error:
      *    HWC_EGL_ERROR: eglGetError() will provide the proper error code
      *    Another code for non EGL errors.
-     *
      */
-    int (*set)(struct hwc_composer_device *dev,
-                hwc_display_t dpy,
-                hwc_surface_t sur,
-                hwc_layer_list_t* list);
+    int (*set)(struct hwc_composer_device_1 *dev,
+                size_t numDisplays, hwc_display_contents_1_t** displays);
+
     /*
      * This field is OPTIONAL and can be NULL.
      *
      * If non NULL it will be called by SurfaceFlinger on dumpsys
      */
-    void (*dump)(struct hwc_composer_device* dev, char *buff, int buff_len);
+    void (*dump)(struct hwc_composer_device_1* dev, char *buff, int buff_len);
 
     /*
      * This field is OPTIONAL and can be NULL.
@@ -333,18 +387,17 @@
      * Any of the callbacks can be NULL, in which case the corresponding
      * functionality is not supported.
      */
-    void (*registerProcs)(struct hwc_composer_device* dev,
+    void (*registerProcs)(struct hwc_composer_device_1* dev,
             hwc_procs_t const* procs);
 
     /*
      * This field is OPTIONAL and can be NULL.
-     * availability: HWC_DEVICE_API_VERSION_0_2
      *
      * Used to retrieve information about the h/w composer
      *
      * Returns 0 on success or -errno on error.
      */
-    int (*query)(struct hwc_composer_device* dev, int what, int* value);
+    int (*query)(struct hwc_composer_device_1* dev, int what, int* value);
 
     /*
      * Reserved for future use. Must be NULL.
@@ -352,29 +405,30 @@
     void* reserved_proc[4];
 
     /*
-     * This field is OPTIONAL and can be NULL.
-     * availability: HWC_DEVICE_API_VERSION_0_3
+     * This field is REQUIRED and must not be NULL.
      */
-    hwc_methods_t const *methods;
+    hwc_methods_1_t const *methods;
 
-} hwc_composer_device_t;
-
+} hwc_composer_device_1_t;
 
 /** convenience API for opening and closing a device */
 
-static inline int hwc_open(const struct hw_module_t* module,
-        hwc_composer_device_t** device) {
+static inline int hwc_open_1(const struct hw_module_t* module,
+        hwc_composer_device_1_t** device) {
     return module->methods->open(module,
             HWC_HARDWARE_COMPOSER, (struct hw_device_t**)device);
 }
 
-static inline int hwc_close(hwc_composer_device_t* device) {
+static inline int hwc_close_1(hwc_composer_device_1_t* device) {
     return device->common.close(&device->common);
 }
 
-
 /*****************************************************************************/
 
+#if !HWC_REMOVE_DEPRECATED_VERSIONS
+#include <hardware/hwcomposer_v0.h>
+#endif
+
 __END_DECLS
 
 #endif /* ANDROID_INCLUDE_HARDWARE_HWCOMPOSER_H */
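
For readers tracking the interface change, here is a minimal sketch of driving
the new v1 entry points for a single display. It is illustrative only, not the
actual SurfaceFlinger call sequence; module and contents are assumed to be a
valid hw_module_t and a populated hwc_display_contents_1_t.

    /* Sketch: compose one frame on display 0 through the v1 interface. */
    hwc_composer_device_1_t* hwc;
    if (hwc_open_1(module, &hwc) != 0)
        return;

    hwc_display_contents_1_t* displays[] = { contents };
    hwc->prepare(hwc, 1, displays); /* HWC marks layers FRAMEBUFFER/OVERLAY */
    hwc->set(hwc, 1, displays);     /* commits the lists in place of eglSwapBuffers() */

    hwc_close_1(hwc);
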
diff --git a/include/hardware/hwcomposer_defs.h b/include/hardware/hwcomposer_defs.h
index 99465d3..02b8e50 100644
--- a/include/hardware/hwcomposer_defs.h
+++ b/include/hardware/hwcomposer_defs.h
@@ -33,7 +33,7 @@
 #define HWC_DEVICE_API_VERSION_0_1  HARDWARE_DEVICE_API_VERSION(0, 1)
 #define HWC_DEVICE_API_VERSION_0_2  HARDWARE_DEVICE_API_VERSION(0, 2)
 #define HWC_DEVICE_API_VERSION_0_3  HARDWARE_DEVICE_API_VERSION(0, 3)
-
+#define HWC_DEVICE_API_VERSION_1_0  HARDWARE_DEVICE_API_VERSION(1, 0)
 
 enum {
     /* hwc_composer_device_t::set failed in EGL */
diff --git a/include/hardware/hwcomposer_v0.h b/include/hardware/hwcomposer_v0.h
new file mode 100644
index 0000000..473819b
--- /dev/null
+++ b/include/hardware/hwcomposer_v0.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This header contains deprecated HWCv0 interface declarations. Don't include
+ * this header directly; it will be included by <hardware/hwcomposer.h> unless
+ * HWC_REMOVE_DEPRECATED_VERSIONS is defined to non-zero.
+ */
+#ifndef ANDROID_INCLUDE_HARDWARE_HWCOMPOSER_H
+#error "This header should only be included by hardware/hwcomposer.h"
+#endif
+
+#ifndef ANDROID_INCLUDE_HARDWARE_HWCOMPOSER_V0_H
+#define ANDROID_INCLUDE_HARDWARE_HWCOMPOSER_V0_H
+
+struct hwc_composer_device;
+
+/*
+ * availability: HWC_DEVICE_API_VERSION_0_3
+ *
+ * struct hwc_methods cannot be embedded in other structures as
+ * sizeof(struct hwc_methods) cannot be relied upon.
+ *
+ */
+typedef struct hwc_methods {
+
+    /*************************************************************************
+     * HWC_DEVICE_API_VERSION_0_3
+     *************************************************************************/
+
+    /*
+     * eventControl(..., event, enabled)
+     * Enables or disables h/w composer events.
+     *
+     * eventControl can be called from any thread and takes effect
+     * immediately.
+     *
+     *  Supported events are:
+     *      HWC_EVENT_VSYNC
+     *
+     * returns -EINVAL if the "event" parameter is not one of the values above
+     * or if the "enabled" parameter is not 0 or 1.
+     */
+
+    int (*eventControl)(
+            struct hwc_composer_device* dev, int event, int enabled);
+
+} hwc_methods_t;
+
+typedef struct hwc_layer {
+    /*
+     * initially set to HWC_FRAMEBUFFER or HWC_BACKGROUND.
+     * HWC_FRAMEBUFFER
+     *   indicates the layer will be drawn into the framebuffer
+     *   using OpenGL ES.
+     *   The HWC can toggle this value to HWC_OVERLAY, to indicate
+     *   it will handle the layer.
+     *
+     * HWC_BACKGROUND
+     *   indicates this is a special "background" layer. The only valid
+     *   field is backgroundColor. HWC_BACKGROUND can only be used with
+     *   HWC_API_VERSION >= 0.2.
+     *   The HWC can toggle this value to HWC_FRAMEBUFFER, to indicate
+     *   it CANNOT handle the background color.
+     *
+     */
+    int32_t compositionType;
+
+    /* see hwc_layer_t::hints above */
+    uint32_t hints;
+
+    /* see hwc_layer_t::flags above */
+    uint32_t flags;
+
+    union {
+        /* color of the background.  hwc_color_t.a is ignored */
+        hwc_color_t backgroundColor;
+
+        struct {
+            /* handle of buffer to compose. This handle is guaranteed to have been
+             * allocated from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag. If
+             * the layer's handle is unchanged across two consecutive prepare calls and
+             * the HWC_GEOMETRY_CHANGED flag is not set for the second call then the
+             * HWComposer implementation may assume that the contents of the buffer have
+             * not changed. */
+            buffer_handle_t handle;
+
+            /* transformation to apply to the buffer during composition */
+            uint32_t transform;
+
+            /* blending to apply during composition */
+            int32_t blending;
+
+            /* area of the source to consider, the origin is the top-left corner of
+             * the buffer */
+            hwc_rect_t sourceCrop;
+
+            /* where to composite the sourceCrop onto the display. The sourceCrop
+             * is scaled using linear filtering to the displayFrame. The origin is the
+             * top-left corner of the screen.
+             */
+            hwc_rect_t displayFrame;
+
+            /* visible region in screen space. The origin is the
+             * top-left corner of the screen.
+             * The visible region INCLUDES areas overlapped by a translucent layer.
+             */
+            hwc_region_t visibleRegionScreen;
+        };
+    };
+} hwc_layer_t;
+
+/*
+ * List of layers.
+ * The handle members of hwLayers elements must be unique.
+ */
+typedef struct hwc_layer_list {
+    uint32_t flags;
+    size_t numHwLayers;
+    hwc_layer_t hwLayers[0];
+} hwc_layer_list_t;
+
+/*****************************************************************************/
+
+typedef struct hwc_composer_device {
+    struct hw_device_t common;
+
+    /*
+     * (*prepare)() is called for each frame before composition and is used by
+     * SurfaceFlinger to determine what composition steps the HWC can handle.
+     *
+     * (*prepare)() can be called more than once; the last call prevails.
+     *
+     * The HWC responds by setting the compositionType field to either
+     * HWC_FRAMEBUFFER or HWC_OVERLAY. In the former case, the composition for
+     * this layer is handled by SurfaceFlinger with OpenGL ES; in the latter
+     * case, the HWC will have to handle this layer's composition.
+     *
+     * (*prepare)() is called with HWC_GEOMETRY_CHANGED to indicate that the
+     * list's geometry has changed, that is, when more than just the buffer's
+     * handles have been updated. Typically this happens (but is not limited to)
+     * when a window is added, removed, resized or moved.
+     *
+     * A NULL list parameter or a numHwLayers of zero indicates that the
+     * entire composition will be handled by SurfaceFlinger with OpenGL ES.
+     *
+     * returns: 0 on success. A negative error code on error. If an error is
+     * returned, SurfaceFlinger will assume that none of the layers will be
+     * handled by the HWC.
+     */
+    int (*prepare)(struct hwc_composer_device *dev, hwc_layer_list_t* list);
+
+    /*
+     * (*set)() is used in place of eglSwapBuffers(), and assumes the same
+     * functionality, except it also commits the work list atomically with
+     * the actual eglSwapBuffers().
+     *
+     * The list parameter is guaranteed to be the same as the one returned
+     * from the last call to (*prepare)().
+     *
+     * When this call returns, the caller assumes that:
+     *
+     * - the display will be updated in the near future with the content
+     *   of the work list, without artifacts during the transition from the
+     *   previous frame.
+     *
+     * - all objects are available for immediate access or destruction, in
+     *   particular, hwc_region_t::rects data and hwc_layer_t::layer's buffer.
+     *   Note that this means that immediately accessing (potentially from a
+     *   different process) a buffer used in this call will not result in
+     *   screen corruption; the driver must apply proper synchronization or
+     *   scheduling (e.g., block the caller, as gralloc_module_t::lock(),
+     *   OpenGL ES, Camera, Codecs, etc. do, or schedule the caller's work
+     *   after the buffer is freed from the actual composition).
+     *
+     * A NULL list parameter or a numHwLayers of zero indicates that the
+     * entire composition has been handled by SurfaceFlinger with OpenGL ES.
+     * In this case, (*set)() behaves just like eglSwapBuffers().
+     *
+     * dpy, sur, and list are set to NULL to indicate that the screen is
+     * turning off. This happens WITHOUT prepare() being called first.
+     * This is a good time to free h/w resources and/or power
+     * the relevant h/w blocks down.
+     *
+     * IMPORTANT NOTE: there is an implicit layer containing opaque black
+     * pixels behind all the layers in the list.
+     * It is the responsibility of the hwcomposer module to make
+     * sure black pixels are output (or blended from).
+     *
+     * returns: 0 on success. A negative error code on error:
+     *    HWC_EGL_ERROR: eglGetError() will provide the proper error code
+     *    Another code for non-EGL errors.
+     *
+     */
+    int (*set)(struct hwc_composer_device *dev,
+                hwc_display_t dpy,
+                hwc_surface_t sur,
+                hwc_layer_list_t* list);
+
+    /*
+     * This field is OPTIONAL and can be NULL.
+     *
+     * If non NULL it will be called by SurfaceFlinger on dumpsys
+     */
+    void (*dump)(struct hwc_composer_device* dev, char *buff, int buff_len);
+
+    /*
+     * This field is OPTIONAL and can be NULL.
+     *
+     * (*registerProcs)() registers a set of callbacks the h/w composer HAL
+     * can later use. It is FORBIDDEN to call any of the callbacks from
+     * within registerProcs(). registerProcs() must save the hwc_procs_t pointer
+     * which is needed when calling a registered callback.
+     * Each call to registerProcs replaces the previous set of callbacks.
+     * registerProcs is called with NULL to unregister all callbacks.
+     *
+     * Any of the callbacks can be NULL, in which case the corresponding
+     * functionality is not supported.
+     */
+    void (*registerProcs)(struct hwc_composer_device* dev,
+            hwc_procs_t const* procs);
+
+    /*
+     * This field is OPTIONAL and can be NULL.
+     * availability: HWC_DEVICE_API_VERSION_0_2
+     *
+     * Used to retrieve information about the h/w composer
+     *
+     * Returns 0 on success or -errno on error.
+     */
+    int (*query)(struct hwc_composer_device* dev, int what, int* value);
+
+    /*
+     * Reserved for future use. Must be NULL.
+     */
+    void* reserved_proc[4];
+
+    /*
+     * This field is OPTIONAL and can be NULL.
+     * availability: HWC_DEVICE_API_VERSION_0_3
+     */
+    hwc_methods_t const *methods;
+
+} hwc_composer_device_t;
+
+/** convenience API for opening and closing a device */
+
+static inline int hwc_open(const struct hw_module_t* module,
+        hwc_composer_device_t** device) {
+    return module->methods->open(module,
+            HWC_HARDWARE_COMPOSER, (struct hw_device_t**)device);
+}
+
+static inline int hwc_close(hwc_composer_device_t* device) {
+    return device->common.close(&device->common);
+}
+
+/*****************************************************************************/
+
+#endif /* ANDROID_INCLUDE_HARDWARE_HWCOMPOSER_V0_H */
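
For contrast, the deprecated v0 flow preserved by this header passed the display
and surface explicitly to (*set)(). A minimal sketch, assuming a populated
hwc_layer_list_t *list and valid dpy/sur handles:

    /* Sketch: the legacy v0 call sequence. */
    hwc_composer_device_t* hwc0;
    if (hwc_open(module, &hwc0) != 0)
        return;

    hwc0->prepare(hwc0, list);       /* layer list; NULL means GL-only composition */
    hwc0->set(hwc0, dpy, sur, list); /* all three NULL means the screen is turning off */

    hwc_close(hwc0);
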
diff --git a/modules/audio/audio_hw.c b/modules/audio/audio_hw.c
index d860437..e4fb711 100644
--- a/modules/audio/audio_hw.c
+++ b/modules/audio/audio_hw.c
@@ -55,7 +55,7 @@
     return 4096;
 }
 
-static uint32_t out_get_channels(const struct audio_stream *stream)
+static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
 {
     return AUDIO_CHANNEL_OUT_STEREO;
 }
@@ -148,7 +148,7 @@
     return 320;
 }
 
-static uint32_t in_get_channels(const struct audio_stream *stream)
+static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
 {
     return AUDIO_CHANNEL_IN_MONO;
 }
@@ -287,8 +287,17 @@
     return -ENOSYS;
 }
 
-static int adev_get_master_volume(struct audio_hw_device *dev,
-                                  float *volume)
+static int adev_get_master_volume(struct audio_hw_device *dev, float *volume)
+{
+    return -ENOSYS;
+}
+
+static int adev_set_master_mute(struct audio_hw_device *dev, bool muted)
+{
+    return -ENOSYS;
+}
+
+static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted)
 {
     return -ENOSYS;
 }
@@ -416,6 +425,8 @@
     adev->device.set_voice_volume = adev_set_voice_volume;
     adev->device.set_master_volume = adev_set_master_volume;
     adev->device.get_master_volume = adev_get_master_volume;
+    adev->device.set_master_mute = adev_set_master_mute;
+    adev->device.get_master_mute = adev_get_master_mute;
     adev->device.set_mode = adev_set_mode;
     adev->device.set_mic_mute = adev_set_mic_mute;
     adev->device.get_mic_mute = adev_get_mic_mute;
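
The stub HAL above returns -ENOSYS from both mute hooks, which tells AudioFlinger
to emulate master mute in the software mixer. A HAL with hardware mute support
would persist and report the state instead; a hypothetical sketch (the
hw_master_mute flag is illustrative, not part of the stub):

    /* Hypothetical master-mute implementation for a HAL with codec support. */
    static bool hw_master_mute = false;

    static int adev_set_master_mute(struct audio_hw_device *dev, bool muted)
    {
        hw_master_mute = muted;  /* a real HAL would also program the codec */
        return 0;
    }

    static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted)
    {
        *muted = hw_master_mute;
        return 0;
    }
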
diff --git a/modules/audio/audio_policy.c b/modules/audio/audio_policy.c
index ee95e92..2dd3dbe 100644
--- a/modules/audio/audio_policy.c
+++ b/modules/audio/audio_policy.c
@@ -98,7 +98,7 @@
                                        audio_stream_type_t stream,
                                        uint32_t sampling_rate,
                                        audio_format_t format,
-                                       uint32_t channels,
+                                       audio_channel_mask_t channelMask,
                                        audio_output_flags_t flags)
 {
     return 0;
@@ -124,7 +124,7 @@
 static audio_io_handle_t ap_get_input(struct audio_policy *pol, audio_source_t inputSource,
                                       uint32_t sampling_rate,
                                       audio_format_t format,
-                                      uint32_t channels,
+                                      audio_channel_mask_t channelMask,
                                       audio_in_acoustics_t acoustics)
 {
     return 0;
@@ -193,13 +193,13 @@
 }
 
 static audio_io_handle_t ap_get_output_for_effect(struct audio_policy *pol,
-                                            struct effect_descriptor_s *desc)
+                                            const struct effect_descriptor_s *desc)
 {
     return 0;
 }
 
 static int ap_register_effect(struct audio_policy *pol,
-                              struct effect_descriptor_s *desc,
+                              const struct effect_descriptor_s *desc,
                               audio_io_handle_t output,
                               uint32_t strategy,
                               int session,
diff --git a/modules/gralloc/gralloc.cpp b/modules/gralloc/gralloc.cpp
index a6b4edd..99aeb01 100644
--- a/modules/gralloc/gralloc.cpp
+++ b/modules/gralloc/gralloc.cpp
@@ -219,6 +219,7 @@
         case HAL_PIXEL_FORMAT_RGB_565:
         case HAL_PIXEL_FORMAT_RGBA_5551:
         case HAL_PIXEL_FORMAT_RGBA_4444:
+        case HAL_PIXEL_FORMAT_RAW_SENSOR:
             bpp = 2;
             break;
         default:
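
With this case added, RAW_SENSOR buffers are sized like the other 16-bit-per-pixel
formats. A quick sketch of the resulting arithmetic, assuming for simplicity that
the stride equals the width:

    /* Sketch: allocation size for a 640x480 HAL_PIXEL_FORMAT_RAW_SENSOR buffer. */
    size_t bpp    = 2;                    /* from the case above */
    size_t stride = 640;
    size_t size   = stride * 480 * bpp;   /* 614400 bytes */
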
diff --git a/modules/hwcomposer/hwcomposer.cpp b/modules/hwcomposer/hwcomposer.cpp
index 0e04cac..f0a5512 100644
--- a/modules/hwcomposer/hwcomposer.cpp
+++ b/modules/hwcomposer/hwcomposer.cpp
@@ -29,7 +29,7 @@
 /*****************************************************************************/
 
 struct hwc_context_t {
-    hwc_composer_device_t device;
+    hwc_composer_device_1_t device;
     /* our private state goes below here */
 };
 
@@ -54,7 +54,7 @@
 
 /*****************************************************************************/
 
-static void dump_layer(hwc_layer_t const* l) {
+static void dump_layer(hwc_layer_1_t const* l) {
     ALOGD("\ttype=%d, flags=%08x, handle=%p, tr=%02x, blend=%04x, {%d,%d,%d,%d}, {%d,%d,%d,%d}",
             l->compositionType, l->flags, l->handle, l->transform, l->blending,
             l->sourceCrop.left,
@@ -67,26 +67,26 @@
             l->displayFrame.bottom);
 }
 
-static int hwc_prepare(hwc_composer_device_t *dev, hwc_layer_list_t* list) {
-    if (list && (list->flags & HWC_GEOMETRY_CHANGED)) {
-        for (size_t i=0 ; i<list->numHwLayers ; i++) {
+static int hwc_prepare(hwc_composer_device_1_t *dev,
+        size_t numDisplays, hwc_display_contents_1_t** displays) {
+    if (displays && (displays[0]->flags & HWC_GEOMETRY_CHANGED)) {
+        for (size_t i=0 ; i<displays[0]->numHwLayers ; i++) {
             //dump_layer(&list->hwLayers[i]);
-            list->hwLayers[i].compositionType = HWC_FRAMEBUFFER;
+            displays[0]->hwLayers[i].compositionType = HWC_FRAMEBUFFER;
         }
     }
     return 0;
 }
 
-static int hwc_set(hwc_composer_device_t *dev,
-        hwc_display_t dpy,
-        hwc_surface_t sur,
-        hwc_layer_list_t* list)
+static int hwc_set(hwc_composer_device_1_t *dev,
+        size_t numDisplays, hwc_display_contents_1_t** displays)
 {
     //for (size_t i=0 ; i<list->numHwLayers ; i++) {
     //    dump_layer(&list->hwLayers[i]);
     //}
 
-    EGLBoolean success = eglSwapBuffers((EGLDisplay)dpy, (EGLSurface)sur);
+    EGLBoolean success = eglSwapBuffers((EGLDisplay)displays[0]->dpy,
+            (EGLSurface)displays[0]->sur);
     if (!success) {
         return HWC_EGL_ERROR;
     }
@@ -117,7 +117,7 @@
 
         /* initialize the procs */
         dev->device.common.tag = HARDWARE_DEVICE_TAG;
-        dev->device.common.version = 0;
+        dev->device.common.version = HWC_DEVICE_API_VERSION_1_0;
         dev->device.common.module = const_cast<hw_module_t*>(module);
         dev->device.common.close = hwc_device_close;
 
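
Note that the sample module only composes displays[0]. An implementation honoring
the full v1 contract (numDisplays > 0, non-NULL entries) would walk every display;
a sketch of how hwc_prepare might generalize:

    /* Sketch: a prepare() that visits every display, generalizing the
     * sample module's displays[0]-only logic. */
    static int hwc_prepare_all(hwc_composer_device_1_t *dev,
            size_t numDisplays, hwc_display_contents_1_t** displays) {
        for (size_t d = 0; d < numDisplays; d++) {
            if (!(displays[d]->flags & HWC_GEOMETRY_CHANGED))
                continue;
            for (size_t i = 0; i < displays[d]->numHwLayers; i++)
                displays[d]->hwLayers[i].compositionType = HWC_FRAMEBUFFER;
        }
        return 0;
    }
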
diff --git a/tests/camera2/Android.mk b/tests/camera2/Android.mk
index 340ec30..c378e12 100644
--- a/tests/camera2/Android.mk
+++ b/tests/camera2/Android.mk
@@ -2,13 +2,17 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-	camera2.cpp
+	camera2.cpp \
+	camera2_utils.cpp
 
 LOCAL_SHARED_LIBRARIES := \
 	libutils \
 	libstlport \
 	libhardware \
-	libcamera_metadata
+	libcamera_metadata \
+	libgui \
+	libsync \
+	libui
 
 LOCAL_STATIC_LIBRARIES := \
 	libgtest \
@@ -21,7 +25,7 @@
 	external/stlport/stlport \
 	system/media/camera/include \
 
-LOCAL_MODULE:= camera2_hal_tests
+LOCAL_MODULE:= camera2_test
 LOCAL_MODULE_TAGS := tests
 
 include $(BUILD_EXECUTABLE)
diff --git a/tests/camera2/camera2.cpp b/tests/camera2/camera2.cpp
index d13d7cd..05f61ef 100644
--- a/tests/camera2/camera2.cpp
+++ b/tests/camera2/camera2.cpp
@@ -14,10 +14,22 @@
  * limitations under the License.
  */
 
-#include <system/camera_metadata.h>
-#include <hardware/camera2.h>
+#define LOG_TAG "Camera2_test"
+#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
 #include <gtest/gtest.h>
 #include <iostream>
+#include <fstream>
+
+#include <utils/Vector.h>
+#include <gui/CpuConsumer.h>
+#include <ui/PixelFormat.h>
+#include <system/camera_metadata.h>
+
+#include "camera2_utils.h"
+
+namespace android {
 
 class Camera2Test: public testing::Test {
   public:
@@ -33,12 +45,16 @@
         ASSERT_TRUE(NULL != module)
                 << "No camera module was set by hw_get_module";
 
-        std::cout << "  Camera module name: " << module->name << std::endl;
-        std::cout << "  Camera module author: " << module->author << std::endl;
-        std::cout << "  Camera module API version: 0x" << std::hex
-                  << module->module_api_version << std::endl;
-        std::cout << "  Camera module HAL API version: 0x" << std::hex
-                  << module->hal_api_version << std::endl;
+        IF_ALOGV() {
+            std::cout << "  Camera module name: "
+                    << module->name << std::endl;
+            std::cout << "  Camera module author: "
+                    << module->author << std::endl;
+            std::cout << "  Camera module API version: 0x" << std::hex
+                    << module->module_api_version << std::endl;
+            std::cout << "  Camera module HAL API version: 0x" << std::hex
+                    << module->hal_api_version << std::endl;
+        }
 
         int16_t version2_0 = CAMERA_MODULE_API_VERSION_2_0;
         ASSERT_EQ(version2_0, module->module_api_version)
@@ -52,7 +68,10 @@
         sNumCameras = sCameraModule->get_number_of_cameras();
         ASSERT_LT(0, sNumCameras) << "No camera devices available!";
 
-        std::cout << "  Camera device count: " << sNumCameras << std::endl;
+        IF_ALOGV() {
+            std::cout << "  Camera device count: " << sNumCameras << std::endl;
+        }
+
         sCameraSupportsHal2 = new bool[sNumCameras];
 
         for (int i = 0; i < sNumCameras; i++) {
@@ -60,19 +79,24 @@
             res = sCameraModule->get_camera_info(i, &info);
             ASSERT_EQ(0, res)
                     << "Failure getting camera info for camera " << i;
-            std::cout << "  Camera device: " << std::dec
-                      << i << std::endl;;
-            std::cout << "    Facing: " << std::dec
-                      << info.facing  << std::endl;
-            std::cout << "    Orientation: " << std::dec
-                      << info.orientation  << std::endl;
-            std::cout << "    Version: 0x" << std::hex <<
-                    info.device_version  << std::endl;
+            IF_ALOGV() {
+                std::cout << "  Camera device: " << std::dec
+                          << i << std::endl;
+                std::cout << "    Facing: " << std::dec
+                          << info.facing  << std::endl;
+                std::cout << "    Orientation: " << std::dec
+                          << info.orientation  << std::endl;
+                std::cout << "    Version: 0x" << std::hex <<
+                        info.device_version  << std::endl;
+            }
             if (info.device_version >= CAMERA_DEVICE_API_VERSION_2_0) {
                 sCameraSupportsHal2[i] = true;
                 ASSERT_TRUE(NULL != info.static_camera_characteristics);
-                std::cout << "    Static camera metadata:"  << std::endl;
-                dump_camera_metadata(info.static_camera_characteristics, 0, 1);
+                IF_ALOGV() {
+                    std::cout << "    Static camera metadata:"  << std::endl;
+                    dump_indented_camera_metadata(info.static_camera_characteristics,
+                            0, 1, 6);
+                }
             } else {
                 sCameraSupportsHal2[i] = false;
             }
@@ -83,13 +107,26 @@
         return sCameraModule;
     }
 
-    static const camera2_device_t *openCameraDevice(int id) {
+    static int getNumCameras() {
+        return sNumCameras;
+    }
+
+    static bool isHal2Supported(int id) {
+        return sCameraSupportsHal2[id];
+    }
+
+    static camera2_device_t *openCameraDevice(int id) {
+        ALOGV("Opening camera %d", id);
         if (NULL == sCameraSupportsHal2) return NULL;
         if (id >= sNumCameras) return NULL;
         if (!sCameraSupportsHal2[id]) return NULL;
 
         hw_device_t *device = NULL;
         const camera_module_t *cam_module = getCameraModule();
+        if (cam_module == NULL) {
+            return NULL;
+        }
+
         char camId[10];
         int res;
 
@@ -98,7 +135,7 @@
             (const hw_module_t*)cam_module,
             camId,
             &device);
-        if (res < 0 || cam_module == NULL) {
+        if (res != NO_ERROR || device == NULL) {
             return NULL;
         }
         camera2_device_t *cam_device =
@@ -106,18 +143,582 @@
         return cam_device;
     }
 
-  private:
+    static status_t configureCameraDevice(camera2_device_t *dev,
+            MetadataQueue &requestQueue,
+            MetadataQueue  &frameQueue,
+            NotifierListener &listener) {
 
+        status_t err;
+
+        err = dev->ops->set_request_queue_src_ops(dev,
+                requestQueue.getToConsumerInterface());
+        if (err != OK) return err;
+
+        requestQueue.setFromConsumerInterface(dev);
+
+        err = dev->ops->set_frame_queue_dst_ops(dev,
+                frameQueue.getToProducerInterface());
+        if (err != OK) return err;
+
+        err = listener.getNotificationsFrom(dev);
+        if (err != OK) return err;
+
+        vendor_tag_query_ops_t *vendor_metadata_tag_ops;
+        err = dev->ops->get_metadata_vendor_tag_ops(dev, &vendor_metadata_tag_ops);
+        if (err != OK) return err;
+
+        err = set_camera_metadata_vendor_tag_ops(vendor_metadata_tag_ops);
+        if (err != OK) return err;
+
+        return OK;
+    }
+
+    static status_t closeCameraDevice(camera2_device_t *cam_dev) {
+        int res;
+        ALOGV("Closing camera %p", cam_dev);
+
+        hw_device_t *dev = reinterpret_cast<hw_device_t *>(cam_dev);
+        res = dev->close(dev);
+        return res;
+    }
+
+    void setUpCamera(int id) {
+        ASSERT_GT(sNumCameras, id);
+        status_t res;
+
+        if (mDevice != NULL) {
+            closeCameraDevice(mDevice);
+        }
+        mDevice = openCameraDevice(id);
+        ASSERT_TRUE(NULL != mDevice) << "Failed to open camera device";
+
+        camera_info info;
+        res = sCameraModule->get_camera_info(id, &info);
+        ASSERT_EQ(OK, res);
+
+        mStaticInfo = info.static_camera_characteristics;
+
+        res = configureCameraDevice(mDevice,
+                mRequests,
+                mFrames,
+                mNotifications);
+        ASSERT_EQ(OK, res) << "Failure to configure camera device";
+
+    }
+
+    void setUpStream(sp<ISurfaceTexture> consumer,
+            int width, int height, int format, int *id) {
+        status_t res;
+
+        StreamAdapter* stream = new StreamAdapter(consumer);
+
+        ALOGV("Creating stream, format 0x%x, %d x %d", format, width, height);
+        res = stream->connectToDevice(mDevice, width, height, format);
+        ASSERT_EQ(NO_ERROR, res) << "Failed to connect to stream: "
+                                 << strerror(-res);
+        mStreams.push_back(stream);
+
+        *id = stream->getId();
+    }
+
+    void disconnectStream(int id) {
+        status_t res;
+        unsigned int i=0;
+        for (; i < mStreams.size(); i++) {
+            if (mStreams[i]->getId() == id) {
+                res = mStreams[i]->disconnect();
+                ASSERT_EQ(NO_ERROR, res) <<
+                        "Failed to disconnect stream " << id;
+                break;
+            }
+        }
+        ASSERT_GT(mStreams.size(), i) << "Stream id not found: " << id;
+    }
+
+    void getResolutionList(int32_t format,
+            int32_t **list,
+            size_t *count) {
+        ALOGV("Getting resolutions for format %x", format);
+        status_t res;
+        if (format != CAMERA2_HAL_PIXEL_FORMAT_OPAQUE) {
+            camera_metadata_entry_t availableFormats;
+            res = find_camera_metadata_entry(mStaticInfo,
+                    ANDROID_SCALER_AVAILABLE_FORMATS,
+                    &availableFormats);
+            ASSERT_EQ(OK, res);
+
+            uint32_t formatIdx;
+            for (formatIdx=0; formatIdx < availableFormats.count; formatIdx++) {
+                if (availableFormats.data.i32[formatIdx] == format) break;
+            }
+            ASSERT_NE(availableFormats.count, formatIdx)
+                << "No support found for format 0x" << std::hex << format;
+        }
+
+        camera_metadata_entry_t availableSizes;
+        if (format == HAL_PIXEL_FORMAT_RAW_SENSOR) {
+            res = find_camera_metadata_entry(mStaticInfo,
+                    ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+                    &availableSizes);
+        } else if (format == HAL_PIXEL_FORMAT_BLOB) {
+            res = find_camera_metadata_entry(mStaticInfo,
+                    ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+                    &availableSizes);
+        } else {
+            res = find_camera_metadata_entry(mStaticInfo,
+                    ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+                    &availableSizes);
+        }
+        ASSERT_EQ(OK, res);
+
+        *list = availableSizes.data.i32;
+        *count = availableSizes.count;
+    }
+
+    virtual void SetUp() {
+        const ::testing::TestInfo* const testInfo =
+                ::testing::UnitTest::GetInstance()->current_test_info();
+
+        ALOGV("*** Starting test %s in test case %s", testInfo->name(), testInfo->test_case_name());
+        mDevice = NULL;
+    }
+
+    virtual void TearDown() {
+        for (unsigned int i = 0; i < mStreams.size(); i++) {
+            delete mStreams[i];
+        }
+        if (mDevice != NULL) {
+            closeCameraDevice(mDevice);
+        }
+    }
+
+    camera2_device    *mDevice;
+    camera_metadata_t *mStaticInfo;
+
+    MetadataQueue    mRequests;
+    MetadataQueue    mFrames;
+    NotifierListener mNotifications;
+
+    Vector<StreamAdapter*> mStreams;
+
+  private:
     static camera_module_t *sCameraModule;
-    static int sNumCameras;
-    static bool *sCameraSupportsHal2;
+    static int              sNumCameras;
+    static bool            *sCameraSupportsHal2;
 };
 
 camera_module_t *Camera2Test::sCameraModule = NULL;
-int Camera2Test::sNumCameras = 0;
-bool *Camera2Test::sCameraSupportsHal2 = NULL;
+bool *Camera2Test::sCameraSupportsHal2      = NULL;
+int Camera2Test::sNumCameras                = 0;
+
+static const nsecs_t USEC = 1000;
+static const nsecs_t MSEC = 1000*USEC;
+static const nsecs_t SEC = 1000*MSEC;
 
 
-TEST_F(Camera2Test, Basic) {
-    ASSERT_TRUE(NULL != getCameraModule());
+TEST_F(Camera2Test, OpenClose) {
+    status_t res;
+
+    for (int id = 0; id < getNumCameras(); id++) {
+        if (!isHal2Supported(id)) continue;
+
+        camera2_device_t *d = openCameraDevice(id);
+        ASSERT_TRUE(NULL != d) << "Failed to open camera device";
+
+        res = closeCameraDevice(d);
+        ASSERT_EQ(NO_ERROR, res) << "Failed to close camera device";
+    }
 }
+
+TEST_F(Camera2Test, Capture1Raw) {
+    status_t res;
+
+    for (int id = 0; id < getNumCameras(); id++) {
+        if (!isHal2Supported(id)) continue;
+
+        ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
+
+        sp<CpuConsumer> rawConsumer = new CpuConsumer(1);
+        sp<FrameWaiter> rawWaiter = new FrameWaiter();
+        rawConsumer->setFrameAvailableListener(rawWaiter);
+
+        int32_t *rawResolutions;
+        size_t   rawResolutionsCount;
+
+        int format = HAL_PIXEL_FORMAT_RAW_SENSOR;
+
+        getResolutionList(format,
+                &rawResolutions, &rawResolutionsCount);
+        ASSERT_LT((size_t)0, rawResolutionsCount);
+
+        // Pick first available raw resolution
+        int width = rawResolutions[0];
+        int height = rawResolutions[1];
+
+        int streamId;
+        ASSERT_NO_FATAL_FAILURE(
+            setUpStream(rawConsumer->getProducerInterface(),
+                    width, height, format, &streamId) );
+
+        camera_metadata_t *request;
+        request = allocate_camera_metadata(20, 2000);
+
+        uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
+        add_camera_metadata_entry(request,
+                ANDROID_REQUEST_METADATA_MODE,
+                (void**)&metadataMode, 1);
+        uint32_t outputStreams = streamId;
+        add_camera_metadata_entry(request,
+                ANDROID_REQUEST_OUTPUT_STREAMS,
+                (void**)&outputStreams, 1);
+
+        uint64_t exposureTime = 10*MSEC;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_EXPOSURE_TIME,
+                (void**)&exposureTime, 1);
+        uint64_t frameDuration = 30*MSEC;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_FRAME_DURATION,
+                (void**)&frameDuration, 1);
+        uint32_t sensitivity = 100;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_SENSITIVITY,
+                (void**)&sensitivity, 1);
+
+        uint32_t hourOfDay = 12;
+        add_camera_metadata_entry(request,
+                0x80000000, // EMULATOR_HOUROFDAY
+                &hourOfDay, 1);
+
+        IF_ALOGV() {
+            std::cout << "Input request: " << std::endl;
+            dump_indented_camera_metadata(request, 0, 1, 2);
+        }
+
+        res = mRequests.enqueue(request);
+        ASSERT_EQ(NO_ERROR, res) << "Can't enqueue request: " << strerror(-res);
+
+        res = mFrames.waitForBuffer(exposureTime + SEC);
+        ASSERT_EQ(NO_ERROR, res) << "No frame to get: " << strerror(-res);
+
+        camera_metadata_t *frame;
+        res = mFrames.dequeue(&frame);
+        ASSERT_EQ(NO_ERROR, res);
+        ASSERT_TRUE(frame != NULL);
+
+        IF_ALOGV() {
+            std::cout << "Output frame:" << std::endl;
+            dump_indented_camera_metadata(frame, 0, 1, 2);
+        }
+
+        res = rawWaiter->waitForFrame(exposureTime + SEC);
+        ASSERT_EQ(NO_ERROR, res);
+
+        CpuConsumer::LockedBuffer buffer;
+        res = rawConsumer->lockNextBuffer(&buffer);
+        ASSERT_EQ(NO_ERROR, res);
+
+        IF_ALOGV() {
+            const char *dumpname =
+                    "/data/local/tmp/camera2_test-capture1raw-dump.raw";
+            ALOGV("Dumping raw buffer to %s", dumpname);
+            // Write to file
+            std::ofstream rawFile(dumpname);
+            size_t bpp = 2;
+            for (unsigned int y = 0; y < buffer.height; y++) {
+                rawFile.write(
+                        (const char *)(buffer.data + y * buffer.stride * bpp),
+                        buffer.width * bpp);
+            }
+            rawFile.close();
+        }
+
+        res = rawConsumer->unlockBuffer(buffer);
+        ASSERT_EQ(NO_ERROR, res);
+
+        ASSERT_NO_FATAL_FAILURE(disconnectStream(streamId));
+
+        res = closeCameraDevice(mDevice);
+        ASSERT_EQ(NO_ERROR, res) << "Failed to close camera device";
+
+    }
+}
+
+TEST_F(Camera2Test, CaptureBurstRaw) {
+    status_t res;
+
+    for (int id = 0; id < getNumCameras(); id++) {
+        if (!isHal2Supported(id)) continue;
+
+        ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
+
+        sp<CpuConsumer> rawConsumer = new CpuConsumer(1);
+        sp<FrameWaiter> rawWaiter = new FrameWaiter();
+        rawConsumer->setFrameAvailableListener(rawWaiter);
+
+        int32_t *rawResolutions;
+        size_t    rawResolutionsCount;
+
+        int format = HAL_PIXEL_FORMAT_RAW_SENSOR;
+
+        getResolutionList(format,
+                &rawResolutions, &rawResolutionsCount);
+        ASSERT_LT((size_t)0, rawResolutionsCount);
+
+        // Pick first available raw resolution
+        int width = rawResolutions[0];
+        int height = rawResolutions[1];
+
+        int streamId;
+        ASSERT_NO_FATAL_FAILURE(
+            setUpStream(rawConsumer->getProducerInterface(),
+                    width, height, format, &streamId) );
+
+        camera_metadata_t *request;
+        request = allocate_camera_metadata(20, 2000);
+
+        uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
+        add_camera_metadata_entry(request,
+                ANDROID_REQUEST_METADATA_MODE,
+                (void**)&metadataMode, 1);
+        uint32_t outputStreams = streamId;
+        add_camera_metadata_entry(request,
+                ANDROID_REQUEST_OUTPUT_STREAMS,
+                (void**)&outputStreams, 1);
+
+        uint64_t frameDuration = 30*MSEC;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_FRAME_DURATION,
+                (void**)&frameDuration, 1);
+        uint32_t sensitivity = 100;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_SENSITIVITY,
+                (void**)&sensitivity, 1);
+
+        uint32_t hourOfDay = 12;
+        add_camera_metadata_entry(request,
+                0x80000000, // EMULATOR_HOUROFDAY
+                &hourOfDay, 1);
+
+        IF_ALOGV() {
+            std::cout << "Input request template: " << std::endl;
+            dump_indented_camera_metadata(request, 0, 1, 2);
+        }
+
+        int numCaptures = 10;
+
+        // Enqueue numCaptures requests with increasing exposure time
+
+        uint64_t exposureTime = 100 * USEC;
+        for (int reqCount = 0; reqCount < numCaptures; reqCount++ ) {
+            camera_metadata_t *req;
+            req = allocate_camera_metadata(20, 2000);
+            append_camera_metadata(req, request);
+
+            add_camera_metadata_entry(req,
+                    ANDROID_SENSOR_EXPOSURE_TIME,
+                    (void**)&exposureTime, 1);
+            exposureTime *= 2;
+
+            res = mRequests.enqueue(req);
+            ASSERT_EQ(NO_ERROR, res) << "Can't enqueue request: "
+                    << strerror(-res);
+        }
+
+        // Get frames and image buffers one by one
+        uint64_t expectedExposureTime = 100 * USEC;
+        for (int frameCount = 0; frameCount < numCaptures; frameCount++) {
+            res = mFrames.waitForBuffer(SEC + expectedExposureTime);
+            ASSERT_EQ(NO_ERROR, res) << "No frame to get: " << strerror(-res);
+
+            camera_metadata_t *frame;
+            res = mFrames.dequeue(&frame);
+            ASSERT_EQ(NO_ERROR, res);
+            ASSERT_TRUE(frame != NULL);
+
+            camera_metadata_entry_t frameNumber;
+            res = find_camera_metadata_entry(frame,
+                    ANDROID_REQUEST_FRAME_COUNT,
+                    &frameNumber);
+            ASSERT_EQ(NO_ERROR, res);
+            ASSERT_EQ(frameCount, *frameNumber.data.i32);
+
+            res = rawWaiter->waitForFrame(SEC + expectedExposureTime);
+            ASSERT_EQ(NO_ERROR, res) <<
+                    "Never got raw data for capture " << frameCount;
+
+            CpuConsumer::LockedBuffer buffer;
+            res = rawConsumer->lockNextBuffer(&buffer);
+            ASSERT_EQ(NO_ERROR, res);
+
+            IF_ALOGV() {
+                char dumpname[60];
+                snprintf(dumpname, 60,
+                        "/data/local/tmp/camera2_test-"
+                        "captureBurstRaw-dump_%d.raw",
+                        frameCount);
+                ALOGV("Dumping raw buffer to %s", dumpname);
+                // Write to file
+                std::ofstream rawFile(dumpname);
+                for (unsigned int y = 0; y < buffer.height; y++) {
+                    rawFile.write(
+                            (const char *)(buffer.data + y * buffer.stride * 2),
+                            buffer.width * 2);
+                }
+                rawFile.close();
+            }
+
+            res = rawConsumer->unlockBuffer(buffer);
+            ASSERT_EQ(NO_ERROR, res);
+
+            expectedExposureTime *= 2;
+        }
+    }
+}
+
+TEST_F(Camera2Test, ConstructDefaultRequests) {
+    status_t res;
+
+    for (int id = 0; id < getNumCameras(); id++) {
+        if (!isHal2Supported(id)) continue;
+
+        ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
+
+        for (int i = CAMERA2_TEMPLATE_PREVIEW; i < CAMERA2_TEMPLATE_COUNT;
+             i++) {
+            camera_metadata_t *request = NULL;
+            res = mDevice->ops->construct_default_request(mDevice,
+                    i,
+                    &request);
+            EXPECT_EQ(NO_ERROR, res) <<
+                    "Unable to construct request from template type " << i;
+            EXPECT_TRUE(request != NULL);
+            EXPECT_LT((size_t)0, get_camera_metadata_entry_count(request));
+            EXPECT_LT((size_t)0, get_camera_metadata_data_count(request));
+
+            IF_ALOGV() {
+                std::cout << "  ** Template type " << i << ":" << std::endl;
+                dump_indented_camera_metadata(request, 0, 2, 4);
+            }
+
+            free_camera_metadata(request);
+        }
+    }
+}
+
+TEST_F(Camera2Test, Capture1Jpeg) {
+    status_t res;
+
+    for (int id = 0; id < getNumCameras(); id++) {
+        if (!isHal2Supported(id)) continue;
+
+        ASSERT_NO_FATAL_FAILURE(setUpCamera(id));
+
+        sp<CpuConsumer> jpegConsumer = new CpuConsumer(1);
+        sp<FrameWaiter> jpegWaiter = new FrameWaiter();
+        jpegConsumer->setFrameAvailableListener(jpegWaiter);
+
+        int32_t *jpegResolutions;
+        size_t   jpegResolutionsCount;
+
+        int format = HAL_PIXEL_FORMAT_BLOB;
+
+        getResolutionList(format,
+                &jpegResolutions, &jpegResolutionsCount);
+        ASSERT_LT((size_t)0, jpegResolutionsCount);
+
+        // Pick first available JPEG resolution
+        int width = jpegResolutions[0];
+        int height = jpegResolutions[1];
+
+        int streamId;
+        ASSERT_NO_FATAL_FAILURE(
+            setUpStream(jpegConsumer->getProducerInterface(),
+                    width, height, format, &streamId) );
+
+        camera_metadata_t *request;
+        request = allocate_camera_metadata(20, 2000);
+
+        uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
+        add_camera_metadata_entry(request,
+                ANDROID_REQUEST_METADATA_MODE,
+                (void**)&metadataMode, 1);
+        uint32_t outputStreams = streamId;
+        add_camera_metadata_entry(request,
+                ANDROID_REQUEST_OUTPUT_STREAMS,
+                (void**)&outputStreams, 1);
+
+        uint64_t exposureTime = 10*MSEC;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_EXPOSURE_TIME,
+                (void**)&exposureTime, 1);
+        uint64_t frameDuration = 30*MSEC;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_FRAME_DURATION,
+                (void**)&frameDuration, 1);
+        uint32_t sensitivity = 100;
+        add_camera_metadata_entry(request,
+                ANDROID_SENSOR_SENSITIVITY,
+                (void**)&sensitivity, 1);
+
+        uint32_t hourOfDay = 12;
+        add_camera_metadata_entry(request,
+                0x80000000, // EMULATOR_HOUROFDAY
+                &hourOfDay, 1);
+
+        IF_ALOGV() {
+            std::cout << "Input request: " << std::endl;
+            dump_indented_camera_metadata(request, 0, 1, 4);
+        }
+
+        res = mRequests.enqueue(request);
+        ASSERT_EQ(NO_ERROR, res) << "Can't enqueue request: " << strerror(-res);
+
+        res = mFrames.waitForBuffer(exposureTime + SEC);
+        ASSERT_EQ(NO_ERROR, res) << "No frame to get: " << strerror(-res);
+
+        camera_metadata_t *frame;
+        res = mFrames.dequeue(&frame);
+        ASSERT_EQ(NO_ERROR, res);
+        ASSERT_TRUE(frame != NULL);
+
+        IF_ALOGV() {
+            std::cout << "Output frame:" << std::endl;
+            dump_indented_camera_metadata(frame, 0, 1, 4);
+        }
+
+        res = jpegWaiter->waitForFrame(exposureTime + SEC);
+        ASSERT_EQ(NO_ERROR, res);
+
+        CpuConsumer::LockedBuffer buffer;
+        res = jpegConsumer->lockNextBuffer(&buffer);
+        ASSERT_EQ(NO_ERROR, res);
+
+        IF_ALOGV() {
+            const char *dumpname =
+                    "/data/local/tmp/camera2_test-capture1jpeg-dump.jpeg";
+            ALOGV("Dumping JPEG buffer to %s", dumpname);
+            // Write to file
+            std::ofstream jpegFile(dumpname);
+            size_t bpp = 1;
+            for (unsigned int y = 0; y < buffer.height; y++) {
+                jpegFile.write(
+                        (const char *)(buffer.data + y * buffer.stride * bpp),
+                        buffer.width * bpp);
+            }
+            jpegFile.close();
+        }
+
+        res = jpegConsumer->unlockBuffer(buffer);
+        ASSERT_EQ(NO_ERROR, res);
+
+        ASSERT_NO_FATAL_FAILURE(disconnectStream(streamId));
+
+        res = closeCameraDevice(mDevice);
+        ASSERT_EQ(NO_ERROR, res) << "Failed to close camera device";
+
+    }
+}
+
+
+} // namespace android
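
The three capture tests above repeat the same request-construction pattern. A
hypothetical helper capturing it (names and capacities mirror the tests; the
function itself is not part of this change):

    // Hypothetical helper: builds the common capture request used by the
    // tests above (metadata mode, output stream, exposure, frame duration).
    static camera_metadata_t *buildTestRequest(uint32_t streamId,
            uint64_t exposureNs, uint64_t frameDurationNs) {
        camera_metadata_t *req = allocate_camera_metadata(20, 2000);
        uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
        add_camera_metadata_entry(req, ANDROID_REQUEST_METADATA_MODE,
                &metadataMode, 1);
        add_camera_metadata_entry(req, ANDROID_REQUEST_OUTPUT_STREAMS,
                &streamId, 1);
        add_camera_metadata_entry(req, ANDROID_SENSOR_EXPOSURE_TIME,
                &exposureNs, 1);
        add_camera_metadata_entry(req, ANDROID_SENSOR_FRAME_DURATION,
                &frameDurationNs, 1);
        return req;
    }
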
diff --git a/tests/camera2/camera2_utils.cpp b/tests/camera2/camera2_utils.cpp
new file mode 100644
index 0000000..ba938d9
--- /dev/null
+++ b/tests/camera2/camera2_utils.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Utility classes for camera2 HAL testing
+
+#define LOG_TAG "Camera2_test_utils"
+#define LOG_NDEBUG 0
+
+#include "utils/Log.h"
+#include "camera2_utils.h"
+
+namespace android {
+
+/**
+ * MetadataQueue
+ */
+
+MetadataQueue::MetadataQueue():
+            mDevice(NULL),
+            mFrameCount(0),
+            mCount(0),
+            mStreamSlotCount(0),
+            mSignalConsumer(true)
+{
+    camera2_request_queue_src_ops::dequeue_request = consumer_dequeue;
+    camera2_request_queue_src_ops::request_count = consumer_buffer_count;
+    camera2_request_queue_src_ops::free_request = consumer_free;
+
+    camera2_frame_queue_dst_ops::dequeue_frame = producer_dequeue;
+    camera2_frame_queue_dst_ops::cancel_frame = producer_cancel;
+    camera2_frame_queue_dst_ops::enqueue_frame = producer_enqueue;
+}
+
+MetadataQueue::~MetadataQueue() {
+    freeBuffers(mEntries.begin(), mEntries.end());
+    freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
+}
+
+// Interface to camera2 HAL as consumer (input requests/reprocessing)
+const camera2_request_queue_src_ops_t* MetadataQueue::getToConsumerInterface() {
+    return static_cast<camera2_request_queue_src_ops_t*>(this);
+}
+
+void MetadataQueue::setFromConsumerInterface(camera2_device_t *d) {
+    mDevice = d;
+}
+
+const camera2_frame_queue_dst_ops_t* MetadataQueue::getToProducerInterface() {
+    return static_cast<camera2_frame_queue_dst_ops_t*>(this);
+}
+
+// Real interfaces
+status_t MetadataQueue::enqueue(camera_metadata_t *buf) {
+    Mutex::Autolock l(mMutex);
+
+    mCount++;
+    mEntries.push_back(buf);
+    notEmpty.signal();
+
+    if (mSignalConsumer && mDevice != NULL) {
+        mSignalConsumer = false;
+
+        mMutex.unlock();
+        ALOGV("%s: Signaling consumer", __FUNCTION__);
+        mDevice->ops->notify_request_queue_not_empty(mDevice);
+        mMutex.lock();
+    }
+    return OK;
+}
+
+int MetadataQueue::getBufferCount() {
+    Mutex::Autolock l(mMutex);
+    if (mStreamSlotCount > 0) {
+        return CAMERA2_REQUEST_QUEUE_IS_BOTTOMLESS;
+    }
+    return mCount;
+}
+
+status_t MetadataQueue::dequeue(camera_metadata_t **buf, bool incrementCount) {
+    Mutex::Autolock l(mMutex);
+
+    if (mCount == 0) {
+        if (mStreamSlotCount == 0) {
+            ALOGV("%s: Empty", __FUNCTION__);
+            *buf = NULL;
+            mSignalConsumer = true;
+            return OK;
+        }
+        ALOGV("%s: Streaming %d frames to queue", __FUNCTION__,
+              mStreamSlotCount);
+
+        for (List<camera_metadata_t*>::iterator slotEntry = mStreamSlot.begin();
+                slotEntry != mStreamSlot.end();
+                slotEntry++ ) {
+            size_t entries = get_camera_metadata_entry_count(*slotEntry);
+            size_t dataBytes = get_camera_metadata_data_count(*slotEntry);
+
+            camera_metadata_t *copy = allocate_camera_metadata(entries, dataBytes);
+            append_camera_metadata(copy, *slotEntry);
+            mEntries.push_back(copy);
+        }
+        mCount = mStreamSlotCount;
+    }
+    ALOGV("MetadataQueue: dequeue (%d buffers)", mCount);
+    camera_metadata_t *b = *(mEntries.begin());
+    mEntries.erase(mEntries.begin());
+
+    if (incrementCount) {
+        add_camera_metadata_entry(b,
+                ANDROID_REQUEST_FRAME_COUNT,
+                (void**)&mFrameCount, 1);
+        mFrameCount++;
+    }
+
+    *buf = b;
+    mCount--;
+
+    return OK;
+}
+
+status_t MetadataQueue::waitForBuffer(nsecs_t timeout) {
+    Mutex::Autolock l(mMutex);
+    status_t res;
+    while (mCount == 0) {
+        res = notEmpty.waitRelative(mMutex, timeout);
+        if (res != OK) return res;
+    }
+    return OK;
+}
+
+status_t MetadataQueue::setStreamSlot(camera_metadata_t *buf) {
+    if (buf == NULL) {
+        freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
+        mStreamSlotCount = 0;
+        return OK;
+    }
+    if (mStreamSlotCount > 1) {
+        freeBuffers(++mStreamSlot.begin(), mStreamSlot.end());
+        mStreamSlotCount = 1;
+    }
+    if (mStreamSlotCount == 1) {
+        free_camera_metadata( *(mStreamSlot.begin()) );
+        *(mStreamSlot.begin()) = buf;
+    } else {
+        mStreamSlot.push_front(buf);
+        mStreamSlotCount = 1;
+    }
+    return OK;
+}
+
+status_t MetadataQueue::setStreamSlot(const List<camera_metadata_t*> &bufs) {
+    if (mStreamSlotCount > 0) {
+        freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
+    }
+    mStreamSlot = bufs;
+    mStreamSlotCount = mStreamSlot.size();
+
+    return OK;
+}
+
+status_t MetadataQueue::freeBuffers(List<camera_metadata_t*>::iterator start,
+                                    List<camera_metadata_t*>::iterator end) {
+    while (start != end) {
+        free_camera_metadata(*start);
+        start = mStreamSlot.erase(start);
+    }
+    return OK;
+}
+
+MetadataQueue* MetadataQueue::getInstance(
+        const camera2_request_queue_src_ops_t *q) {
+    const MetadataQueue* cmq = static_cast<const MetadataQueue*>(q);
+    return const_cast<MetadataQueue*>(cmq);
+}
+
+MetadataQueue* MetadataQueue::getInstance(
+        const camera2_frame_queue_dst_ops_t *q) {
+    const MetadataQueue* cmq = static_cast<const MetadataQueue*>(q);
+    return const_cast<MetadataQueue*>(cmq);
+}
+
+int MetadataQueue::consumer_buffer_count(
+        const camera2_request_queue_src_ops_t *q) {
+    MetadataQueue *queue = getInstance(q);
+    return queue->getBufferCount();
+}
+
+int MetadataQueue::consumer_dequeue(const camera2_request_queue_src_ops_t *q,
+        camera_metadata_t **buffer) {
+    MetadataQueue *queue = getInstance(q);
+    return queue->dequeue(buffer, true);
+}
+
+int MetadataQueue::consumer_free(const camera2_request_queue_src_ops_t *q,
+        camera_metadata_t *old_buffer) {
+    MetadataQueue *queue = getInstance(q);
+    free_camera_metadata(old_buffer);
+    return OK;
+}
+
+int MetadataQueue::producer_dequeue(const camera2_frame_queue_dst_ops_t *q,
+        size_t entries, size_t bytes,
+        camera_metadata_t **buffer) {
+    camera_metadata_t *new_buffer =
+            allocate_camera_metadata(entries, bytes);
+    if (new_buffer == NULL) return NO_MEMORY;
+    *buffer = new_buffer;
+    return OK;
+}
+
+int MetadataQueue::producer_cancel(const camera2_frame_queue_dst_ops_t *q,
+        camera_metadata_t *old_buffer) {
+    free_camera_metadata(old_buffer);
+    return OK;
+}
+
+int MetadataQueue::producer_enqueue(const camera2_frame_queue_dst_ops_t *q,
+        camera_metadata_t *filled_buffer) {
+    MetadataQueue *queue = getInstance(q);
+    return queue->enqueue(filled_buffer);
+}
+
+/**
+ * NotifierListener
+ */
+
+NotifierListener::NotifierListener() {
+}
+
+status_t NotifierListener::getNotificationsFrom(camera2_device *dev) {
+    if (!dev) return BAD_VALUE;
+    status_t err;
+    err = dev->ops->set_notify_callback(dev,
+            notify_callback_dispatch,
+            (void*)this);
+    return err;
+}
+
+status_t NotifierListener::getNextNotification(int32_t *msg_type,
+        int32_t *ext1,
+        int32_t *ext2,
+        int32_t *ext3) {
+    Mutex::Autolock l(mMutex);
+    if (mNotifications.size() == 0) return BAD_VALUE;
+    return getNextNotificationLocked(msg_type, ext1, ext2, ext3);
+}
+
+status_t NotifierListener::waitForNotification(int32_t *msg_type,
+        int32_t *ext1,
+        int32_t *ext2,
+        int32_t *ext3) {
+    Mutex::Autolock l(mMutex);
+    while (mNotifications.size() == 0) {
+        mNewNotification.wait(mMutex);
+    }
+    return getNextNotificationLocked(msg_type, ext1, ext2, ext3);
+}
+
+int NotifierListener::numNotifications() {
+    Mutex::Autolock l(mMutex);
+    return mNotifications.size();
+}
+
+status_t NotifierListener::getNextNotificationLocked(int32_t *msg_type,
+        int32_t *ext1,
+        int32_t *ext2,
+        int32_t *ext3) {
+    *msg_type = mNotifications.begin()->msg_type;
+    *ext1 = mNotifications.begin()->ext1;
+    *ext2 = mNotifications.begin()->ext2;
+    *ext3 = mNotifications.begin()->ext3;
+    mNotifications.erase(mNotifications.begin());
+    return OK;
+}
+
+void NotifierListener::onNotify(int32_t msg_type,
+        int32_t ext1,
+        int32_t ext2,
+        int32_t ext3) {
+    Mutex::Autolock l(mMutex);
+    mNotifications.push_back(Notification(msg_type, ext1, ext2, ext3));
+    mNewNotification.signal();
+}
+
+void NotifierListener::notify_callback_dispatch(int32_t msg_type,
+        int32_t ext1,
+        int32_t ext2,
+        int32_t ext3,
+        void *user) {
+    NotifierListener *me = reinterpret_cast<NotifierListener*>(user);
+    me->onNotify(msg_type, ext1, ext2, ext3);
+}
+
+/**
+ * StreamAdapter
+ */
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+    (type *)((char*)(ptr) - offsetof(type, member))
+#endif
+
+StreamAdapter::StreamAdapter(sp<ISurfaceTexture> consumer):
+        mState(UNINITIALIZED), mDevice(NULL),
+        mId(-1),
+        mWidth(0), mHeight(0), mFormatRequested(0)
+{
+    mConsumerInterface = new SurfaceTextureClient(consumer);
+    camera2_stream_ops::dequeue_buffer = dequeue_buffer;
+    camera2_stream_ops::enqueue_buffer = enqueue_buffer;
+    camera2_stream_ops::cancel_buffer = cancel_buffer;
+    camera2_stream_ops::set_crop = set_crop;
+}
+
+StreamAdapter::~StreamAdapter() {
+    disconnect();
+}
+
+status_t StreamAdapter::connectToDevice(camera2_device_t *d,
+        uint32_t width, uint32_t height, int format) {
+    if (mState != UNINITIALIZED) return INVALID_OPERATION;
+    if (d == NULL) {
+        ALOGE("%s: Null device passed to stream adapter", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    status_t res;
+
+    mWidth = width;
+    mHeight = height;
+    mFormatRequested = format;
+
+    // Allocate device-side stream interface
+
+    uint32_t id;
+    uint32_t formatActual;
+    uint32_t usage;
+    uint32_t maxBuffers = 2;
+    res = d->ops->allocate_stream(d,
+            mWidth, mHeight, mFormatRequested, getStreamOps(),
+            &id, &formatActual, &usage, &maxBuffers);
+    if (res != OK) {
+        ALOGE("%s: Device stream allocation failed: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        mState = UNINITIALIZED;
+        return res;
+    }
+    mDevice = d;
+
+    mId = id;
+    mFormat = formatActual;
+    mUsage = usage;
+    mMaxProducerBuffers = maxBuffers;
+
+    // Configure consumer-side ANativeWindow interface
+
+    res = native_window_api_connect(mConsumerInterface.get(),
+            NATIVE_WINDOW_API_CAMERA);
+    if (res != OK) {
+        ALOGE("%s: Unable to connect to native window for stream %d",
+                __FUNCTION__, mId);
+        mState = ALLOCATED;
+        return res;
+    }
+
+    res = native_window_set_usage(mConsumerInterface.get(), mUsage);
+    if (res != OK) {
+        ALOGE("%s: Unable to configure usage %08x for stream %d",
+                __FUNCTION__, mUsage, mId);
+        mState = CONNECTED;
+        return res;
+    }
+
+    res = native_window_set_buffers_geometry(mConsumerInterface.get(),
+            mWidth, mHeight, mFormat);
+    if (res != OK) {
+        ALOGE("%s: Unable to configure buffer geometry"
+                " %d x %d, format 0x%x for stream %d",
+                __FUNCTION__, mWidth, mHeight, mFormat, mId);
+        mState = CONNECTED;
+        return res;
+    }
+
+    int maxConsumerBuffers;
+    res = mConsumerInterface->query(mConsumerInterface.get(),
+            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+    if (res != OK) {
+        ALOGE("%s: Unable to query consumer undequeued"
+                " buffer count for stream %d", __FUNCTION__, mId);
+        mState = CONNECTED;
+        return res;
+    }
+    mMaxConsumerBuffers = maxConsumerBuffers;
+
+    ALOGV("%s: Producer wants %d buffers, consumer wants %d", __FUNCTION__,
+            mMaxProducerBuffers, mMaxConsumerBuffers);
+
+    int totalBuffers = mMaxConsumerBuffers + mMaxProducerBuffers;
+
+    res = native_window_set_buffer_count(mConsumerInterface.get(),
+            totalBuffers);
+    if (res != OK) {
+        ALOGE("%s: Unable to set buffer count for stream %d",
+                __FUNCTION__, mId);
+        mState = CONNECTED;
+        return res;
+    }
+
+    // Register allocated buffers with HAL device
+    buffer_handle_t *buffers = new buffer_handle_t[totalBuffers];
+    ANativeWindowBuffer **anwBuffers = new ANativeWindowBuffer*[totalBuffers];
+    int bufferIdx = 0;
+    for (; bufferIdx < totalBuffers; bufferIdx++) {
+        res = native_window_dequeue_buffer_and_wait(mConsumerInterface.get(),
+                &anwBuffers[bufferIdx]);
+        if (res != OK) {
+            ALOGE("%s: Unable to dequeue buffer %d for initial registration for"
+                    "stream %d", __FUNCTION__, bufferIdx, mId);
+            mState = CONNECTED;
+            goto cleanUpBuffers;
+        }
+        buffers[bufferIdx] = anwBuffers[bufferIdx]->handle;
+    }
+
+    res = mDevice->ops->register_stream_buffers(mDevice,
+            mId,
+            totalBuffers,
+            buffers);
+    if (res != OK) {
+        ALOGE("%s: Unable to register buffers with HAL device for stream %d",
+                __FUNCTION__, mId);
+        mState = CONNECTED;
+    } else {
+        mState = ACTIVE;
+    }
+
+cleanUpBuffers:
+    for (int i = 0; i < bufferIdx; i++) {
+        // Return the initially dequeued buffers to the window; don't let a
+        // cancel error clobber the result already recorded in res.
+        status_t tmpRes = mConsumerInterface->cancelBuffer(
+                mConsumerInterface.get(), anwBuffers[i], -1);
+        if (tmpRes != OK && res == OK) res = tmpRes;
+    }
+    delete[] anwBuffers;
+    delete[] buffers;
+
+    return res;
+}
+
+status_t StreamAdapter::disconnect() {
+    status_t res;
+    if (mState >= ALLOCATED) {
+        res = mDevice->ops->release_stream(mDevice, mId);
+        if (res != OK) {
+            ALOGE("%s: Unable to release stream %d",
+                    __FUNCTION__, mId);
+            return res;
+        }
+    }
+    if (mState >= CONNECTED) {
+        res = native_window_api_disconnect(mConsumerInterface.get(),
+                NATIVE_WINDOW_API_CAMERA);
+        if (res != OK) {
+            ALOGE("%s: Unable to disconnect stream %d from native window",
+                    __FUNCTION__, mId);
+            return res;
+        }
+    }
+    mId = -1;
+    mState = DISCONNECTED;
+    return OK;
+}
+
+int StreamAdapter::getId() {
+    return mId;
+}
+
+const camera2_stream_ops *StreamAdapter::getStreamOps() {
+    return static_cast<camera2_stream_ops *>(this);
+}
+
+ANativeWindow* StreamAdapter::toANW(const camera2_stream_ops_t *w) {
+    return static_cast<const StreamAdapter*>(w)->mConsumerInterface.get();
+}
+
+int StreamAdapter::dequeue_buffer(const camera2_stream_ops_t *w,
+        buffer_handle_t** buffer) {
+    int res;
+    int state = static_cast<const StreamAdapter*>(w)->mState;
+    if (state != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
+        return INVALID_OPERATION;
+    }
+
+    ANativeWindow *a = toANW(w);
+    ANativeWindowBuffer* anb;
+    res = native_window_dequeue_buffer_and_wait(a, &anb);
+    if (res != OK) return res;
+
+    *buffer = &(anb->handle);
+
+    return res;
+}
+
+int StreamAdapter::enqueue_buffer(const camera2_stream_ops_t* w,
+        int64_t timestamp,
+        buffer_handle_t* buffer) {
+    int state = static_cast<const StreamAdapter*>(w)->mState;
+    if (state != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
+        return INVALID_OPERATION;
+    }
+    ANativeWindow *a = toANW(w);
+    status_t err;
+    err = native_window_set_buffers_timestamp(a, timestamp);
+    if (err != OK) return err;
+    return a->queueBuffer(a,
+            container_of(buffer, ANativeWindowBuffer, handle), -1);
+}
+
+int StreamAdapter::cancel_buffer(const camera2_stream_ops_t* w,
+        buffer_handle_t* buffer) {
+    int state = static_cast<const StreamAdapter*>(w)->mState;
+    if (state != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
+        return INVALID_OPERATION;
+    }
+    ANativeWindow *a = toANW(w);
+    return a->cancelBuffer(a,
+            container_of(buffer, ANativeWindowBuffer, handle), -1);
+}
+
+int StreamAdapter::set_crop(const camera2_stream_ops_t* w,
+        int left, int top, int right, int bottom) {
+    int state = static_cast<const StreamAdapter*>(w)->mState;
+    if (state != ACTIVE) {
+        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
+        return INVALID_OPERATION;
+    }
+    ANativeWindow *a = toANW(w);
+    android_native_rect_t crop = { left, top, right, bottom };
+    return native_window_set_crop(a, &crop);
+}
+
+/**
+ * FrameWaiter
+ */
+
+FrameWaiter::FrameWaiter():
+        mPendingFrames(0) {
+}
+
+status_t FrameWaiter::waitForFrame(nsecs_t timeout) {
+    status_t res;
+    Mutex::Autolock lock(mMutex);
+    while (mPendingFrames == 0) {
+        res = mCondition.waitRelative(mMutex, timeout);
+        if (res != OK) return res;
+    }
+    mPendingFrames--;
+    return OK;
+}
+
+void FrameWaiter::onFrameAvailable() {
+    Mutex::Autolock lock(mMutex);
+    mPendingFrames++;
+    mCondition.signal();
+}
+
+} // namespace android
diff --git a/tests/camera2/camera2_utils.h b/tests/camera2/camera2_utils.h
new file mode 100644
index 0000000..2c9f801
--- /dev/null
+++ b/tests/camera2/camera2_utils.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Utility classes for camera2 HAL testing
+
+#ifndef ANDROID_TESTS_CAMERA2_UTILS_H
+#define ANDROID_TESTS_CAMERA2_UTILS_H
+
+#include <system/camera_metadata.h>
+#include <hardware/camera2.h>
+
+#include <gui/SurfaceTextureClient.h>
+#include <gui/CpuConsumer.h>
+
+#include <utils/List.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+namespace android {
+
+/**
+ * Queue class used both for sending requests to a camera2 device and for
+ * receiving frames from it.
+ */
+class MetadataQueue: public camera2_request_queue_src_ops_t,
+                    public camera2_frame_queue_dst_ops_t {
+  public:
+    MetadataQueue();
+    ~MetadataQueue();
+
+    // Interface to camera2 HAL device, either for requests (device is consumer)
+    // or for frames (device is producer)
+    const camera2_request_queue_src_ops_t*   getToConsumerInterface();
+    void setFromConsumerInterface(camera2_device_t *d);
+
+    const camera2_frame_queue_dst_ops_t* getToProducerInterface();
+
+    // Real interfaces. On enqueue, the queue takes ownership of the buffer
+    // pointer; on dequeue, the caller takes ownership of it.
+    status_t enqueue(camera_metadata_t *buf);
+    status_t dequeue(camera_metadata_t **buf, bool incrementCount = true);
+    int      getBufferCount();
+    status_t waitForBuffer(nsecs_t timeout);
+
+    // Set repeating buffer(s); if the queue is empty on a dequeue call, the
+    // queue copies the contents of the stream slot into the queue, and then
+    // dequeues the first new entry.
+    status_t setStreamSlot(camera_metadata_t *buf);
+    status_t setStreamSlot(const List<camera_metadata_t*> &bufs);
+
+  private:
+    status_t freeBuffers(List<camera_metadata_t*>::iterator start,
+                         List<camera_metadata_t*>::iterator end);
+
+    camera2_device_t *mDevice;
+
+    Mutex mMutex;
+    Condition notEmpty;
+
+    int mFrameCount;
+
+    int mCount;
+    List<camera_metadata_t*> mEntries;
+    int mStreamSlotCount;
+    List<camera_metadata_t*> mStreamSlot;
+
+    bool mSignalConsumer;
+
+    static MetadataQueue* getInstance(const camera2_frame_queue_dst_ops_t *q);
+    static MetadataQueue* getInstance(const camera2_request_queue_src_ops_t *q);
+
+    static int consumer_buffer_count(const camera2_request_queue_src_ops_t *q);
+
+    static int consumer_dequeue(const camera2_request_queue_src_ops_t *q,
+            camera_metadata_t **buffer);
+
+    static int consumer_free(const camera2_request_queue_src_ops_t *q,
+            camera_metadata_t *old_buffer);
+
+    static int producer_dequeue(const camera2_frame_queue_dst_ops_t *q,
+            size_t entries, size_t bytes,
+            camera_metadata_t **buffer);
+
+    static int producer_cancel(const camera2_frame_queue_dst_ops_t *q,
+            camera_metadata_t *old_buffer);
+
+    static int producer_enqueue(const camera2_frame_queue_dst_ops_t *q,
+            camera_metadata_t *filled_buffer);
+
+};
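+
+// Illustrative wiring of the two queues to a device, as a test might do it
+// (a sketch, assuming the camera2 HAL's set_request_queue_src_ops and
+// set_frame_queue_dst_ops entry points; 'device' and 'request' are
+// hypothetical locals):
+//
+//     MetadataQueue requests, frames;
+//     requests.setFromConsumerInterface(device); // lets enqueue notify HAL
+//     device->ops->set_request_queue_src_ops(device,
+//             requests.getToConsumerInterface());
+//     device->ops->set_frame_queue_dst_ops(device,
+//             frames.getToProducerInterface());
+//     requests.enqueue(request);            // queue takes ownership
+//     frames.waitForBuffer(1000000000LL);   // wait up to 1 s for a frame
+//     camera_metadata_t *frame;
+//     frames.dequeue(&frame);               // caller takes ownership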
+
+/**
+ * Basic class to receive and queue up notifications from the camera device
+ */
+
+class NotifierListener {
+  public:
+
+    NotifierListener();
+
+    status_t getNotificationsFrom(camera2_device *dev);
+
+    status_t getNextNotification(int32_t *msg_type, int32_t *ext1,
+            int32_t *ext2, int32_t *ext3);
+
+    status_t waitForNotification(int32_t *msg_type, int32_t *ext1,
+            int32_t *ext2, int32_t *ext3);
+
+    int numNotifications();
+
+  private:
+
+    status_t getNextNotificationLocked(int32_t *msg_type,
+            int32_t *ext1, int32_t *ext2, int32_t *ext3);
+
+    struct Notification {
+        Notification(int32_t type, int32_t e1, int32_t e2, int32_t e3):
+                msg_type(type),
+                ext1(e1),
+                ext2(e2),
+                ext3(e3)
+        {}
+
+        int32_t msg_type;
+        int32_t ext1;
+        int32_t ext2;
+        int32_t ext3;
+    };
+
+    List<Notification> mNotifications;
+
+    Mutex mMutex;
+    Condition mNewNotification;
+
+    void onNotify(int32_t msg_type,
+            int32_t ext1,
+            int32_t ext2,
+            int32_t ext3);
+
+    static void notify_callback_dispatch(int32_t msg_type,
+            int32_t ext1,
+            int32_t ext2,
+            int32_t ext3,
+            void *user);
+
+};
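+
+// Illustrative usage, as a test might drive it (a sketch; 'device' is a
+// hypothetical local):
+//
+//     NotifierListener listener;
+//     ASSERT_EQ(OK, listener.getNotificationsFrom(device));
+//     // ... trigger some device activity ...
+//     int32_t msgType, ext1, ext2, ext3;
+//     ASSERT_EQ(OK,
+//             listener.waitForNotification(&msgType, &ext1, &ext2, &ext3));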
+
+/**
+ * Adapter from an ISurfaceTexture interface to camera2 device stream ops.
+ * Also takes care of allocating/deallocating the stream in the device
+ * interface.
+ */
+class StreamAdapter: public camera2_stream_ops {
+  public:
+    StreamAdapter(sp<ISurfaceTexture> consumer);
+
+    ~StreamAdapter();
+
+    status_t connectToDevice(camera2_device_t *d,
+            uint32_t width, uint32_t height, int format);
+
+    status_t disconnect();
+
+    // Get stream ID. Only valid after a successful connectToDevice call.
+    int      getId();
+
+  private:
+    enum {
+        ERROR = -1,
+        DISCONNECTED = 0,
+        UNINITIALIZED,
+        ALLOCATED,
+        CONNECTED,
+        ACTIVE
+    } mState;
+
+    sp<ANativeWindow> mConsumerInterface;
+    camera2_device_t *mDevice;
+
+    uint32_t mId;
+    uint32_t mWidth;
+    uint32_t mHeight;
+    uint32_t mFormat;
+    uint32_t mUsage;
+    uint32_t mMaxProducerBuffers;
+    uint32_t mMaxConsumerBuffers;
+
+    int mFormatRequested;
+
+    const camera2_stream_ops *getStreamOps();
+
+    static ANativeWindow* toANW(const camera2_stream_ops_t *w);
+
+    static int dequeue_buffer(const camera2_stream_ops_t *w,
+            buffer_handle_t** buffer);
+
+    static int enqueue_buffer(const camera2_stream_ops_t* w,
+            int64_t timestamp,
+            buffer_handle_t* buffer);
+
+    static int cancel_buffer(const camera2_stream_ops_t* w,
+            buffer_handle_t* buffer);
+
+    static int set_crop(const camera2_stream_ops_t* w,
+            int left, int top, int right, int bottom);
+
+};
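+
+// Illustrative usage (a sketch; 'texture' stands in for any ISurfaceTexture
+// producer endpoint, e.g. one backed by a CpuConsumer, and 'device' is a
+// hypothetical local):
+//
+//     StreamAdapter stream(texture);
+//     ASSERT_EQ(OK, stream.connectToDevice(device, 640, 480,
+//             HAL_PIXEL_FORMAT_YCrCb_420_SP));
+//     int id = stream.getId(); // reference this stream in capture requests
+//     // ... issue requests targeting 'id' ...
+//     ASSERT_EQ(OK, stream.disconnect());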
+
+/**
+ * Simple class to wait on the CpuConsumer to have a frame available
+ */
+class FrameWaiter : public CpuConsumer::FrameAvailableListener {
+  public:
+    FrameWaiter();
+
+    /**
+     * Wait at most 'timeout' nanoseconds for a new frame. Returns
+     * OK if a frame is available, TIMED_OUT if the timeout was reached.
+     */
+    status_t waitForFrame(nsecs_t timeout);
+
+    virtual void onFrameAvailable();
+
+    int mPendingFrames;
+    Mutex mMutex;
+    Condition mCondition;
+};
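+
+// Illustrative usage (a sketch, assuming CpuConsumer inherits
+// setFrameAvailableListener; 'consumer' is a hypothetical local):
+//
+//     sp<FrameWaiter> waiter = new FrameWaiter;
+//     consumer->setFrameAvailableListener(waiter);
+//     // ... queue a capture request ...
+//     ASSERT_EQ(OK, waiter->waitForFrame(2000000000LL)); // up to 2 s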
+
+} // namespace android
+
+#endif // ANDROID_TESTS_CAMERA2_UTILS_H