Merge "Libhardware: HAL: Fingerprint: Add template collection progress hint; remove match confidence."
diff --git a/include/hardware/activity_recognition.h b/include/hardware/activity_recognition.h
index ecac856..6ae90b7 100644
--- a/include/hardware/activity_recognition.h
+++ b/include/hardware/activity_recognition.h
@@ -44,19 +44,19 @@
  */
 
 /* Reserved. get_supported_activities_list() should not return this activity. */
-#define DETECTED_ACTIVITY_RESERVED          (0)
+#define ACTIVITY_RESERVED          (0)
 
-#define DETECTED_ACTIVITY_IN_VEHICLE        (1)
+#define ACTIVITY_IN_VEHICLE        (1)
 
-#define DETECTED_ACTIVITY_ON_BICYCLE        (2)
+#define ACTIVITY_ON_BICYCLE        (2)
 
-#define DETECTED_ACTIVITY_WALKING           (3)
+#define ACTIVITY_WALKING           (3)
 
-#define DETECTED_ACTIVITY_RUNNING           (4)
+#define ACTIVITY_RUNNING           (4)
 
-#define DETECTED_ACTIVITY_STILL             (5)
+#define ACTIVITY_STILL             (5)
 
-#define DETECTED_ACTIVITY_TILTING           (6)
+#define ACTIVITY_TILTING           (6)
 
 /* Values for activity_event.event_types. */
 enum {
@@ -70,7 +70,7 @@
      *
      * A flush complete event should have the following parameters set.
      * activity_event_t.event_type = ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE
-     * activity_event_t.detected_activity = DETECTED_ACTIVITY_RESERVED
+     * activity_event_t.activity = ACTIVITY_RESERVED
      * activity_event_t.timestamp = 0
      * activity_event_t.reserved = 0
      * See (*flush)() for more details.
@@ -86,14 +86,14 @@
 
 /*
  * Each event is a separate activity with event_type indicating whether this activity has started
- * or ended. Eg event: (event_type="enter", detected_activity="ON_FOOT", timestamp)
+ * or ended. E.g. event: (event_type="enter", activity="ON_FOOT", timestamp)
  */
 typedef struct activity_event {
     /* One of the ACTIVITY_EVENT_TYPE_* constants defined above. */
     uint32_t event_type;
 
-    /* Detected Activity. One of DETECTED_ACTIVITY_TYPE_* constants defined above. */
-    int32_t detected_activity;
+    /* One of ACTIVITY_* constants defined above. */
+    uint32_t activity;
 
     /* Time at which the transition/event has occurred in nanoseconds using elapsedRealTimeNano. */
     int64_t timestamp;
@@ -113,7 +113,7 @@
 
     /*
      * List of all activities supported by this module. Each activity is represented as an integer.
-     * Each value in the list is one of the DETECTED_ACTIVITY_* constants defined above. Return
+     * Each value in the list is one of the ACTIVITY_* constants defined above. Return
      * value is the size of this list.
      */
     int (*get_supported_activities_list)(struct activity_recognition_module* module,
@@ -127,7 +127,7 @@
     // Memory allocated for the events can be reused after this method returns.
     //    events - Array of activity_event_t s that are reported.
     //    count  - size of the array.
-    void (*activity_callback)(const struct activity_recognition_device* dev,
+    void (*activity_callback)(const struct activity_recognition_callback_procs* procs,
             const activity_event_t* events, int count);
 } activity_recognition_callback_procs_t;
 
@@ -148,27 +148,31 @@
             const activity_recognition_callback_procs_t* callback);
 
     /*
-     * Activates and deactivates monitoring of activity transitions. Activities need not be reported
-     * as soon as they are detected. The detected activities are stored in a FIFO and reported in
-     * batches when the "max_batch_report_latency" expires or when the batch FIFO is full. The
-     * implementation should allow the AP to go into suspend mode while the activities are detected
-     * and stored in the batch FIFO. Whenever events need to be reported (like when the FIFO is full
-     * or when the max_batch_report_latency has expired for an activity, event pair), it should
-     * wake_up the AP so that no events are lost. Activities are stored as transitions and they are
-     * allowed to overlap with each other.
-     * detected_activity - The specific activity that needs to be monitored.
-     * event_type    - Specific transition of the activity that needs to be monitored.
-     * enabled       - Enable/Disable detection of an (detected_activity, event_type) pair. Each
-     *                 pair can be activated or deactivated independently of the other. The HAL
-     *                 implementation needs to keep track of which pairs are currently active
-     *                 and needs to detect only those activities.
-     * max_batch_report_latency - a transition can be delayed by at most
-     *                            “max_batch_report_latency” nanoseconds.
+     * Activates monitoring of activity transitions. Activities need not be reported as soon as they
+     * are detected. The detected activities are stored in a FIFO and reported in batches when the
+     * "max_batch_report_latency" expires or when the batch FIFO is full. The implementation should
+     * allow the AP to go into suspend mode while the activities are detected and stored in the
+     * batch FIFO. Whenever events need to be reported (like when the FIFO is full or when the
+     * max_batch_report_latency has expired for an activity, event pair), it should wake_up the AP
+     * so that no events are lost. Activities are stored as transitions and they are allowed to
+     * overlap with each other. Each (activity, event_type) pair can be activated or deactivated
+     * independently of the other. The HAL implementation needs to keep track of which pairs are
+     * currently active and needs to detect only those pairs.
+     *
+     * activity   - The specific activity that needs to be detected.
+     * event_type - Specific transition of the activity that needs to be detected.
+     * max_batch_report_latency_ns - A transition can be delayed by at most
+     *                               "max_batch_report_latency_ns" nanoseconds.
      * Return 0 on success, negative errno code otherwise.
      */
-    int (*monitor_activity_event)(const struct activity_recognition_device* dev,
-            int32_t detected_activity, int32_t event_type, int64_t max_batch_report_latency_ns,
-            int32_t enabled);
+    int (*enable_activity_event)(const struct activity_recognition_device* dev,
+            uint32_t activity, uint32_t event_type, int64_t max_batch_report_latency_ns);
+
+    /*
+     * Disables detection of a specific (activity, event_type) pair.
+     *
+     * Return 0 on success, negative errno code otherwise.
+     */
+    int (*disable_activity_event)(const struct activity_recognition_device* dev,
+            uint32_t activity, uint32_t event_type);
 
     /*
      * Flush all the batch FIFOs. Report all the activities that were stored in the FIFO so far as
@@ -180,7 +184,7 @@
     int (*flush)(const struct activity_recognition_device* dev);
 
     // Must be set to NULL.
-    void (*reserved_procs[4])(void);
+    void (*reserved_procs[16 - 4])(void);
 } activity_recognition_device_t;
 
 static inline int activity_recognition_open(const hw_module_t* module,
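To make the enable/disable split concrete, here is a minimal usage sketch. It assumes a device handle obtained via activity_recognition_open(), a register_activity_callback entry point (the registration function whose tail appears in the hunk above), and an ACTIVITY_EVENT_TYPE_ENTER constant for the "enter" transition mentioned in the comments; treat all three names as illustrative rather than normative.

    #include <hardware/activity_recognition.h>

    static void on_activity_events(const activity_recognition_callback_procs_t *procs,
                                   const activity_event_t *events, int count) {
        (void)procs;
        for (int i = 0; i < count; ++i) {
            if (events[i].event_type == ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE)
                continue;  /* Marker event: activity == ACTIVITY_RESERVED, timestamp == 0. */
            /* Handle (events[i].activity, events[i].event_type, events[i].timestamp). */
        }
    }

    static const activity_recognition_callback_procs_t callbacks = {
        .activity_callback = on_activity_events,
    };

    /* Hypothetical setup; "dev" comes from activity_recognition_open(). */
    static int start_walking_detection(activity_recognition_device_t *dev) {
        dev->register_activity_callback(dev, &callbacks);
        /* Batch "walking started" transitions for up to 60 s before delivery. */
        return dev->enable_activity_event(dev, ACTIVITY_WALKING,
                ACTIVITY_EVENT_TYPE_ENTER, 60LL * 1000000000LL);
    }

Teardown would mirror this with dev->disable_activity_event(dev, ACTIVITY_WALKING, ACTIVITY_EVENT_TYPE_ENTER), since each pair is tracked independently.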
diff --git a/modules/audio_remote_submix/audio_hw.cpp b/modules/audio_remote_submix/audio_hw.cpp
index 8014d18..03f079f 100644
--- a/modules/audio_remote_submix/audio_hw.cpp
+++ b/modules/audio_remote_submix/audio_hw.cpp
@@ -23,6 +23,7 @@
 #include <stdlib.h>
 #include <sys/param.h>
 #include <sys/time.h>
+#include <sys/limits.h>
 
 #include <cutils/log.h>
 #include <cutils/properties.h>
@@ -74,11 +75,16 @@
 // multiple input streams from this device.  If this option is enabled, each input stream returned
 // is *the same stream* which means that readers will race to read data from these streams.
 #define ENABLE_LEGACY_INPUT_OPEN     1
+// Whether channel conversion (16-bit signed PCM mono->stereo, stereo->mono) is enabled.
+#define ENABLE_CHANNEL_CONVERSION    1
 
 // Common limits macros.
 #ifndef min
 #define min(a, b) ((a) < (b) ? (a) : (b))
 #endif // min
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif // max
 
 // Set *result_variable_ptr to true if value_to_find is present in the array array_to_search,
 // otherwise set *result_variable_ptr to false.
@@ -103,6 +109,7 @@
     // channel bitfields are not equivalent.
     audio_channel_mask_t input_channel_mask;
     audio_channel_mask_t output_channel_mask;
+    size_t pipe_frame_size;  // Number of bytes in each audio frame in the pipe.
     size_t buffer_size_frames; // Size of the audio pipe in frames.
     // Maximum number of frames buffered by the input and output streams.
     size_t buffer_period_size_frames;
@@ -283,6 +290,7 @@
 static bool audio_config_compare(const audio_config * const input_config,
         const audio_config * const output_config)
 {
+#if !ENABLE_CHANNEL_CONVERSION
     const uint32_t input_channels = get_channel_count_from_mask(input_config->channel_mask);
     const uint32_t output_channels = get_channel_count_from_mask(output_config->channel_mask);
     if (input_channels != output_channels) {
@@ -290,6 +298,7 @@
               input_channels, output_channels);
         return false;
     }
+#endif // !ENABLE_CHANNEL_CONVERSION
     if (input_config->sample_rate != output_config->sample_rate) {
         ALOGE("audio_config_compare() sample rate mismatch %ul vs. %ul",
               input_config->sample_rate, output_config->sample_rate);
@@ -356,6 +365,11 @@
         device_config->buffer_size_frames = sink->maxFrames();
         device_config->buffer_period_size_frames = device_config->buffer_size_frames /
                 buffer_period_count;
+        if (in) device_config->pipe_frame_size = audio_stream_frame_size(&in->stream.common);
+        if (out) device_config->pipe_frame_size = audio_stream_frame_size(&out->stream.common);
+        SUBMIX_ALOGV("submix_audio_device_create_pipe(): pipe frame size %zd, pipe size %zd, "
+                     "period size %zd", device_config->pipe_frame_size,
+                     device_config->buffer_size_frames, device_config->buffer_period_size_frames);
     }
     pthread_mutex_unlock(&rsxadev->lock);
 }
@@ -454,6 +468,17 @@
     return true;
 }
 
+// Calculate the number of frames of the specified stream that fit in the pipe buffer,
+// accounting for any difference between the stream's frame size and the pipe's frame size.
+static size_t calculate_stream_pipe_size_in_frames(const struct audio_stream *stream,
+                                                   const struct submix_config *config,
+                                                   const size_t pipe_frames)
+{
+    const size_t stream_frame_size = audio_stream_frame_size(stream);
+    const size_t pipe_frame_size = config->pipe_frame_size;
+    const size_t max_frame_size = max(stream_frame_size, pipe_frame_size);
+    return (pipe_frames * pipe_frame_size) / max_frame_size;
+}
+
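As a sanity check on the scaling above, here is a standalone sketch of the same arithmetic with illustrative frame sizes (16-bit PCM: 4-byte stereo frames, 2-byte mono frames); the function name and the numbers are assumptions for the example only.

    #include <assert.h>
    #include <stddef.h>

    static size_t scaled_pipe_frames(size_t pipe_frames, size_t pipe_frame_size,
                                     size_t stream_frame_size) {
        const size_t max_frame_size =
            pipe_frame_size > stream_frame_size ? pipe_frame_size : stream_frame_size;
        return (pipe_frames * pipe_frame_size) / max_frame_size;
    }

    int main(void) {
        /* Stereo 16-bit pipe (4-byte frames), mono stream (2-byte frames): */
        assert(scaled_pipe_frames(4096, 4, 2) == 4096);
        /* Mono 16-bit pipe (2-byte frames), stereo stream (4-byte frames): */
        assert(scaled_pipe_frames(4096, 2, 4) == 2048);
        /* Matching frame sizes leave the count unchanged: */
        assert(scaled_pipe_frames(4096, 4, 4) == 4096);
        return 0;
    }

Dividing by the larger of the two frame sizes keeps the reported frame count conservative whenever the stream and the pipe disagree on frame size.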
 /* audio HAL functions */
 
 static uint32_t out_get_sample_rate(const struct audio_stream *stream)
@@ -482,10 +507,12 @@
     const struct submix_stream_out * const out = audio_stream_get_submix_stream_out(
             const_cast<struct audio_stream *>(stream));
     const struct submix_config * const config = &out->dev->config;
-    const size_t buffer_size = config->buffer_period_size_frames * audio_stream_frame_size(stream);
+    const size_t buffer_size_frames = calculate_stream_pipe_size_in_frames(
+        stream, config, config->buffer_period_size_frames);
+    const size_t buffer_size_bytes = buffer_size_frames * audio_stream_frame_size(stream);
     SUBMIX_ALOGV("out_get_buffer_size() returns %zu bytes, %zu frames",
-                 buffer_size, config->buffer_period_size_frames);
-    return buffer_size;
+                 buffer_size_bytes, buffer_size_frames);
+    return buffer_size_bytes;
 }
 
 static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
@@ -577,9 +604,11 @@
     const struct submix_stream_out * const out = audio_stream_out_get_submix_stream_out(
             const_cast<struct audio_stream_out *>(stream));
     const struct submix_config * const config = &out->dev->config;
-    const uint32_t latency_ms = (config->buffer_size_frames * 1000) / config->common.sample_rate;
-    SUBMIX_ALOGV("out_get_latency() returns %u ms, size in frames %zu, sample rate %u", latency_ms,
-          config->buffer_size_frames, config->common.sample_rate);
+    const size_t buffer_size_frames = calculate_stream_pipe_size_in_frames(
+            &stream->common, config, config->buffer_size_frames);
+    const uint32_t latency_ms = (buffer_size_frames * 1000) / config->common.sample_rate;
+    SUBMIX_ALOGV("out_get_latency() returns %u ms, size in frames %zu, sample rate %u",
+                 latency_ms, buffer_size_frames, config->common.sample_rate);
     return latency_ms;
 }
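The latency formula is plain integer arithmetic; a quick check with illustrative numbers (a 4096-frame pipe at 48 kHz, both values assumed for the example):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int main(void) {
        const size_t buffer_size_frames = 4096;  /* Illustrative pipe size. */
        const uint32_t sample_rate = 48000;      /* Illustrative sample rate. */
        /* (4096 * 1000) / 48000 = 85 ms (integer division truncates). */
        assert((buffer_size_frames * 1000) / sample_rate == 85);
        return 0;
    }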
 
@@ -624,6 +653,25 @@
         return 0;
     }
 
+    // If the write to the sink would block when no input stream is present, flush enough frames
+    // from the pipe to make space to write the most recent data.
+    {
+        const size_t availableToWrite = sink->availableToWrite();
+        sp<MonoPipeReader> source = rsxadev->rsxSource;
+        if (rsxadev->input == NULL && availableToWrite < frames) {
+            static uint8_t flush_buffer[64];
+            const size_t flushBufferSizeFrames = sizeof(flush_buffer) / frame_size;
+            size_t frames_to_flush_from_source = frames - availableToWrite;
+            SUBMIX_ALOGV("out_write(): flushing %d frames from the pipe to avoid blocking",
+                         frames_to_flush_from_source);
+            while (frames_to_flush_from_source) {
+                const size_t flush_size = min(frames_to_flush_from_source, flushBufferSizeFrames);
+                frames_to_flush_from_source -= flush_size;
+                source->read(flush_buffer, flush_size, AudioBufferProvider::kInvalidPTS);
+            }
+        }
+    }
+
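The drain loop above discards only the overflow, in chunks bounded by the 64-byte scratch buffer. A standalone sketch of the chunking follows; the MonoPipeReader read is stubbed out, and the 4-byte frame size and 100-frame backlog are assumptions for illustration.

    #include <stdio.h>
    #include <stddef.h>

    #ifndef min
    #define min(a, b) ((a) < (b) ? (a) : (b))
    #endif

    int main(void) {
        static unsigned char flush_buffer[64];
        const size_t frame_size = 4;  /* Assumed: 16-bit stereo PCM. */
        const size_t flush_buffer_size_frames = sizeof(flush_buffer) / frame_size;  /* 16 */
        size_t frames_to_flush = 100;  /* Illustrative backlog: frames - availableToWrite. */
        while (frames_to_flush) {
            const size_t flush_size = min(frames_to_flush, flush_buffer_size_frames);
            frames_to_flush -= flush_size;
            /* source->read(flush_buffer, flush_size, ...) would discard the data here. */
            printf("discarding %zu frames\n", flush_size);
        }
        return 0;
    }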
     pthread_mutex_unlock(&rsxadev->lock);
 
     written_frames = sink->write(buffer, frames);
@@ -713,10 +761,13 @@
 {
     const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(
             const_cast<struct audio_stream*>(stream));
-    const size_t buffer_size = in->dev->config.buffer_period_size_frames *
-            audio_stream_frame_size(stream);
-    SUBMIX_ALOGV("in_get_buffer_size() returns %zu", buffer_size);
-    return buffer_size;
+    const struct submix_config * const config = &in->dev->config;
+    const size_t buffer_size_frames = calculate_stream_pipe_size_in_frames(
+        stream, config, config->buffer_period_size_frames);
+    const size_t buffer_size_bytes = buffer_size_frames * audio_stream_frame_size(stream);
+    SUBMIX_ALOGV("in_get_buffer_size() returns %zu bytes, %zu frames", buffer_size_bytes,
+                 buffer_size_frames);
+    return buffer_size_bytes;
 }
 
 static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
@@ -797,6 +848,7 @@
     ssize_t frames_read = -1977;
     struct submix_stream_in * const in = audio_stream_in_get_submix_stream_in(stream);
     struct submix_audio_device * const rsxadev = in->dev;
     const size_t frame_size = audio_stream_frame_size(&stream->common);
     const size_t frames_to_read = bytes / frame_size;
 
@@ -835,8 +887,65 @@
         // read the data from the pipe (it's non blocking)
         int attempts = 0;
         char* buff = (char*)buffer;
+#if ENABLE_CHANNEL_CONVERSION
+        // Determine whether channel conversion is required.
+        const uint32_t input_channels = get_channel_count_from_mask(
+            rsxadev->config.input_channel_mask);
+        const uint32_t output_channels = get_channel_count_from_mask(
+            rsxadev->config.output_channel_mask);
+        if (input_channels != output_channels) {
+            SUBMIX_ALOGV("in_read(): %d output channels will be converted to %d "
+                         "input channels", output_channels, input_channels);
+            // Only support 16-bit PCM channel conversion from mono to stereo or stereo to mono.
+            ALOG_ASSERT(rsxadev->config.common.format == AUDIO_FORMAT_PCM_16_BIT);
+            ALOG_ASSERT((input_channels == 1 && output_channels == 2) ||
+                        (input_channels == 2 && output_channels == 1));
+        }
+#endif // ENABLE_CHANNEL_CONVERSION
+
         while ((remaining_frames > 0) && (attempts < MAX_READ_ATTEMPTS)) {
-            frames_read = source->read(buff, remaining_frames, AudioBufferProvider::kInvalidPTS);
+            size_t read_frames = remaining_frames;
+#if ENABLE_CHANNEL_CONVERSION
+            if (output_channels == 1 && input_channels == 2) {
+                // Need to read half the requested frames since the converted output
+                // data will take twice the space (mono->stereo).
+                read_frames /= 2;
+            }
+#endif // ENABLE_CHANNEL_CONVERSION
+
+            SUBMIX_ALOGV("in_read(): frames available to read %zd", source->availableToRead());
+
+            frames_read = source->read(buff, read_frames, AudioBufferProvider::kInvalidPTS);
+
+            SUBMIX_ALOGV("in_read(): frames read %zd", frames_read);
+
+#if ENABLE_CHANNEL_CONVERSION
+            // Perform in-place channel conversion.
+            // NOTE: In the following "input stream" refers to the data returned by this function
+            // and "output stream" refers to the data read from the pipe.
+            if (input_channels != output_channels && frames_read > 0) {
+                int16_t *data = (int16_t*)buff;
+                if (output_channels == 2 && input_channels == 1) {
+                    // Offset into the output stream data in samples.
+                    ssize_t output_stream_offset = 0;
+                    for (ssize_t input_stream_frame = 0; input_stream_frame < frames_read;
+                         input_stream_frame++, output_stream_offset += 2) {
+                        // Average the content from both channels.
+                        data[input_stream_frame] = ((int32_t)data[output_stream_offset] +
+                                                    (int32_t)data[output_stream_offset + 1]) / 2;
+                    }
+                } else if (output_channels == 1 && input_channels == 2) {
+                    // Offset into the input stream data in samples.
+                    ssize_t input_stream_offset = (frames_read - 1) * 2;
+                    for (ssize_t output_stream_frame = frames_read - 1; output_stream_frame >= 0;
+                         output_stream_frame--, input_stream_offset -= 2) {
+                        const int16_t sample = data[output_stream_frame];
+                        data[input_stream_offset] = sample;
+                        data[input_stream_offset + 1] = sample;
+                    }
+                }
+            }
+#endif // ENABLE_CHANNEL_CONVERSION
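The direction of each loop above is deliberate: averaging stereo down to mono shrinks the data, so it can walk forward, while duplicating mono up to stereo grows it and must walk backward so the expanded pairs never overwrite mono samples that have not been read yet. A self-contained sketch of both in-place conversions (16-bit PCM, as the assertions earlier in in_read() require; the function names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Stereo -> mono in place: average each L/R pair, walking forward. */
    static void stereo_to_mono(int16_t *data, int frames) {
        for (int i = 0, j = 0; i < frames; ++i, j += 2)
            data[i] = (int16_t)(((int32_t)data[j] + (int32_t)data[j + 1]) / 2);
    }

    /* Mono -> stereo in place: duplicate each sample, walking backward so the
     * expanded pairs never clobber mono samples that are still unread. */
    static void mono_to_stereo(int16_t *data, int frames) {
        for (int i = frames - 1, j = (frames - 1) * 2; i >= 0; --i, j -= 2) {
            data[j] = data[i];
            data[j + 1] = data[i];
        }
    }

    int main(void) {
        int16_t buf[6] = {100, 200, 300, 0, 0, 0};  /* 3 mono frames + head-room. */
        mono_to_stereo(buf, 3);
        assert(buf[0] == 100 && buf[1] == 100);
        assert(buf[4] == 300 && buf[5] == 300);
        stereo_to_mono(buf, 3);
        assert(buf[0] == 100 && buf[1] == 200 && buf[2] == 300);
        return 0;
    }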
 
             if (frames_read > 0) {
                 remaining_frames -= frames_read;