Merge "Remove header symlinks"
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index 7c41b5e..e15e1a0 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -480,6 +480,7 @@
         case ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST:
         case ACAMERA_CONTROL_ENABLE_ZSL:
         case ACAMERA_CONTROL_BOKEH_MODE:
+        case ACAMERA_CONTROL_ZOOM_RATIO:
         case ACAMERA_EDGE_MODE:
         case ACAMERA_FLASH_MODE:
         case ACAMERA_HOT_PIXEL_MODE:
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 0652cbb..b4a907f 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -518,11 +518,22 @@
      * region and output only the intersection rectangle as the metering region in the result
      * metadata.  If the region is entirely outside the crop region, it will be ignored and
      * not reported in the result metadata.</p>
+     * <p>Starting from API level 30, the coordinate system of activeArraySize or
+     * preCorrectionActiveArraySize is used to represent post-zoomRatio field of view, not
+     * pre-zoom field of view. This means that the same aeRegions values at different
+     * ACAMERA_CONTROL_ZOOM_RATIO represent different parts of the scene. The aeRegions
+     * coordinates are relative to the activeArray/preCorrectionActiveArray representing the
+     * zoomed field of view. If ACAMERA_CONTROL_ZOOM_RATIO is set to 1.0 (default), the same
+     * aeRegions at different ACAMERA_SCALER_CROP_REGION still represent the same parts of the
+     * scene as they do before. See ACAMERA_CONTROL_ZOOM_RATIO for details. Whether to use
+     * activeArraySize or preCorrectionActiveArraySize still depends on distortion correction
+     * mode.</p>
      * <p>The data representation is <code>int[5 * area_count]</code>.
      * Every five elements represent a metering region of <code>(xmin, ymin, xmax, ymax, weight)</code>.
      * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
      * ymax.</p>
      *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -698,11 +709,22 @@
      * region and output only the intersection rectangle as the metering region in the result
      * metadata. If the region is entirely outside the crop region, it will be ignored and
      * not reported in the result metadata.</p>
+     * <p>Starting from API level 30, the coordinate system of activeArraySize or
+     * preCorrectionActiveArraySize is used to represent post-zoomRatio field of view, not
+     * pre-zoom field of view. This means that the same afRegions values at different
+     * ACAMERA_CONTROL_ZOOM_RATIO represent different parts of the scene. The afRegions
+     * coordinates are relative to the activeArray/preCorrectionActiveArray representing the
+     * zoomed field of view. If ACAMERA_CONTROL_ZOOM_RATIO is set to 1.0 (default), the same
+     * afRegions at different ACAMERA_SCALER_CROP_REGION still represent the same parts of the
+     * scene as they do before. See ACAMERA_CONTROL_ZOOM_RATIO for details. Whether to use
+     * activeArraySize or preCorrectionActiveArraySize still depends on distortion correction
+     * mode.</p>
      * <p>The data representation is <code>int[5 * area_count]</code>.
      * Every five elements represent a metering region of <code>(xmin, ymin, xmax, ymax, weight)</code>.
      * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
      * ymax.</p>
      *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -873,11 +895,22 @@
      * region and output only the intersection rectangle as the metering region in the result
      * metadata.  If the region is entirely outside the crop region, it will be ignored and
      * not reported in the result metadata.</p>
+     * <p>Starting from API level 30, the coordinate system of activeArraySize or
+     * preCorrectionActiveArraySize is used to represent post-zoomRatio field of view, not
+     * pre-zoom field of view. This means that the same awbRegions values at different
+     * ACAMERA_CONTROL_ZOOM_RATIO represent different parts of the scene. The awbRegions
+     * coordinates are relative to the activeArray/preCorrectionActiveArray representing the
+     * zoomed field of view. If ACAMERA_CONTROL_ZOOM_RATIO is set to 1.0 (default), the same
+     * awbRegions at different ACAMERA_SCALER_CROP_REGION still represent the same parts of
+     * the scene as they do before. See ACAMERA_CONTROL_ZOOM_RATIO for details. Whether to use
+     * activeArraySize or preCorrectionActiveArraySize still depends on distortion correction
+     * mode.</p>
      * <p>The data representation is <code>int[5 * area_count]</code>.
      * Every five elements represent a metering region of <code>(xmin, ymin, xmax, ymax, weight)</code>.
      * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
      * ymax.</p>
      *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -1735,8 +1768,10 @@
     ACAMERA_CONTROL_AF_SCENE_CHANGE =                           // byte (acamera_metadata_enum_android_control_af_scene_change_t)
             ACAMERA_CONTROL_START + 42,
     /**
-     * <p>The list of bokeh modes that are supported by this camera device, and each bokeh mode's
-     * maximum streaming (non-stall) size with bokeh effect.</p>
+     * <p>The list of bokeh modes for ACAMERA_CONTROL_BOKEH_MODE that are supported by this camera
+     * device, and each bokeh mode's maximum streaming (non-stall) size with bokeh effect.</p>
+     *
+     * @see ACAMERA_CONTROL_BOKEH_MODE
      *
      * <p>Type: int32[3*n]</p>
      *
@@ -1761,9 +1796,31 @@
      * application configures a stream with larger dimension, the stream may not have bokeh
      * effect applied.</p>
      */
-    ACAMERA_CONTROL_AVAILABLE_BOKEH_CAPABILITIES =              // int32[3*n]
+    ACAMERA_CONTROL_AVAILABLE_BOKEH_MAX_SIZES =                 // int32[3*n]
             ACAMERA_CONTROL_START + 43,
     /**
+     * <p>The ranges of supported zoom ratios for non-OFF ACAMERA_CONTROL_BOKEH_MODE.</p>
+     *
+     * @see ACAMERA_CONTROL_BOKEH_MODE
+     *
+     * <p>Type: float[2*n]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>When bokeh mode is enabled, the camera device may have a more limited range of zoom
+     * ratios than when bokeh mode is disabled. This tag lists the zoom ratio ranges for all
+     * supported non-OFF bokeh modes, in the same order as in
+     * ACAMERA_CONTROL_AVAILABLE_BOKEH_MAX_SIZES.</p>
+     * <p>Range [1.0, 1.0] means that no zoom (optical or digital) is supported.</p>
+     *
+     * @see ACAMERA_CONTROL_AVAILABLE_BOKEH_MAX_SIZES
+     */
+    ACAMERA_CONTROL_AVAILABLE_BOKEH_ZOOM_RATIO_RANGES =         // float[2*n]
+            ACAMERA_CONTROL_START + 44,
+    /**
      * <p>Whether bokeh mode is enabled for a particular capture request.</p>
      *
      * <p>Type: byte (acamera_metadata_enum_android_control_bokeh_mode_t)</p>
@@ -1804,7 +1861,71 @@
      * bokeh mode is off.</p>
      */
     ACAMERA_CONTROL_BOKEH_MODE =                                // byte (acamera_metadata_enum_android_control_bokeh_mode_t)
-            ACAMERA_CONTROL_START + 44,
+            ACAMERA_CONTROL_START + 45,
+    /**
+     * <p>Minimum and maximum zoom ratios supported by this camera device.</p>
+     *
+     * <p>Type: float[2]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>If the camera device supports zoom-out from 1x zoom, minZoom will be less than 1.0, and
+     * setting android.control.zoomRatio to values less than 1.0 increases the camera's field
+     * of view.</p>
+     */
+    ACAMERA_CONTROL_ZOOM_RATIO_RANGE =                          // float[2]
+            ACAMERA_CONTROL_START + 46,
+    /**
+     * <p>The desired zoom ratio.</p>
+     *
+     * <p>Type: float</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul></p>
+     *
+     * <p>Instead of using ACAMERA_SCALER_CROP_REGION with dual purposes of crop and zoom, the
+     * application can now choose to use this tag to specify the desired zoom level. The
+     * ACAMERA_SCALER_CROP_REGION can still be used to specify the horizontal or vertical
+     * crop to achieve aspect ratios different than the native camera sensor.</p>
+     * <p>By using this control, the application gains a simpler way to control zoom, which can
+     * be a combination of optical and digital zoom. More specifically, for a logical
+     * multi-camera with more than one focal length, using a floating point zoom ratio offers
+     * more zoom precision when a telephoto lens is used, as well as allowing zoom ratio of
+     * less than 1.0 to zoom out to a wide field of view.</p>
+     * <p>Note that the coordinate system of cropRegion, AE/AWB/AF regions, and faces now changes
+     * to the effective after-zoom field-of-view represented by the rectangle (0, 0,
+     * activeArrayWidth, activeArrayHeight).</p>
+     * <p>For example, if ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE is 4032*3024, and the preview stream
+     * is configured to the same 4:3 aspect ratio, the application can achieve 2.0x zoom in
+     * one of two ways:</p>
+     * <ul>
+     * <li>zoomRatio = 2.0, scaler.cropRegion = (0, 0, 4032, 3024)</li>
+     * <li>zoomRatio = 1.0 (default), scaler.cropRegion = (1008, 756, 3024, 2268)</li>
+     * </ul>
+     * <p>If the application intends to set aeRegions to be top-left quarter of the preview
+     * field-of-view, the ACAMERA_CONTROL_AE_REGIONS should be set to (0, 0, 2016, 1512) with
+     * zoomRatio set to 2.0. Alternatively, the application can set aeRegions to the equivalent
+     * region of (1008, 756, 2016, 1512) for zoomRatio of 1.0. If the application doesn't
+     * explicitly set ACAMERA_CONTROL_ZOOM_RATIO, its value defaults to 1.0.</p>
+     * <p>This coordinate system change isn't applicable to RAW capture and its related metadata
+     * such as intrinsicCalibration and lensShadingMap.</p>
+     * <p>One limitation of controlling zoom using zoomRatio is that the ACAMERA_SCALER_CROP_REGION
+     * must only be used for letterboxing or pillarboxing of the sensor active array, and no
+     * FREEFORM cropping can be used with ACAMERA_CONTROL_ZOOM_RATIO other than 1.0.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_REGIONS
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
+     * @see ACAMERA_SCALER_CROP_REGION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_CONTROL_ZOOM_RATIO =                                // float
+            ACAMERA_CONTROL_START + 47,
     ACAMERA_CONTROL_END,
 
     /**
@@ -3203,8 +3324,17 @@
      * for rounding and other hardware requirements; the final
      * crop region used will be included in the output capture
      * result.</p>
+     * <p>Starting from API level 30, it's strongly recommended to use ACAMERA_CONTROL_ZOOM_RATIO
+     * to take advantage of better support for zoom with logical multi-camera. The benefits
+     * include better precision with optical-digital zoom combination, and ability to do
+     * zoom-out from 1.0x. When using ACAMERA_CONTROL_ZOOM_RATIO for zoom, the crop region in
+     * the capture request must be either letterboxing or pillarboxing (but not both). The
+     * coordinate system is post-zoom, meaning that the activeArraySize or
+     * preCorrectionActiveArraySize covers the camera device's field of view "after" zoom.
+     * See ACAMERA_CONTROL_ZOOM_RATIO for details.</p>
      * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
      *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -3232,6 +3362,12 @@
      * <p>Crop regions that have a width or height that is smaller
      * than this ratio allows will be rounded up to the minimum
      * allowed size by the camera device.</p>
+     * <p>Starting from API level 30, when using ACAMERA_CONTROL_ZOOM_RATIO to zoom in or out,
+     * the application must use ACAMERA_CONTROL_ZOOM_RATIO_RANGE to query both the minimum and
+     * maximum zoom ratio.</p>
+     *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
+     * @see ACAMERA_CONTROL_ZOOM_RATIO_RANGE
      */
     ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM =                 // float
             ACAMERA_SCALER_START + 4,
@@ -3406,8 +3542,24 @@
      * <p>Camera devices that support FREEFORM cropping will support any crop region that
      * is inside of the active array. The camera device will apply the same crop region and
      * return the final used crop region in capture result metadata ACAMERA_SCALER_CROP_REGION.</p>
+     * <p>Starting from API level 30,</p>
+     * <ul>
+     * <li>If the camera device supports FREEFORM cropping, in order to do FREEFORM cropping, the
+     * application must set ACAMERA_CONTROL_ZOOM_RATIO to 1.0, and use ACAMERA_SCALER_CROP_REGION
+     * for zoom.</li>
+     * <li>To do CENTER_ONLY zoom, the application has the following two options:<ol>
+     * <li>Set ACAMERA_CONTROL_ZOOM_RATIO to 1.0; adjust zoom by ACAMERA_SCALER_CROP_REGION.</li>
+     * <li>Adjust zoom by ACAMERA_CONTROL_ZOOM_RATIO; use ACAMERA_SCALER_CROP_REGION to crop
+     * the field of view vertically (letterboxing) or horizontally (pillarboxing), but not
+     * windowboxing.</li>
+     * </ol>
+     * </li>
+     * <li>Setting ACAMERA_CONTROL_ZOOM_RATIO to values different than 1.0 and
+     * ACAMERA_SCALER_CROP_REGION to be windowboxing at the same time is undefined behavior.</li>
+     * </ul>
      * <p>LEGACY capability devices will only support CENTER_ONLY cropping.</p>
      *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
      */
@@ -4707,9 +4859,20 @@
      * When the distortion correction mode is not OFF, the coordinate system follows
      * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
      * <code>(0, 0)</code> being the top-left pixel of the active array.</p>
-     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE == FULL</p>
+     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE == FULL.</p>
+     * <p>Starting from API level 30, the coordinate system of activeArraySize or
+     * preCorrectionActiveArraySize is used to represent post-zoomRatio field of view, not
+     * pre-zoomRatio field of view. This means that if the relative position of faces and
+     * the camera device doesn't change, when zooming in by increasing
+     * ACAMERA_CONTROL_ZOOM_RATIO, the face landmarks move farther away from the center of the
+     * activeArray or preCorrectionActiveArray. If ACAMERA_CONTROL_ZOOM_RATIO is set to 1.0
+     * (default), the face landmarks coordinates won't change as ACAMERA_SCALER_CROP_REGION
+     * changes. See ACAMERA_CONTROL_ZOOM_RATIO for details. Whether to use activeArraySize or
+     * preCorrectionActiveArraySize still depends on distortion correction mode.</p>
      *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_DISTORTION_CORRECTION_MODE
+     * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
@@ -4738,10 +4901,21 @@
      * When the distortion correction mode is not OFF, the coordinate system follows
      * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
      * <code>(0, 0)</code> being the top-left pixel of the active array.</p>
-     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE != OFF
-     * The data representation is <code>int[4]</code>, which maps to <code>(left, top, right, bottom)</code>.</p>
+     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE != OFF.</p>
+     * <p>Starting from API level 30, the coordinate system of activeArraySize or
+     * preCorrectionActiveArraySize is used to represent post-zoomRatio field of view, not
+     * pre-zoomRatio field of view. This means that if the relative position of faces and
+     * the camera device doesn't change, when zooming in by increasing
+     * ACAMERA_CONTROL_ZOOM_RATIO, the face rectangles grow larger and move farther away from
+     * the center of the activeArray or preCorrectionActiveArray. If ACAMERA_CONTROL_ZOOM_RATIO
+     * is set to 1.0 (default), the face rectangles won't change as ACAMERA_SCALER_CROP_REGION
+     * changes. See ACAMERA_CONTROL_ZOOM_RATIO for details. Whether to use activeArraySize or
+     * preCorrectionActiveArraySize still depends on distortion correction mode.</p>
+     * <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, right, bottom)</code>.</p>
      *
+     * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_DISTORTION_CORRECTION_MODE
+     * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
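
The zoom-ratio documentation above can be condensed into a short NDK sketch. It only uses calls that exist in the camera NDK (ACameraMetadata_getConstEntry, ACaptureRequest_setEntry_float, ACaptureRequest_setEntry_i32); the `chars`/`request` parameters, the helper name `applyZoom`, and the 4032x3024 geometry reused from the doc's 2.0x example are illustrative assumptions, not part of this change.

    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCaptureRequest.h>

    // Hedged sketch: `chars` is an ACameraMetadata from ACameraManager_getCameraCharacteristics(),
    // `request` is an ACaptureRequest for a device with a 4032x3024 active array.
    static void applyZoom(const ACameraMetadata* chars, ACaptureRequest* request, float zoom) {
        // Query the supported zoom range and clamp the requested ratio.
        ACameraMetadata_const_entry range;
        if (ACameraMetadata_getConstEntry(chars, ACAMERA_CONTROL_ZOOM_RATIO_RANGE, &range) ==
                    ACAMERA_OK && range.count == 2) {
            if (zoom < range.data.f[0]) zoom = range.data.f[0];
            if (zoom > range.data.f[1]) zoom = range.data.f[1];
        }
        // Set the desired zoom ratio; the crop region can stay at the full active array.
        ACaptureRequest_setEntry_float(request, ACAMERA_CONTROL_ZOOM_RATIO, 1, &zoom);

        // AE regions are expressed in the post-zoom coordinate system: the top-left quarter of
        // the zoomed field of view is (0, 0, 2016, 1512) regardless of the zoom ratio in use.
        int32_t aeRegion[5] = {0, 0, 2016, 1512, 1000};
        ACaptureRequest_setEntry_i32(request, ACAMERA_CONTROL_AE_REGIONS, 5, aeRegion);
    }
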
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index eb623f9..a194416 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -78,18 +78,37 @@
     },
     srcs: ["protos/DeviceFiles.proto"],
 }
+
 cc_binary {
     name: "android.hardware.drm@1.2-service.clearkey",
     defaults: ["clearkey_service_defaults"],
     srcs: ["service.cpp"],
     init_rc: ["android.hardware.drm@1.2-service.clearkey.rc"],
-    vintf_fragments: ["manifest_android.hardware.drm@1.3-service.clearkey.xml"],
+    vintf_fragments: ["manifest_android.hardware.drm@1.2-service.clearkey.xml"],
 }
+
 cc_binary {
     name: "android.hardware.drm@1.2-service-lazy.clearkey",
     overrides: ["android.hardware.drm@1.2-service.clearkey"],
     defaults: ["clearkey_service_defaults"],
     srcs: ["serviceLazy.cpp"],
     init_rc: ["android.hardware.drm@1.2-service-lazy.clearkey.rc"],
+    vintf_fragments: ["manifest_android.hardware.drm@1.2-service.clearkey.xml"],
+}
+
+cc_binary {
+    name: "android.hardware.drm@1.3-service.clearkey",
+    defaults: ["clearkey_service_defaults"],
+    srcs: ["service.cpp"],
+    init_rc: ["android.hardware.drm@1.3-service.clearkey.rc"],
+    vintf_fragments: ["manifest_android.hardware.drm@1.3-service.clearkey.xml"],
+}
+
+cc_binary {
+    name: "android.hardware.drm@1.3-service-lazy.clearkey",
+    overrides: ["android.hardware.drm@1.3-service.clearkey"],
+    defaults: ["clearkey_service_defaults"],
+    srcs: ["serviceLazy.cpp"],
+    init_rc: ["android.hardware.drm@1.3-service-lazy.clearkey.rc"],
     vintf_fragments: ["manifest_android.hardware.drm@1.3-service.clearkey.xml"],
 }
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
index cff4d74..9afd3d7 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
@@ -5,8 +5,6 @@
     interface android.hardware.drm@1.1::IDrmFactory clearkey
     interface android.hardware.drm@1.2::ICryptoFactory clearkey
     interface android.hardware.drm@1.2::IDrmFactory clearkey
-    interface android.hardware.drm@1.3::ICryptoFactory clearkey
-    interface android.hardware.drm@1.3::IDrmFactory clearkey
     disabled
     oneshot
     class hal
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
index dca232a..c1abe7f 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
@@ -5,8 +5,7 @@
     interface android.hardware.drm@1.1::IDrmFactory clearkey
     interface android.hardware.drm@1.2::ICryptoFactory clearkey
     interface android.hardware.drm@1.2::IDrmFactory clearkey
-    interface android.hardware.drm@1.3::ICryptoFactory clearkey
-    interface android.hardware.drm@1.3::IDrmFactory clearkey
+    disabled
     class hal
     user media
     group media mediadrm
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
new file mode 100644
index 0000000..1e0d431
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service-lazy.clearkey.rc
@@ -0,0 +1,16 @@
+service vendor.drm-clearkey-hal-1-3 /vendor/bin/hw/android.hardware.drm@1.3-service-lazy.clearkey
+    interface android.hardware.drm@1.0::ICryptoFactory clearkey
+    interface android.hardware.drm@1.0::IDrmFactory clearkey
+    interface android.hardware.drm@1.1::ICryptoFactory clearkey
+    interface android.hardware.drm@1.1::IDrmFactory clearkey
+    interface android.hardware.drm@1.2::ICryptoFactory clearkey
+    interface android.hardware.drm@1.2::IDrmFactory clearkey
+    interface android.hardware.drm@1.3::ICryptoFactory clearkey
+    interface android.hardware.drm@1.3::IDrmFactory clearkey
+    disabled
+    oneshot
+    class hal
+    user media
+    group media mediadrm
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
new file mode 100644
index 0000000..8130511
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.3-service.clearkey.rc
@@ -0,0 +1,14 @@
+service vendor.drm-clearkey-hal-1-3 /vendor/bin/hw/android.hardware.drm@1.3-service.clearkey
+    interface android.hardware.drm@1.0::ICryptoFactory clearkey
+    interface android.hardware.drm@1.0::IDrmFactory clearkey
+    interface android.hardware.drm@1.1::ICryptoFactory clearkey
+    interface android.hardware.drm@1.1::IDrmFactory clearkey
+    interface android.hardware.drm@1.2::ICryptoFactory clearkey
+    interface android.hardware.drm@1.2::IDrmFactory clearkey
+    interface android.hardware.drm@1.3::ICryptoFactory clearkey
+    interface android.hardware.drm@1.3::IDrmFactory clearkey
+    class hal
+    user media
+    group media mediadrm
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/drm/mediadrm/plugins/clearkey/hidl/manifest_android.hardware.drm@1.2-service.clearkey.xml b/drm/mediadrm/plugins/clearkey/hidl/manifest_android.hardware.drm@1.2-service.clearkey.xml
new file mode 100644
index 0000000..16cba11
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/manifest_android.hardware.drm@1.2-service.clearkey.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<manifest version="1.0" type="device">
+    <hal format="hidl">
+        <name>android.hardware.drm</name>
+        <transport>hwbinder</transport>
+        <fqname>@1.2::ICryptoFactory/clearkey</fqname>
+        <fqname>@1.2::IDrmFactory/clearkey</fqname>
+    </hal>
+</manifest>
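
For context on the clearkey changes above: the `interface` lines in the new @1.3 rc files declare which HIDL interfaces the service serves, which is what lets init start the lazy variant on demand, and the manifest fragments advertise the served instances to the VINTF framework. A minimal sketch of how a client might look up the factories registered under the "clearkey" instance name, using the HIDL-generated getService(); the surrounding helper function is hypothetical.

    #include <android/hardware/drm/1.3/ICryptoFactory.h>
    #include <android/hardware/drm/1.3/IDrmFactory.h>

    using ::android::sp;
    using ::android::hardware::drm::V1_3::ICryptoFactory;
    using ::android::hardware::drm::V1_3::IDrmFactory;

    // Request the clearkey factories; with the lazy service installed, this request is what
    // triggers init to start vendor.drm-clearkey-hal-1-3.
    static bool getClearkeyFactories(sp<IDrmFactory>* drm, sp<ICryptoFactory>* crypto) {
        *drm = IDrmFactory::getService("clearkey");
        *crypto = ICryptoFactory::getService("clearkey");
        return *drm != nullptr && *crypto != nullptr;
    }
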
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index 071bb74..7723940 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -68,6 +68,8 @@
         "libutils",
     ],
 
+    tidy: false, // b/146435095, clang-tidy segmentation fault
+
     cflags: [
         "-Werror",
         "-Wall",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 88267f7..9e0bc96 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -83,7 +83,8 @@
                 const Trex *trex,
                 off64_t firstMoofOffset,
                 const sp<ItemTable> &itemTable,
-                uint64_t elstShiftStartTicks);
+                uint64_t elstShiftStartTicks,
+                uint64_t elstInitialEmptyEditTicks);
     virtual status_t init();
 
     virtual media_status_t start();
@@ -151,6 +152,7 @@
     // Start offset from composition time to presentation time.
     // Support shift only for video tracks through mElstShiftStartTicks for now.
     uint64_t mElstShiftStartTicks;
+    uint64_t mElstInitialEmptyEditTicks;
 
     size_t parseNALSize(const uint8_t *data) const;
     status_t parseChunk(off64_t *offset);
@@ -484,12 +486,11 @@
         int64_t duration;
         int32_t samplerate;
         // Only for audio track.
-        if (track->has_elst && mHeaderTimescale != 0 &&
-                AMediaFormat_getInt64(track->meta, AMEDIAFORMAT_KEY_DURATION, &duration) &&
-                AMediaFormat_getInt32(track->meta, AMEDIAFORMAT_KEY_SAMPLE_RATE, &samplerate)) {
-
+        if (track->elst_needs_processing && mHeaderTimescale != 0 &&
+            AMediaFormat_getInt64(track->meta, AMEDIAFORMAT_KEY_DURATION, &duration) &&
+            AMediaFormat_getInt32(track->meta, AMEDIAFORMAT_KEY_SAMPLE_RATE, &samplerate)) {
             // Elst has to be processed only the first time this function is called.
-            track->has_elst = false;
+            track->elst_needs_processing = false;
 
             if (track->elst_segment_duration > INT64_MAX) {
                 return;
@@ -1098,10 +1099,11 @@
                             track_b->sampleTable = mLastTrack->sampleTable;
                             track_b->includes_expensive_metadata = mLastTrack->includes_expensive_metadata;
                             track_b->skipTrack = mLastTrack->skipTrack;
-                            track_b->has_elst = mLastTrack->has_elst;
+                            track_b->elst_needs_processing = mLastTrack->elst_needs_processing;
                             track_b->elst_media_time = mLastTrack->elst_media_time;
                             track_b->elst_segment_duration = mLastTrack->elst_segment_duration;
-                            track_b->elstShiftStartTicks = mLastTrack->elstShiftStartTicks;
+                            track_b->elst_shift_start_ticks = mLastTrack->elst_shift_start_ticks;
+                            track_b->elst_initial_empty_edit_ticks = mLastTrack->elst_initial_empty_edit_ticks;
                             track_b->subsample_encryption = mLastTrack->subsample_encryption;
 
                             track_b->mTx3gBuffer = mLastTrack->mTx3gBuffer;
@@ -1204,39 +1206,86 @@
                 return ERROR_IO;
             }
 
-            if (entry_count != 1) {
-                // we only support a single entry at the moment, for gapless playback
-                // or start offset
+            if (entry_count > 2) {
+                /* For now we support at most two entries: a single entry for gapless playback
+                 * or for negating the composition offset used to reorder B frames, or two
+                 * entries where the first is an empty edit (used for a start offset).
+                 */
                 ALOGW("ignoring edit list with %d entries", entry_count);
             } else {
                 off64_t entriesoffset = data_offset + 8;
                 uint64_t segment_duration;
                 int64_t media_time;
-
-                if (version == 1) {
-                    if (!mDataSource->getUInt64(entriesoffset, &segment_duration) ||
-                            !mDataSource->getUInt64(entriesoffset + 8, (uint64_t*)&media_time)) {
-                        return ERROR_IO;
-                    }
-                } else if (version == 0) {
-                    uint32_t sd;
-                    int32_t mt;
-                    if (!mDataSource->getUInt32(entriesoffset, &sd) ||
+                uint64_t empty_edit_ticks = 0;
+                bool empty_edit_present = false;
+                for (int i = 0; i < entry_count; ++i) {
+                    switch (version) {
+                    case 0: {
+                        uint32_t sd;
+                        int32_t mt;
+                        if (!mDataSource->getUInt32(entriesoffset, &sd) ||
                             !mDataSource->getUInt32(entriesoffset + 4, (uint32_t*)&mt)) {
-                        return ERROR_IO;
+                            return ERROR_IO;
+                        }
+                        segment_duration = sd;
+                        media_time = mt;
+                        // 4(segment duration) + 4(media time) + 4(media rate)
+                        entriesoffset += 12;
+                        break;
                     }
-                    segment_duration = sd;
-                    media_time = mt;
-                } else {
-                    return ERROR_IO;
+                    case 1: {
+                        if (!mDataSource->getUInt64(entriesoffset, &segment_duration) ||
+                            !mDataSource->getUInt64(entriesoffset + 8, (uint64_t*)&media_time)) {
+                            return ERROR_IO;
+                        }
+                        // 8(segment duration) + 8(media time) + 4(media rate)
+                        entriesoffset += 20;
+                        break;
+                    }
+                    default:
+                        return ERROR_IO;
+                        break;
+                    }
+                    // An empty edit entry must be the first entry.
+                    if (media_time == -1 && i == 0) {
+                        int64_t durationUs;
+                        if (AMediaFormat_getInt64(mFileMetaData, AMEDIAFORMAT_KEY_DURATION,
+                                                  &durationUs)) {
+                            empty_edit_ticks = segment_duration;
+                            ALOGV("initial empty edit ticks: %" PRIu64, empty_edit_ticks);
+                            empty_edit_present = true;
+                        }
+                    }
+                    // Process second entry only when the first entry was an empty edit entry.
+                    if (empty_edit_present && i == 1) {
+                        int64_t durationUs;
+                        if (AMediaFormat_getInt64(mLastTrack->meta, AMEDIAFORMAT_KEY_DURATION,
+                                                  &durationUs) &&
+                            mHeaderTimescale != 0) {
+                            // Support only segment_duration<=track_duration and media_time==0 case.
+                            uint64_t segmentDurationUs =
+                                    segment_duration * 1000000 / mHeaderTimescale;
+                            if (segmentDurationUs == 0 || segmentDurationUs > durationUs ||
+                                media_time != 0) {
+                                ALOGW("for now, unsupported second entry in empty edit list");
+                            }
+                        }
+                    }
                 }
-
                 // save these for later, because the elst atom might precede
                 // the atoms that actually gives us the duration and sample rate
                 // needed to calculate the padding and delay values
-                mLastTrack->has_elst = true;
-                mLastTrack->elst_media_time = media_time;
-                mLastTrack->elst_segment_duration = segment_duration;
+                mLastTrack->elst_needs_processing = true;
+                if (empty_edit_present) {
+                    /* empty_edit_ticks is in the movie header timescale; it is converted to the
+                     * media timescale once that is known from the track's 'mdhd' atom, which at
+                     * times comes after 'elst'.
+                     */
+                    mLastTrack->elst_initial_empty_edit_ticks = empty_edit_ticks;
+                } else {
+                    mLastTrack->elst_media_time = media_time;
+                    mLastTrack->elst_segment_duration = segment_duration;
+                    ALOGV("segment_duration: %" PRIu64 " media_time: %" PRId64, segment_duration,
+                          media_time);
+                }
             }
             break;
         }
@@ -4275,15 +4324,28 @@
         }
     }
 
-    if (track->has_elst and !strncasecmp("video/", mime, 6) and track->elst_media_time > 0) {
-        track->elstShiftStartTicks = track->elst_media_time;
-        ALOGV("video track->elstShiftStartTicks :%" PRIu64, track->elstShiftStartTicks);
+    // media_time is in media timescale as are STTS/CTTS entries.
+    track->elst_shift_start_ticks = track->elst_media_time;
+    ALOGV("track->elst_shift_start_ticks :%" PRIu64, track->elst_shift_start_ticks);
+    if (mHeaderTimescale != 0) {
+        // Convert empty_edit_ticks from movie timescale to media timescale.
+        uint64_t elst_initial_empty_edit_ticks_mul = 0, elst_initial_empty_edit_ticks_add = 0;
+        if (__builtin_mul_overflow(track->elst_initial_empty_edit_ticks, track->timescale,
+                                   &elst_initial_empty_edit_ticks_mul) ||
+            __builtin_add_overflow(elst_initial_empty_edit_ticks_mul, (mHeaderTimescale / 2),
+                                   &elst_initial_empty_edit_ticks_add)) {
+            ALOGE("track->elst_initial_empty_edit_ticks overflow");
+            return nullptr;
+        }
+        track->elst_initial_empty_edit_ticks = elst_initial_empty_edit_ticks_add / mHeaderTimescale;
+        ALOGV("track->elst_initial_empty_edit_ticks :%" PRIu64,
+              track->elst_initial_empty_edit_ticks);
     }
 
-    MPEG4Source *source =  new MPEG4Source(
-            track->meta, mDataSource, track->timescale, track->sampleTable,
-            mSidxEntries, trex, mMoofOffset, itemTable,
-            track->elstShiftStartTicks);
+    MPEG4Source* source =
+            new MPEG4Source(track->meta, mDataSource, track->timescale, track->sampleTable,
+                            mSidxEntries, trex, mMoofOffset, itemTable,
+                            track->elst_shift_start_ticks, track->elst_initial_empty_edit_ticks);
     if (source->init() != OK) {
         delete source;
         return NULL;
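
A minimal, self-contained sketch of how an initial empty edit flows through the changes above, with hypothetical numbers and a hypothetical helper name: the 'elst' parser stores the empty-edit duration in movie-timescale ticks, getTrack() rescales it to the media timescale with the rounded, overflow-checked arithmetic shown in the diff, and the read paths then add the result to each sample's composition timestamp instead of clamping it.

    #include <cstdint>
    #include <cstdio>

    // Rescale ticks from the movie (header) timescale to the media (track) timescale,
    // rounding to nearest, mirroring the __builtin_*_overflow sequence used in getTrack().
    static bool rescaleTicks(uint64_t ticks, uint32_t mediaTimescale, uint32_t movieTimescale,
                             uint64_t* out) {
        uint64_t mul, add;
        if (__builtin_mul_overflow(ticks, (uint64_t)mediaTimescale, &mul) ||
            __builtin_add_overflow(mul, (uint64_t)(movieTimescale / 2), &add)) {
            return false;  // overflow, as guarded against in the extractor
        }
        *out = add / movieTimescale;
        return true;
    }

    int main() {
        // Hypothetical track: movie timescale 1000, media timescale 48000, and an edit list of
        //   entry 0: media_time = -1, segment_duration = 500   (initial empty edit, 0.5 s)
        //   entry 1: media_time = 0,  segment_duration = 9500  (play the track from its start)
        uint64_t emptyEditMediaTicks = 0;
        if (rescaleTicks(/*ticks=*/500, /*mediaTimescale=*/48000, /*movieTimescale=*/1000,
                         &emptyEditMediaTicks)) {
            // 500 movie ticks become 24000 media ticks (0.5 s at 48 kHz); each sample's cts is
            // shifted forward by this amount.
            printf("empty edit = %llu media ticks\n", (unsigned long long)emptyEditMediaTicks);
        }
        return 0;
    }
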
@@ -4776,7 +4838,8 @@
         const Trex *trex,
         off64_t firstMoofOffset,
         const sp<ItemTable> &itemTable,
-        uint64_t elstShiftStartTicks)
+        uint64_t elstShiftStartTicks,
+        uint64_t elstInitialEmptyEditTicks)
     : mFormat(format),
       mDataSource(dataSource),
       mTimescale(timeScale),
@@ -4806,7 +4869,8 @@
       mSrcBuffer(NULL),
       mIsHeif(itemTable != NULL),
       mItemTable(itemTable),
-      mElstShiftStartTicks(elstShiftStartTicks) {
+      mElstShiftStartTicks(elstShiftStartTicks),
+      mElstInitialEmptyEditTicks(elstInitialEmptyEditTicks) {
 
     memset(&mTrackFragmentHeaderInfo, 0, sizeof(mTrackFragmentHeaderInfo));
 
@@ -4925,35 +4989,14 @@
     }
 
     CHECK(AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_TRACK_ID, &mTrackId));
-
 }
 
 status_t MPEG4Source::init() {
-    status_t err = OK;
-    const char *mime;
-    CHECK(AMediaFormat_getString(mFormat, AMEDIAFORMAT_KEY_MIME, &mime));
     if (mFirstMoofOffset != 0) {
         off64_t offset = mFirstMoofOffset;
-        err = parseChunk(&offset);
-        if(err == OK && !strncasecmp("video/", mime, 6)
-            && !mCurrentSamples.isEmpty()) {
-            // Start offset should be less or equal to composition time of first sample.
-            // ISO : sample_composition_time_offset, version 0 (unsigned) for major brands.
-            mElstShiftStartTicks = std::min(mElstShiftStartTicks,
-                                            (uint64_t)(*mCurrentSamples.begin()).compositionOffset);
-        }
-        return err;
+        return parseChunk(&offset);
     }
-
-    if (!strncasecmp("video/", mime, 6)) {
-        uint64_t firstSampleCTS = 0;
-        err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
-        // Start offset should be less or equal to composition time of first sample.
-        // Composition time stamp of first sample cannot be negative.
-        mElstShiftStartTicks = std::min(mElstShiftStartTicks, firstSampleCTS);
-    }
-
-    return err;
+    return OK;
 }
 
 MPEG4Source::~MPEG4Source() {
@@ -5801,6 +5844,7 @@
 
     int64_t seekTimeUs;
     ReadOptions::SeekMode mode;
+
     if (options && options->getSeekTo(&seekTimeUs, &mode)) {
 
         if (mIsHeif) {
@@ -5842,6 +5886,8 @@
             }
             if( mode != ReadOptions::SEEK_FRAME_INDEX) {
                 seekTimeUs += ((long double)mElstShiftStartTicks * 1000000) / mTimescale;
+                ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRIu64, seekTimeUs,
+                      mElstShiftStartTicks);
             }
 
             uint32_t sampleIndex;
@@ -5916,7 +5962,8 @@
 
     off64_t offset = 0;
     size_t size = 0;
-    uint64_t cts, stts;
+    int64_t cts;
+    uint64_t stts;
     bool isSyncSample;
     bool newBuffer = false;
     if (mBuffer == NULL) {
@@ -5924,14 +5971,15 @@
 
         status_t err;
         if (!mIsHeif) {
-            err = mSampleTable->getMetaDataForSample(
-                    mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
+            err = mSampleTable->getMetaDataForSample(mCurrentSampleIndex, &offset, &size,
+                                                    (uint64_t*)&cts, &isSyncSample, &stts);
             if(err == OK) {
-                /* Composition Time Stamp cannot be negative. Some files have video Sample
-                * Time(STTS)delta with zero value(b/117402420).  Hence subtract only
-                * min(cts, mElstShiftStartTicks), so that audio tracks can be played.
-                */
-                cts -= std::min(cts, mElstShiftStartTicks);
+                if (mElstInitialEmptyEditTicks > 0) {
+                    cts += mElstInitialEmptyEditTicks;
+                } else {
+                    // cts can be negative; for example, initial audio samples for gapless playback.
+                    cts -= (int64_t)mElstShiftStartTicks;
+                }
             }
 
         } else {
@@ -6272,7 +6320,7 @@
 
     off64_t offset = 0;
     size_t size = 0;
-    uint64_t cts = 0;
+    int64_t cts = 0;
     bool isSyncSample = false;
     bool newBuffer = false;
     if (mBuffer == NULL || mCurrentSampleIndex >= mCurrentSamples.size()) {
@@ -6304,11 +6352,13 @@
         offset = smpl->offset;
         size = smpl->size;
         cts = mCurrentTime + smpl->compositionOffset;
-        /* Composition Time Stamp cannot be negative. Some files have video Sample
-        * Time(STTS)delta with zero value(b/117402420).  Hence subtract only
-        * min(cts, mElstShiftStartTicks), so that audio tracks can be played.
-        */
-        cts -= std::min(cts, mElstShiftStartTicks);
+
+        if (mElstInitialEmptyEditTicks > 0) {
+            cts += mElstInitialEmptyEditTicks;
+        } else {
+            // cts can be negative; for example, initial audio samples for gapless playback.
+            cts -= (int64_t)mElstShiftStartTicks;
+        }
 
         mCurrentTime += smpl->duration;
         isSyncSample = (mCurrentSampleIndex == 0);
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index fcddbb8..53ec6bc 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -82,14 +82,16 @@
         sp<SampleTable> sampleTable;
         bool includes_expensive_metadata;
         bool skipTrack;
-        bool has_elst;
+        bool elst_needs_processing;
         /* signed int, ISO Spec allows media_time = -1 for other use cases.
          * but we don't support empty edits for now.
          */
         int64_t elst_media_time;
         uint64_t elst_segment_duration;
-        // unsigned int, shift start offset only when media_time > 0.
-        uint64_t elstShiftStartTicks;
+        // Shift start offset only when media_time > 0.
+        uint64_t elst_shift_start_ticks;
+        // Initial start offset, empty edit list entry.
+        uint64_t elst_initial_empty_edit_ticks;
         bool subsample_encryption;
 
         uint8_t *mTx3gBuffer;
@@ -102,9 +104,11 @@
             timescale = 0;
             includes_expensive_metadata = false;
             skipTrack = false;
-            has_elst = false;
+            elst_needs_processing = false;
             elst_media_time = 0;
-            elstShiftStartTicks = 0;
+            elst_segment_duration = 0;
+            elst_shift_start_ticks = 0;
+            elst_initial_empty_edit_ticks = 0;
             subsample_encryption = false;
             mTx3gBuffer = NULL;
             mTx3gSize = mTx3gFilled = 0;
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
deleted file mode 100644
index 8eb70b1..0000000
--- a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
+++ /dev/null
@@ -1,1114 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tools for measuring latency and for detecting glitches.
- * These classes are pure math and can be used with any audio system.
- */
-
-#ifndef AAUDIO_EXAMPLES_LOOPBACK_ANALYSER_H
-#define AAUDIO_EXAMPLES_LOOPBACK_ANALYSER_H
-
-#include <algorithm>
-#include <assert.h>
-#include <cctype>
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <audio_utils/sndfile.h>
-
-// Tag for machine readable results as property = value pairs
-#define LOOPBACK_RESULT_TAG      "RESULT: "
-
-constexpr int32_t kDefaultSampleRate = 48000;
-constexpr int32_t kMillisPerSecond   = 1000;
-constexpr int32_t kMinLatencyMillis  = 4;    // arbitrary and very low
-constexpr int32_t kMaxLatencyMillis  = 400;  // arbitrary and generous
-constexpr double  kMaxEchoGain       = 10.0; // based on experiments, otherwise too noisy
-constexpr double  kMinimumConfidence = 0.5;
-
-static void printAudioScope(float sample) {
-    const int maxStars = 80; // arbitrary, fits on one line
-    char c = '*';
-    if (sample < -1.0) {
-        sample = -1.0;
-        c = '$';
-    } else if (sample > 1.0) {
-        sample = 1.0;
-        c = '$';
-    }
-    int numSpaces = (int) (((sample + 1.0) * 0.5) * maxStars);
-    for (int i = 0; i < numSpaces; i++) {
-        putchar(' ');
-    }
-    printf("%c\n", c);
-}
-
-/*
-
-FIR filter designed with
-http://t-filter.appspot.com
-
-sampling frequency: 48000 Hz
-
-* 0 Hz - 8000 Hz
-  gain = 1.2
-  desired ripple = 5 dB
-  actual ripple = 5.595266169703693 dB
-
-* 12000 Hz - 20000 Hz
-  gain = 0
-  desired attenuation = -40 dB
-  actual attenuation = -37.58691566571914 dB
-
-*/
-
-#define FILTER_TAP_NUM 11
-
-static const float sFilterTaps8000[FILTER_TAP_NUM] = {
-        -0.05944219353343189f,
-        -0.07303434839503208f,
-        -0.037690487672689066f,
-        0.1870480506596512f,
-        0.3910337357836833f,
-        0.5333672385425637f,
-        0.3910337357836833f,
-        0.1870480506596512f,
-        -0.037690487672689066f,
-        -0.07303434839503208f,
-        -0.05944219353343189f
-};
-
-class LowPassFilter {
-public:
-
-    /*
-     * Filter one input sample.
-     * @return filtered output
-     */
-    float filter(float input) {
-        float output = 0.0f;
-        mX[mCursor] = input;
-        // Index backwards over x.
-        int xIndex = mCursor + FILTER_TAP_NUM;
-        // Write twice so we avoid having to wrap in the middle of the convolution.
-        mX[xIndex] = input;
-        for (int i = 0; i < FILTER_TAP_NUM; i++) {
-            output += sFilterTaps8000[i] * mX[xIndex--];
-        }
-        if (++mCursor >= FILTER_TAP_NUM) {
-            mCursor = 0;
-        }
-        return output;
-    }
-
-    /**
-     * @return true if PASSED
-     */
-    bool test() {
-        // Measure the impulse of the filter at different phases so we exercise
-        // all the wraparound cases in the FIR.
-        for (int offset = 0; offset < (FILTER_TAP_NUM * 2); offset++ ) {
-            // printf("LowPassFilter: cursor = %d\n", mCursor);
-            // Offset by one each time.
-            if (filter(0.0f) != 0.0f) {
-                printf("ERROR: filter should return 0.0 before impulse response\n");
-                return false;
-            }
-            for (int i = 0; i < FILTER_TAP_NUM; i++) {
-                float output = filter((i == 0) ? 1.0f : 0.0f); // impulse
-                if (output != sFilterTaps8000[i]) {
-                    printf("ERROR: filter should return impulse response\n");
-                    return false;
-                }
-            }
-            for (int i = 0; i < FILTER_TAP_NUM; i++) {
-                if (filter(0.0f) != 0.0f) {
-                    printf("ERROR: filter should return 0.0 after impulse response\n");
-                    return false;
-                }
-            }
-        }
-        return true;
-    }
-
-private:
-    float   mX[FILTER_TAP_NUM * 2]{}; // twice as big as needed to avoid wrapping
-    int32_t mCursor = 0;
-};
-
-// A narrow impulse seems to have better immunity against over estimating the
-// latency due to detecting subharmonics by the auto-correlator.
-static const float s_Impulse[] = {
-        0.0f, 0.0f, 0.0f, 0.0f, 0.3f, // silence on each side of the impulse
-        0.99f, 0.0f, -0.99f, // bipolar with one zero crossing in middle
-        -0.3f, 0.0f, 0.0f, 0.0f, 0.0f
-};
-
-constexpr int32_t kImpulseSizeInFrames = (int32_t)(sizeof(s_Impulse) / sizeof(s_Impulse[0]));
-
-class PseudoRandom {
-public:
-    PseudoRandom() {}
-    PseudoRandom(int64_t seed)
-            :    mSeed(seed)
-    {}
-
-    /**
-     * Returns the next random double from -1.0 to 1.0
-     *
-     * @return value from -1.0 to 1.0
-     */
-     double nextRandomDouble() {
-        return nextRandomInteger() * (0.5 / (((int32_t)1) << 30));
-    }
-
-    /** Calculate random 32 bit number using linear-congruential method. */
-    int32_t nextRandomInteger() {
-        // Use values for 64-bit sequence from MMIX by Donald Knuth.
-        mSeed = (mSeed * (int64_t)6364136223846793005) + (int64_t)1442695040888963407;
-        return (int32_t) (mSeed >> 32); // The higher bits have a longer sequence.
-    }
-
-private:
-    int64_t mSeed = 99887766;
-};
-
-
-typedef struct LatencyReport_s {
-    double latencyInFrames;
-    double confidence;
-} LatencyReport;
-
-static double calculateCorrelation(const float *a,
-                                   const float *b,
-                                   int windowSize)
-{
-    double correlation = 0.0;
-    double sumProducts = 0.0;
-    double sumSquares = 0.0;
-
-    // Correlate a against b.
-    for (int i = 0; i < windowSize; i++) {
-        float s1 = a[i];
-        float s2 = b[i];
-        // Use a normalized cross-correlation.
-        sumProducts += s1 * s2;
-        sumSquares += ((s1 * s1) + (s2 * s2));
-    }
-
-    if (sumSquares >= 0.00000001) {
-        correlation = (float) (2.0 * sumProducts / sumSquares);
-    }
-    return correlation;
-}
-
-static int measureLatencyFromEchos(const float *data,
-                                   int32_t numFloats,
-                                   int32_t sampleRate,
-                                   LatencyReport *report) {
-    // Allocate results array
-    const int minReasonableLatencyFrames = sampleRate * kMinLatencyMillis / kMillisPerSecond;
-    const int maxReasonableLatencyFrames = sampleRate * kMaxLatencyMillis / kMillisPerSecond;
-    int32_t maxCorrelationSize = maxReasonableLatencyFrames * 3;
-    int numCorrelations = std::min(numFloats, maxCorrelationSize);
-    float *correlations = new float[numCorrelations]{};
-    float *harmonicSums = new float[numCorrelations]{};
-
-    // Perform sliding auto-correlation.
-    // Skip first frames to avoid huge peak at zero offset.
-    for (int i = minReasonableLatencyFrames; i < numCorrelations; i++) {
-        int32_t remaining = numFloats - i;
-        float correlation = (float) calculateCorrelation(&data[i], data, remaining);
-        correlations[i] = correlation;
-        // printf("correlation[%d] = %f\n", ic, correlation);
-    }
-
-    // Apply a technique similar to Harmonic Product Spectrum Analysis to find echo fundamental.
-    // Add higher harmonics mapped onto lower harmonics. This reinforces the "fundamental" echo.
-    const int numEchoes = 8;
-    for (int partial = 1; partial < numEchoes; partial++) {
-        for (int i = minReasonableLatencyFrames; i < numCorrelations; i++) {
-            harmonicSums[i / partial] += correlations[i] / partial;
-        }
-    }
-
-    // Find highest peak in correlation array.
-    float maxCorrelation = 0.0;
-    int peakIndex = 0;
-    for (int i = 0; i < numCorrelations; i++) {
-        if (harmonicSums[i] > maxCorrelation) {
-            maxCorrelation = harmonicSums[i];
-            peakIndex = i;
-            // printf("maxCorrelation = %f at %d\n", maxCorrelation, peakIndex);
-        }
-    }
-    report->latencyInFrames = peakIndex;
-/*
-    {
-        int32_t topPeak = peakIndex * 7 / 2;
-        for (int i = 0; i < topPeak; i++) {
-            float sample = harmonicSums[i];
-            printf("%4d: %7.5f ", i, sample);
-            printAudioScope(sample);
-        }
-    }
-*/
-
-    // Calculate confidence.
-    if (maxCorrelation < 0.001) {
-        report->confidence = 0.0;
-    } else {
-        // Compare peak to average value around peak.
-        int32_t numSamples = std::min(numCorrelations, peakIndex * 2);
-        if (numSamples <= 0) {
-            report->confidence = 0.0;
-        } else {
-            double sum = 0.0;
-            for (int i = 0; i < numSamples; i++) {
-                sum += harmonicSums[i];
-            }
-            const double average = sum / numSamples;
-            const double ratio = average / maxCorrelation; // will be < 1.0
-            report->confidence = 1.0 - sqrt(ratio);
-        }
-    }
-
-    delete[] correlations;
-    delete[] harmonicSums;
-    return 0;
-}
-
-class AudioRecording
-{
-public:
-    AudioRecording() {
-    }
-    ~AudioRecording() {
-        delete[] mData;
-    }
-
-    void allocate(int maxFrames) {
-        delete[] mData;
-        mData = new float[maxFrames];
-        mMaxFrames = maxFrames;
-    }
-
-    // Write SHORT data from the first channel.
-    int32_t write(int16_t *inputData, int32_t inputChannelCount, int32_t numFrames) {
-        // stop at end of buffer
-        if ((mFrameCounter + numFrames) > mMaxFrames) {
-            numFrames = mMaxFrames - mFrameCounter;
-        }
-        for (int i = 0; i < numFrames; i++) {
-            mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
-        }
-        return numFrames;
-    }
-
-    // Write FLOAT data from the first channel.
-    int32_t write(float *inputData, int32_t inputChannelCount, int32_t numFrames) {
-        // stop at end of buffer
-        if ((mFrameCounter + numFrames) > mMaxFrames) {
-            numFrames = mMaxFrames - mFrameCounter;
-        }
-        for (int i = 0; i < numFrames; i++) {
-            mData[mFrameCounter++] = inputData[i * inputChannelCount];
-        }
-        return numFrames;
-    }
-
-    int32_t size() {
-        return mFrameCounter;
-    }
-
-    float *getData() {
-        return mData;
-    }
-
-    void setSampleRate(int32_t sampleRate) {
-        mSampleRate = sampleRate;
-    }
-
-    int32_t getSampleRate() {
-        return mSampleRate;
-    }
-
-    int save(const char *fileName, bool writeShorts = true) {
-        SNDFILE *sndFile = nullptr;
-        int written = 0;
-        SF_INFO info = {
-                .frames = mFrameCounter,
-                .samplerate = mSampleRate,
-                .channels = 1,
-                .format = SF_FORMAT_WAV | (writeShorts ? SF_FORMAT_PCM_16 : SF_FORMAT_FLOAT)
-        };
-
-        sndFile = sf_open(fileName, SFM_WRITE, &info);
-        if (sndFile == nullptr) {
-            printf("AudioRecording::save(%s) failed to open file\n", fileName);
-            return -errno;
-        }
-
-        written = sf_writef_float(sndFile, mData, mFrameCounter);
-
-        sf_close(sndFile);
-        return written;
-    }
-
-    int load(const char *fileName) {
-        SNDFILE *sndFile = nullptr;
-        SF_INFO info;
-
-        sndFile = sf_open(fileName, SFM_READ, &info);
-        if (sndFile == nullptr) {
-            printf("AudioRecording::load(%s) failed to open file\n", fileName);
-            return -errno;
-        }
-
-        assert(info.channels == 1);
-        assert(info.format == SF_FORMAT_FLOAT);
-
-        setSampleRate(info.samplerate);
-        allocate(info.frames);
-        mFrameCounter = sf_readf_float(sndFile, mData, info.frames);
-
-        sf_close(sndFile);
-        return mFrameCounter;
-    }
-
-    /**
-     * Square the samples so they are all positive and so the peaks are emphasized.
-     */
-    void square() {
-        for (int i = 0; i < mFrameCounter; i++) {
-            const float sample = mData[i];
-            mData[i] = sample * sample;
-        }
-    }
-
-    /**
-     * Low pass filter the recording using a simple FIR filter.
-     * Note that the lowpass filter cutoff tracks the sample rate.
-     * That is OK because the impulse width is a fixed number of samples.
-     */
-    void lowPassFilter() {
-        for (int i = 0; i < mFrameCounter; i++) {
-            mData[i] = mLowPassFilter.filter(mData[i]);
-        }
-    }
-
-    /**
-     * Remove DC offset using a one-pole one-zero IIR filter.
-     */
-    void dcBlocker() {
-        const float R = 0.996; // narrow notch at zero Hz
-        float x1 = 0.0;
-        float y1 = 0.0;
-        for (int i = 0; i < mFrameCounter; i++) {
-            const float x = mData[i];
-            const float y = x - x1 + (R * y1);
-            mData[i] = y;
-            y1 = y;
-            x1 = x;
-        }
-    }
-
-private:
-    float        *mData = nullptr;
-    int32_t       mFrameCounter = 0;
-    int32_t       mMaxFrames = 0;
-    int32_t       mSampleRate = kDefaultSampleRate; // common default
-    LowPassFilter mLowPassFilter;
-};
-
-// ====================================================================================
-class LoopbackProcessor {
-public:
-    virtual ~LoopbackProcessor() = default;
-
-
-    enum process_result {
-        PROCESS_RESULT_OK,
-        PROCESS_RESULT_GLITCH
-    };
-
-    virtual void reset() {}
-
-    virtual process_result process(float *inputData, int inputChannelCount,
-                 float *outputData, int outputChannelCount,
-                 int numFrames) = 0;
-
-
-    virtual void report() = 0;
-
-    virtual void printStatus() {};
-
-    int32_t getResult() {
-        return mResult;
-    }
-
-    void setResult(int32_t result) {
-        mResult = result;
-    }
-
-    virtual bool isDone() {
-        return false;
-    }
-
-    virtual int save(const char *fileName) {
-        (void) fileName;
-        return AAUDIO_ERROR_UNIMPLEMENTED;
-    }
-
-    virtual int load(const char *fileName) {
-        (void) fileName;
-        return AAUDIO_ERROR_UNIMPLEMENTED;
-    }
-
-    virtual void setSampleRate(int32_t sampleRate) {
-        mSampleRate = sampleRate;
-    }
-
-    int32_t getSampleRate() {
-        return mSampleRate;
-    }
-
-    // Measure peak amplitude of buffer.
-    static float measurePeakAmplitude(float *inputData, int inputChannelCount, int numFrames) {
-        float peak = 0.0f;
-        for (int i = 0; i < numFrames; i++) {
-            const float pos = fabs(*inputData);
-            if (pos > peak) {
-                peak = pos;
-            }
-            inputData += inputChannelCount;
-        }
-        return peak;
-    }
-
-
-private:
-    int32_t mSampleRate = kDefaultSampleRate;
-    int32_t mResult = 0;
-};
-
-class PeakDetector {
-public:
-    float process(float input) {
-        float output = mPrevious * mDecay;
-        if (input > output) {
-            output = input;
-        }
-        mPrevious = output;
-        return output;
-    }
-
-private:
-    float  mDecay = 0.99f;
-    float  mPrevious = 0.0f;
-};
-
-// ====================================================================================
-/**
- * Measure latency given a loopback stream data.
- * Uses a state machine to cycle through various stages including:
- *
- */
-class EchoAnalyzer : public LoopbackProcessor {
-public:
-
-    EchoAnalyzer() : LoopbackProcessor() {
-        mAudioRecording.allocate(2 * getSampleRate());
-        mAudioRecording.setSampleRate(getSampleRate());
-    }
-
-    void setSampleRate(int32_t sampleRate) override {
-        LoopbackProcessor::setSampleRate(sampleRate);
-        mAudioRecording.setSampleRate(sampleRate);
-    }
-
-    void reset() override {
-        mDownCounter = getSampleRate() / 2;
-        mLoopCounter = 0;
-        mMeasuredLoopGain = 0.0f;
-        mEchoGain = 1.0f;
-        mState = STATE_INITIAL_SILENCE;
-    }
-
-    virtual bool isDone() {
-        return mState == STATE_DONE || mState == STATE_FAILED;
-    }
-
-    void setGain(float gain) {
-        mEchoGain = gain;
-    }
-
-    float getGain() {
-        return mEchoGain;
-    }
-
-    bool testLowPassFilter() {
-        LowPassFilter filter;
-        return filter.test();
-    }
-
-    void report() override {
-        printf("EchoAnalyzer ---------------\n");
-        if (getResult() != 0) {
-            printf(LOOPBACK_RESULT_TAG "result          = %d\n", getResult());
-            return;
-        }
-
-        // printf("LowPassFilter test %s\n", testLowPassFilter() ? "PASSED" : "FAILED");
-
-        printf(LOOPBACK_RESULT_TAG "measured.gain          = %8f\n", mMeasuredLoopGain);
-        printf(LOOPBACK_RESULT_TAG "echo.gain              = %8f\n", mEchoGain);
-        printf(LOOPBACK_RESULT_TAG "test.state             = %8d\n", mState);
-        printf(LOOPBACK_RESULT_TAG "test.state.name        = %8s\n", convertStateToText(mState));
-
-        if (mState == STATE_WAITING_FOR_SILENCE) {
-            printf("WARNING - Stuck waiting for silence. Input may be too noisy!\n");
-            setResult(ERROR_NOISY);
-        } else if (mMeasuredLoopGain >= 0.9999) {
-            printf("   ERROR - clipping, turn down volume slightly\n");
-            setResult(ERROR_CLIPPING);
-        } else if (mState != STATE_DONE && mState != STATE_GATHERING_ECHOS) {
-            printf("WARNING - Bad state. Check volume on device.\n");
-            setResult(ERROR_INVALID_STATE);
-        } else {
-            // Cleanup the signal to improve the auto-correlation.
-            mAudioRecording.dcBlocker();
-            mAudioRecording.square();
-            mAudioRecording.lowPassFilter();
-
-            printf("Please wait several seconds for auto-correlation to complete.\n");
-            measureLatencyFromEchos(mAudioRecording.getData(),
-                                    mAudioRecording.size(),
-                                    getSampleRate(),
-                                    &mLatencyReport);
-
-            double latencyMillis = kMillisPerSecond * (double) mLatencyReport.latencyInFrames
-                                   / getSampleRate();
-            printf(LOOPBACK_RESULT_TAG "latency.frames         = %8.2f\n",
-                   mLatencyReport.latencyInFrames);
-            printf(LOOPBACK_RESULT_TAG "latency.msec           = %8.2f\n",
-                   latencyMillis);
-            printf(LOOPBACK_RESULT_TAG "latency.confidence     = %8.6f\n",
-                   mLatencyReport.confidence);
-            if (mLatencyReport.confidence < kMinimumConfidence) {
-                printf("   ERROR - confidence too low!\n");
-                setResult(ERROR_CONFIDENCE);
-            }
-        }
-    }
-
-    void printStatus() override {
-        printf("st = %d, echo gain = %f ", mState, mEchoGain);
-    }
-
-    void sendImpulses(float *outputData, int outputChannelCount, int numFrames) {
-        while (numFrames-- > 0) {
-            float sample = s_Impulse[mSampleIndex++];
-            if (mSampleIndex >= kImpulseSizeInFrames) {
-                mSampleIndex = 0;
-            }
-
-            *outputData = sample;
-            outputData += outputChannelCount;
-        }
-    }
-
-    void sendOneImpulse(float *outputData, int outputChannelCount) {
-        mSampleIndex = 0;
-        sendImpulses(outputData, outputChannelCount, kImpulseSizeInFrames);
-    }
-
-    // @return number of frames for a typical block of processing
-    int32_t getBlockFrames() {
-        return getSampleRate() / 8;
-    }
-
-    process_result process(float *inputData, int inputChannelCount,
-                 float *outputData, int outputChannelCount,
-                 int numFrames) override {
-        int channelsValid = std::min(inputChannelCount, outputChannelCount);
-        float peak = 0.0f;
-        int numWritten;
-        int numSamples;
-
-        echo_state nextState = mState;
-
-        switch (mState) {
-            case STATE_INITIAL_SILENCE:
-                // Output silence at the beginning.
-                numSamples = numFrames * outputChannelCount;
-                for (int i = 0; i < numSamples; i++) {
-                    outputData[i] = 0;
-                }
-                mDownCounter -= numFrames;
-                if (mDownCounter <= 0) {
-                    nextState = STATE_MEASURING_GAIN;
-                    //printf("%5d: switch to STATE_MEASURING_GAIN\n", mLoopCounter);
-                    mDownCounter = getBlockFrames() * 2;
-                }
-                break;
-
-            case STATE_MEASURING_GAIN:
-                sendImpulses(outputData, outputChannelCount, numFrames);
-                peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
-                // If we get several in a row then go to next state.
-                if (peak > mPulseThreshold) {
-                    mDownCounter -= numFrames;
-                    if (mDownCounter <= 0) {
-                        //printf("%5d: switch to STATE_WAITING_FOR_SILENCE, measured peak = %f\n",
-                        //       mLoopCounter, peak);
-                        mDownCounter = getBlockFrames();
-                        mMeasuredLoopGain = peak;  // assumes original pulse amplitude is one
-                        mSilenceThreshold = peak * 0.1; // scale silence to measured pulse
-                        // Calculate gain that will give us a nice decaying echo.
-                        mEchoGain = mDesiredEchoGain / mMeasuredLoopGain;
-                        if (mEchoGain > kMaxEchoGain) {
-                            printf("ERROR - loop gain too low. Increase the volume.\n");
-                            nextState = STATE_FAILED;
-                        } else {
-                            nextState = STATE_WAITING_FOR_SILENCE;
-                        }
-                    }
-                } else if (numFrames > kImpulseSizeInFrames){ // ignore short callbacks
-                    mDownCounter = getBlockFrames();
-                }
-                break;
-
-            case STATE_WAITING_FOR_SILENCE:
-                // Output silence and wait for the echos to die down.
-                numSamples = numFrames * outputChannelCount;
-                for (int i = 0; i < numSamples; i++) {
-                    outputData[i] = 0;
-                }
-                peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
-                // If we get several in a row then go to next state.
-                if (peak < mSilenceThreshold) {
-                    mDownCounter -= numFrames;
-                    if (mDownCounter <= 0) {
-                        nextState = STATE_SENDING_PULSE;
-                        //printf("%5d: switch to STATE_SENDING_PULSE\n", mLoopCounter);
-                        mDownCounter = getBlockFrames();
-                    }
-                } else {
-                    mDownCounter = getBlockFrames();
-                }
-                break;
-
-            case STATE_SENDING_PULSE:
-                mAudioRecording.write(inputData, inputChannelCount, numFrames);
-                sendOneImpulse(outputData, outputChannelCount);
-                nextState = STATE_GATHERING_ECHOS;
-                //printf("%5d: switch to STATE_GATHERING_ECHOS\n", mLoopCounter);
-                break;
-
-            case STATE_GATHERING_ECHOS:
-                numWritten = mAudioRecording.write(inputData, inputChannelCount, numFrames);
-                peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
-                if (peak > mMeasuredLoopGain) {
-                    mMeasuredLoopGain = peak;  // AGC might be raising gain so adjust it on the fly.
-                    // Recalculate gain that will give us a nice decaying echo.
-                    mEchoGain = mDesiredEchoGain / mMeasuredLoopGain;
-                }
-                // Echo input to output.
-                for (int i = 0; i < numFrames; i++) {
-                    int ic;
-                    for (ic = 0; ic < channelsValid; ic++) {
-                        outputData[ic] = inputData[ic] * mEchoGain;
-                    }
-                    for (; ic < outputChannelCount; ic++) {
-                        outputData[ic] = 0;
-                    }
-                    inputData += inputChannelCount;
-                    outputData += outputChannelCount;
-                }
-                if (numWritten  < numFrames) {
-                    nextState = STATE_DONE;
-                }
-                break;
-
-            case STATE_DONE:
-            case STATE_FAILED:
-            default:
-                break;
-        }
-
-        mState = nextState;
-        mLoopCounter++;
-        return PROCESS_RESULT_OK;
-    }
-
-    int save(const char *fileName) override {
-        return mAudioRecording.save(fileName);
-    }
-
-    int load(const char *fileName) override {
-        int result = mAudioRecording.load(fileName);
-        setSampleRate(mAudioRecording.getSampleRate());
-        mState = STATE_DONE;
-        return result;
-    }
-
-private:
-
-    enum error_code {
-        ERROR_OK = 0,
-        ERROR_NOISY = -99,
-        ERROR_CLIPPING,
-        ERROR_CONFIDENCE,
-        ERROR_INVALID_STATE
-    };
-
-    enum echo_state {
-        STATE_INITIAL_SILENCE,
-        STATE_MEASURING_GAIN,
-        STATE_WAITING_FOR_SILENCE,
-        STATE_SENDING_PULSE,
-        STATE_GATHERING_ECHOS,
-        STATE_DONE,
-        STATE_FAILED
-    };
-
-    const char *convertStateToText(echo_state state) {
-        const char *result = "Unknown";
-        switch(state) {
-            case STATE_INITIAL_SILENCE:
-                result = "INIT";
-                break;
-            case STATE_MEASURING_GAIN:
-                result = "GAIN";
-                break;
-            case STATE_WAITING_FOR_SILENCE:
-                result = "SILENCE";
-                break;
-            case STATE_SENDING_PULSE:
-                result = "PULSE";
-                break;
-            case STATE_GATHERING_ECHOS:
-                result = "ECHOS";
-                break;
-            case STATE_DONE:
-                result = "DONE";
-                break;
-            case STATE_FAILED:
-                result = "FAILED";
-                break;
-        }
-        return result;
-    }
-
-
-    int32_t         mDownCounter = 500;
-    int32_t         mLoopCounter = 0;
-    int32_t         mSampleIndex = 0;
-    float           mPulseThreshold = 0.02f;
-    float           mSilenceThreshold = 0.002f;
-    float           mMeasuredLoopGain = 0.0f;
-    float           mDesiredEchoGain = 0.95f;
-    float           mEchoGain = 1.0f;
-    echo_state      mState = STATE_INITIAL_SILENCE;
-
-    AudioRecording  mAudioRecording; // contains only the input after the gain detection burst
-    LatencyReport   mLatencyReport;
-    // PeakDetector    mPeakDetector;
-};
-
-
-// ====================================================================================
-/**
- * Output a steady sinewave and analyze the return signal.
- *
- * Use a cosine transform to measure the predicted magnitude and relative phase of the
- * looped back sine wave. Then generate a predicted signal and compare with the actual signal.
- */
-class SineAnalyzer : public LoopbackProcessor {
-public:
-
-    void report() override {
-        printf("SineAnalyzer ------------------\n");
-        printf(LOOPBACK_RESULT_TAG "peak.amplitude     = %8f\n", mPeakAmplitude);
-        printf(LOOPBACK_RESULT_TAG "sine.magnitude     = %8f\n", mMagnitude);
-        printf(LOOPBACK_RESULT_TAG "peak.noise         = %8f\n", mPeakNoise);
-        printf(LOOPBACK_RESULT_TAG "rms.noise          = %8f\n", mRootMeanSquareNoise);
-        float amplitudeRatio = mMagnitude / mPeakNoise;
-        float signalToNoise = amplitudeRatio * amplitudeRatio;
-        printf(LOOPBACK_RESULT_TAG "signal.to.noise    = %8.2f\n", signalToNoise);
-        float signalToNoiseDB = 10.0 * log(signalToNoise);
-        printf(LOOPBACK_RESULT_TAG "signal.to.noise.db = %8.2f\n", signalToNoiseDB);
-        if (signalToNoiseDB < MIN_SNRATIO_DB) {
-            printf("ERROR - signal to noise ratio is too low! < %d dB. Adjust volume.\n", MIN_SNRATIO_DB);
-            setResult(ERROR_NOISY);
-        }
-        printf(LOOPBACK_RESULT_TAG "frames.accumulated = %8d\n", mFramesAccumulated);
-        printf(LOOPBACK_RESULT_TAG "sine.period        = %8d\n", mSinePeriod);
-        printf(LOOPBACK_RESULT_TAG "test.state         = %8d\n", mState);
-        printf(LOOPBACK_RESULT_TAG "frame.count        = %8d\n", mFrameCounter);
-        // Did we ever get a lock?
-        bool gotLock = (mState == STATE_LOCKED) || (mGlitchCount > 0);
-        if (!gotLock) {
-            printf("ERROR - failed to lock on reference sine tone\n");
-            setResult(ERROR_NO_LOCK);
-        } else {
-            // Only print if meaningful.
-            printf(LOOPBACK_RESULT_TAG "glitch.count       = %8d\n", mGlitchCount);
-            printf(LOOPBACK_RESULT_TAG "max.glitch         = %8f\n", mMaxGlitchDelta);
-            if (mGlitchCount > 0) {
-                printf("ERROR - number of glitches > 0\n");
-                setResult(ERROR_GLITCHES);
-            }
-        }
-    }
-
-    void printStatus() override {
-        printf("st = %d, #gl = %3d,", mState, mGlitchCount);
-    }
-
-    double calculateMagnitude(double *phasePtr = NULL) {
-        if (mFramesAccumulated == 0) {
-            return 0.0;
-        }
-        double sinMean = mSinAccumulator / mFramesAccumulated;
-        double cosMean = mCosAccumulator / mFramesAccumulated;
-        double magnitude = 2.0 * sqrt( (sinMean * sinMean) + (cosMean * cosMean ));
-        if( phasePtr != NULL )
-        {
-            double phase = M_PI_2 - atan2( sinMean, cosMean );
-            *phasePtr = phase;
-        }
-        return magnitude;
-    }
-
-    /**
-     * @param inputData contains microphone data with sine signal feedback
-     * @param outputData contains the reference sine wave
-     */
-    process_result process(float *inputData, int inputChannelCount,
-                 float *outputData, int outputChannelCount,
-                 int numFrames) override {
-        process_result result = PROCESS_RESULT_OK;
-        mProcessCount++;
-
-        float peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
-        if (peak > mPeakAmplitude) {
-            mPeakAmplitude = peak;
-        }
-
-        for (int i = 0; i < numFrames; i++) {
-            bool sineEnabled = true;
-            float sample = inputData[i * inputChannelCount];
-
-            float sinOut = sinf(mPhase);
-
-            switch (mState) {
-                case STATE_IDLE:
-                    sineEnabled = false;
-                    mDownCounter--;
-                    if (mDownCounter <= 0) {
-                        mState = STATE_MEASURE_NOISE;
-                        mDownCounter = NOISE_FRAME_COUNT;
-                    }
-                    break;
-                case STATE_MEASURE_NOISE:
-                    sineEnabled = false;
-                    mPeakNoise = std::max(abs(sample), mPeakNoise);
-                    mNoiseSumSquared += sample * sample;
-                    mDownCounter--;
-                    if (mDownCounter <= 0) {
-                        mState = STATE_WAITING_FOR_SIGNAL;
-                        mRootMeanSquareNoise = sqrt(mNoiseSumSquared / NOISE_FRAME_COUNT);
-                        mTolerance = std::max(MIN_TOLERANCE, mPeakNoise * 2.0f);
-                        mPhase = 0.0; // prevent spike at start
-                    }
-                    break;
-
-                case STATE_IMMUNE:
-                    mDownCounter--;
-                    if (mDownCounter <= 0) {
-                        mState = STATE_WAITING_FOR_SIGNAL;
-                    }
-                    break;
-
-                case STATE_WAITING_FOR_SIGNAL:
-                    if (peak > mThreshold) {
-                        mState = STATE_WAITING_FOR_LOCK;
-                        //printf("%5d: switch to STATE_WAITING_FOR_LOCK\n", mFrameCounter);
-                        resetAccumulator();
-                    }
-                    break;
-
-                case STATE_WAITING_FOR_LOCK:
-                    mSinAccumulator += sample * sinOut;
-                    mCosAccumulator += sample * cosf(mPhase);
-                    mFramesAccumulated++;
-                    // Must be a multiple of the period or the calculation will not be accurate.
-                    if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
-                        mPhaseOffset = 0.0;
-                        mMagnitude = calculateMagnitude(&mPhaseOffset);
-                        if (mMagnitude > mThreshold) {
-                            if (fabs(mPreviousPhaseOffset - mPhaseOffset) < 0.001) {
-                                mState = STATE_LOCKED;
-                                //printf("%5d: switch to STATE_LOCKED\n", mFrameCounter);
-                            }
-                            mPreviousPhaseOffset = mPhaseOffset;
-                        }
-                        resetAccumulator();
-                    }
-                    break;
-
-                case STATE_LOCKED: {
-                    // Predict next sine value
-                    float predicted = sinf(mPhase + mPhaseOffset) * mMagnitude;
-                    // printf("    predicted = %f, actual = %f\n", predicted, sample);
-
-                    float diff = predicted - sample;
-                    float absDiff = fabs(diff);
-                    mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
-                    if (absDiff > mTolerance) {
-                        mGlitchCount++;
-                        result = PROCESS_RESULT_GLITCH;
-                        //printf("%5d: Got a glitch # %d, predicted = %f, actual = %f\n",
-                        //       mFrameCounter, mGlitchCount, predicted, sample);
-                        mState = STATE_IMMUNE;
-                        mDownCounter = mSinePeriod * PERIODS_IMMUNE;
-                    }
-
-                    // Track incoming signal and slowly adjust magnitude to account
-                    // for drift in the DRC or AGC.
-                    mSinAccumulator += sample * sinOut;
-                    mCosAccumulator += sample * cosf(mPhase);
-                    mFramesAccumulated++;
-                    // Must be a multiple of the period or the calculation will not be accurate.
-                    if (mFramesAccumulated == mSinePeriod) {
-                        const double coefficient = 0.1;
-                        double phaseOffset = 0.0;
-                        double magnitude = calculateMagnitude(&phaseOffset);
-                        // One pole averaging filter.
-                        mMagnitude = (mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient);
-                        resetAccumulator();
-                    }
-                } break;
-            }
-
-            float output = 0.0f;
-            // Output sine wave so we can measure it.
-            if (sineEnabled) {
-                output = (sinOut * mOutputAmplitude)
-                         + (mWhiteNoise.nextRandomDouble() * mNoiseAmplitude);
-                // printf("%5d: sin(%f) = %f, %f\n", i, mPhase, sinOut,  mPhaseIncrement);
-                // advance and wrap phase
-                mPhase += mPhaseIncrement;
-                if (mPhase > M_PI) {
-                    mPhase -= (2.0 * M_PI);
-                }
-            }
-            outputData[i * outputChannelCount] = output;
-
-
-            mFrameCounter++;
-        }
-        return result;
-    }
-
-    void resetAccumulator() {
-        mFramesAccumulated = 0;
-        mSinAccumulator = 0.0;
-        mCosAccumulator = 0.0;
-    }
-
-    void reset() override {
-        mGlitchCount = 0;
-        mState = STATE_IDLE;
-        mDownCounter = IDLE_FRAME_COUNT;
-        mPhaseIncrement = 2.0 * M_PI / mSinePeriod;
-        printf("phaseInc = %f for period %d\n", mPhaseIncrement, mSinePeriod);
-        resetAccumulator();
-        mProcessCount = 0;
-        mPeakNoise = 0.0f;
-        mNoiseSumSquared = 0.0;
-        mRootMeanSquareNoise = 0.0;
-        mPhase = 0.0f;
-        mMaxGlitchDelta = 0.0;
-    }
-
-private:
-
-    enum error_code {
-        OK,
-        ERROR_NO_LOCK = -80,
-        ERROR_GLITCHES,
-        ERROR_NOISY
-    };
-
-    enum sine_state_t {
-        STATE_IDLE,
-        STATE_MEASURE_NOISE,
-        STATE_IMMUNE,
-        STATE_WAITING_FOR_SIGNAL,
-        STATE_WAITING_FOR_LOCK,
-        STATE_LOCKED
-    };
-
-    enum constants {
-        // Arbitrary durations, assuming 48000 Hz
-        IDLE_FRAME_COUNT = 48 * 100,
-        NOISE_FRAME_COUNT = 48 * 600,
-        PERIODS_NEEDED_FOR_LOCK = 8,
-        PERIODS_IMMUNE = 2,
-        MIN_SNRATIO_DB = 65
-    };
-
-    static constexpr float MIN_TOLERANCE = 0.01;
-
-    int     mSinePeriod = 79;
-    double  mPhaseIncrement = 0.0;
-    double  mPhase = 0.0;
-    double  mPhaseOffset = 0.0;
-    double  mPreviousPhaseOffset = 0.0;
-    double  mMagnitude = 0.0;
-    double  mThreshold = 0.005;
-    double  mTolerance = MIN_TOLERANCE;
-    int32_t mFramesAccumulated = 0;
-    int32_t mProcessCount = 0;
-    double  mSinAccumulator = 0.0;
-    double  mCosAccumulator = 0.0;
-    float   mMaxGlitchDelta = 0.0f;
-    int32_t mGlitchCount = 0;
-    double  mPeakAmplitude = 0.0;
-    int     mDownCounter = IDLE_FRAME_COUNT;
-    int32_t mFrameCounter = 0;
-    float   mOutputAmplitude = 0.75;
-
-    // measure background noise
-    float   mPeakNoise = 0.0f;
-    double  mNoiseSumSquared = 0.0;
-    double  mRootMeanSquareNoise = 0.0;
-
-    PseudoRandom  mWhiteNoise;
-    float   mNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
-
-    sine_state_t  mState = STATE_IDLE;
-};
-
-#undef LOOPBACK_RESULT_TAG
-
-#endif /* AAUDIO_EXAMPLES_LOOPBACK_ANALYSER_H */
diff --git a/media/libaaudio/examples/loopback/src/analyzer/GlitchAnalyzer.h b/media/libaaudio/examples/loopback/src/analyzer/GlitchAnalyzer.h
new file mode 100644
index 0000000..04435d1
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/analyzer/GlitchAnalyzer.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_GLITCH_ANALYZER_H
+#define ANALYZER_GLITCH_ANALYZER_H
+
+#include <algorithm>
+#include <cctype>
+#include <iomanip>
+#include <iostream>
+
+#include "LatencyAnalyzer.h"
+#include "PseudoRandom.h"
+
+/**
+ * Output a steady sine wave and analyze the return signal.
+ *
+ * Use a cosine transform to measure the predicted magnitude and relative phase of the
+ * looped back sine wave. Then generate a predicted signal and compare with the actual signal.
+ */
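+/*
+ * Illustrative usage sketch (hypothetical caller; mono float streams assumed):
+ *
+ *     GlitchAnalyzer analyzer;
+ *     analyzer.setSampleRate(48000);
+ *     analyzer.prepareToTest();
+ *     // In each duplex callback:
+ *     analyzer.process(inputFloats, 1, numInputFrames,
+ *                      outputFloats, 1, numOutputFrames);
+ *     // After the stream has run for a while:
+ *     printf("%s", analyzer.analyze().c_str());
+ *     int32_t glitchCount = analyzer.getGlitchCount();
+ */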
+class GlitchAnalyzer : public LoopbackProcessor {
+public:
+
+    int32_t getState() const {
+        return mState;
+    }
+
+    double getPeakAmplitude() const {
+        return mPeakFollower.getLevel();
+    }
+
+    double getTolerance() {
+        return mTolerance;
+    }
+
+    void setTolerance(double tolerance) {
+        mTolerance = tolerance;
+        mScaledTolerance = mMagnitude * mTolerance;
+    }
+
+    void setMagnitude(double magnitude) {
+        mMagnitude = magnitude;
+        mScaledTolerance = mMagnitude * mTolerance;
+    }
+
+    int32_t getGlitchCount() const {
+        return mGlitchCount;
+    }
+
+    int32_t getStateFrameCount(int state) const {
+        return mStateFrameCounters[state];
+    }
+
+    double getSignalToNoiseDB() {
+        static const double threshold = 1.0e-14;
+        if (mMeanSquareSignal < threshold || mMeanSquareNoise < threshold) {
+            return 0.0;
+        } else {
+            double signalToNoise = mMeanSquareSignal / mMeanSquareNoise; // power ratio
+            double signalToNoiseDB = 10.0 * log10(signalToNoise); // dB of a power ratio
+            if (signalToNoiseDB < MIN_SNR_DB) {
+                ALOGD("ERROR - signal to noise ratio is too low! < %d dB. Adjust volume.",
+                     MIN_SNR_DB);
+                setResult(ERROR_VOLUME_TOO_LOW);
+            }
+            return signalToNoiseDB;
+        }
+    }
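+    // For reference, MIN_SNR_DB = 65 corresponds to a power ratio
+    // mMeanSquareSignal / mMeanSquareNoise of roughly 3.2e6.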
+
+    std::string analyze() override {
+        std::stringstream report;
+        report << "GlitchAnalyzer ------------------\n";
+        report << LOOPBACK_RESULT_TAG "peak.amplitude     = " << std::setw(8)
+               << getPeakAmplitude() << "\n";
+        report << LOOPBACK_RESULT_TAG "sine.magnitude     = " << std::setw(8)
+               << mMagnitude << "\n";
+        report << LOOPBACK_RESULT_TAG "rms.noise          = " << std::setw(8)
+               << mMeanSquareNoise << "\n";
+        report << LOOPBACK_RESULT_TAG "signal.to.noise.db = " << std::setw(8)
+               << getSignalToNoiseDB() << "\n";
+        report << LOOPBACK_RESULT_TAG "frames.accumulated = " << std::setw(8)
+               << mFramesAccumulated << "\n";
+        report << LOOPBACK_RESULT_TAG "sine.period        = " << std::setw(8)
+               << mSinePeriod << "\n";
+        report << LOOPBACK_RESULT_TAG "test.state         = " << std::setw(8)
+               << mState << "\n";
+        report << LOOPBACK_RESULT_TAG "frame.count        = " << std::setw(8)
+               << mFrameCounter << "\n";
+        // Did we ever get a lock?
+        bool gotLock = (mState == STATE_LOCKED) || (mGlitchCount > 0);
+        if (!gotLock) {
+            report << "ERROR - failed to lock on reference sine tone.\n";
+            setResult(ERROR_NO_LOCK);
+        } else {
+            // Only print if meaningful.
+            report << LOOPBACK_RESULT_TAG "glitch.count       = " << std::setw(8)
+                   << mGlitchCount << "\n";
+            report << LOOPBACK_RESULT_TAG "max.glitch         = " << std::setw(8)
+                   << mMaxGlitchDelta << "\n";
+            if (mGlitchCount > 0) {
+                report << "ERROR - number of glitches > 0\n";
+                setResult(ERROR_GLITCHES);
+            }
+        }
+        return report.str();
+    }
+
+    void printStatus() override {
+        ALOGD("st = %d, #gl = %3d,", mState, mGlitchCount);
+    }
+    /**
+     * Calculate the magnitude of the component of the input signal
+     * that matches the analysis frequency.
+     * Also calculate the phase that we can use to create a
+     * signal that matches that component.
+     * The phase will be between -PI and +PI.
+     */
+    double calculateMagnitude(double *phasePtr = nullptr) {
+        if (mFramesAccumulated == 0) {
+            return 0.0;
+        }
+        double sinMean = mSinAccumulator / mFramesAccumulated;
+        double cosMean = mCosAccumulator / mFramesAccumulated;
+        double magnitude = 2.0 * sqrt((sinMean * sinMean) + (cosMean * cosMean));
+        if (phasePtr != nullptr) {
+            double phase = M_PI_2 - atan2(sinMean, cosMean);
+            *phasePtr = phase;
+        }
+        return magnitude;
+    }
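+    // For intuition: with a clean input of sample = A * sin(mInputPhase), averaging
+    // over whole periods gives sinMean ~= A/2 and cosMean ~= 0, so the returned
+    // magnitude 2*sqrt(sinMean^2 + cosMean^2) ~= A and the phase offset ~= 0.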
+
+    /**
+     * @param frameData contains microphone data with sine signal feedback
+     * @param channelCount
+     */
+    result_code processInputFrame(float *frameData, int /* channelCount */) override {
+        result_code result = RESULT_OK;
+
+        float sample = frameData[0];
+        float peak = mPeakFollower.process(sample);
+
+        // Force a periodic glitch to test the detector!
+        if (mForceGlitchDuration > 0) {
+            if (mForceGlitchCounter == 0) {
+                ALOGE("%s: force a glitch!!", __func__);
+                mForceGlitchCounter = getSampleRate();
+            } else if (mForceGlitchCounter <= mForceGlitchDuration) {
+                // Force an abrupt offset.
+                sample += (sample > 0.0) ? -0.5f : 0.5f;
+            }
+            --mForceGlitchCounter;
+        }
+
+        mStateFrameCounters[mState]++; // count how many frames we are in each state
+
+        switch (mState) {
+            case STATE_IDLE:
+                mDownCounter--;
+                if (mDownCounter <= 0) {
+                    mState = STATE_IMMUNE;
+                    mDownCounter = IMMUNE_FRAME_COUNT;
+                    mInputPhase = 0.0; // prevent spike at start
+                    mOutputPhase = 0.0;
+                }
+                break;
+
+            case STATE_IMMUNE:
+                mDownCounter--;
+                if (mDownCounter <= 0) {
+                    mState = STATE_WAITING_FOR_SIGNAL;
+                }
+                break;
+
+            case STATE_WAITING_FOR_SIGNAL:
+                if (peak > mThreshold) {
+                    mState = STATE_WAITING_FOR_LOCK;
+                    //ALOGD("%5d: switch to STATE_WAITING_FOR_LOCK", mFrameCounter);
+                    resetAccumulator();
+                }
+                break;
+
+            case STATE_WAITING_FOR_LOCK:
+                mSinAccumulator += sample * sinf(mInputPhase);
+                mCosAccumulator += sample * cosf(mInputPhase);
+                mFramesAccumulated++;
+                // Must be a multiple of the period or the calculation will not be accurate.
+                if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
+                    double phaseOffset = 0.0;
+                    setMagnitude(calculateMagnitude(&phaseOffset));
+//                    ALOGD("%s() mag = %f, phaseOffset = %f", __func__, mMagnitude, phaseOffset);
+                    if (mMagnitude > mThreshold) {
+                        if (abs(phaseOffset) < kMaxPhaseError) {
+                            mState = STATE_LOCKED;
+//                            ALOGD("%5d: switch to STATE_LOCKED", mFrameCounter);
+                        }
+                        // Adjust mInputPhase to match measured phase
+                        mInputPhase += phaseOffset;
+                    }
+                    resetAccumulator();
+                }
+                incrementInputPhase();
+                break;
+
+            case STATE_LOCKED: {
+                // Predict next sine value
+                double predicted = sinf(mInputPhase) * mMagnitude;
+                double diff = predicted - sample;
+                double absDiff = fabs(diff);
+                mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
+                if (absDiff > mScaledTolerance) {
+                    result = ERROR_GLITCHES;
+                    onGlitchStart();
+//                    LOGI("diff glitch detected, absDiff = %g", absDiff);
+                } else {
+                    mSumSquareSignal += predicted * predicted;
+                    mSumSquareNoise += diff * diff;
+                    // Track incoming signal and slowly adjust magnitude to account
+                    // for drift in the DRC or AGC.
+                    mSinAccumulator += sample * sinf(mInputPhase);
+                    mCosAccumulator += sample * cosf(mInputPhase);
+                    mFramesAccumulated++;
+                    // Must be a multiple of the period or the calculation will not be accurate.
+                    if (mFramesAccumulated == mSinePeriod) {
+                        const double coefficient = 0.1;
+                        double phaseOffset = 0.0;
+                        double magnitude = calculateMagnitude(&phaseOffset);
+                        // One pole averaging filter.
+                        setMagnitude((mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient));
+
+                        mMeanSquareNoise = mSumSquareNoise * mInverseSinePeriod;
+                        mMeanSquareSignal = mSumSquareSignal * mInverseSinePeriod;
+                        resetAccumulator();
+
+                        if (abs(phaseOffset) > kMaxPhaseError) {
+                            result = ERROR_GLITCHES;
+                            onGlitchStart();
+                            ALOGD("phase glitch detected, phaseOffset = %g", phaseOffset);
+                        } else if (mMagnitude < mThreshold) {
+                            result = ERROR_GLITCHES;
+                            onGlitchStart();
+                            ALOGD("magnitude glitch detected, mMagnitude = %g", mMagnitude);
+                        }
+                    }
+                }
+                incrementInputPhase();
+            } break;
+
+            case STATE_GLITCHING: {
+                // Predict next sine value
+                mGlitchLength++;
+                double predicted = sinf(mInputPhase) * mMagnitude;
+                double diff = predicted - sample;
+                double absDiff = fabs(diff);
+                mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
+                if (absDiff < mScaledTolerance) { // close enough?
+                    // Require a full sine period of in-tolerance samples before declaring
+                    // the glitch over; a single zero crossing is not enough to end it.
+                    if (mNonGlitchCount++ > mSinePeriod) {
+                        onGlitchEnd();
+                    }
+                } else {
+                    mNonGlitchCount = 0;
+                    if (mGlitchLength > (4 * mSinePeriod)) {
+                        relock();
+                    }
+                }
+                incrementInputPhase();
+            } break;
+
+            case NUM_STATES: // not a real state
+                break;
+        }
+
+        mFrameCounter++;
+
+        return result;
+    }
+
+    // advance and wrap phase
+    void incrementInputPhase() {
+        mInputPhase += mPhaseIncrement;
+        if (mInputPhase > M_PI) {
+            mInputPhase -= (2.0 * M_PI);
+        }
+    }
+
+    // advance and wrap phase
+    void incrementOutputPhase() {
+        mOutputPhase += mPhaseIncrement;
+        if (mOutputPhase > M_PI) {
+            mOutputPhase -= (2.0 * M_PI);
+        }
+    }
+
+    /**
+     * @param frameData upon return, contains the reference sine wave
+     * @param channelCount
+     */
+    result_code processOutputFrame(float *frameData, int channelCount) override {
+        float output = 0.0f;
+        // Output sine wave so we can measure it.
+        if (mState != STATE_IDLE) {
+            float sinOut = sinf(mOutputPhase);
+            incrementOutputPhase();
+            output = (sinOut * mOutputAmplitude)
+                     + (mWhiteNoise.nextRandomDouble() * kNoiseAmplitude);
+            // ALOGD("sin(%f) = %f, %f\n", mOutputPhase, sinOut,  mPhaseIncrement);
+        }
+        frameData[0] = output;
+        for (int i = 1; i < channelCount; i++) {
+            frameData[i] = 0.0f;
+        }
+        return RESULT_OK;
+    }
+
+    void onGlitchStart() {
+        mGlitchCount++;
+//        ALOGD("%5d: STARTED a glitch # %d", mFrameCounter, mGlitchCount);
+        mState = STATE_GLITCHING;
+        mGlitchLength = 1;
+        mNonGlitchCount = 0;
+    }
+
+    void onGlitchEnd() {
+//        ALOGD("%5d: ENDED a glitch # %d, length = %d", mFrameCounter, mGlitchCount, mGlitchLength);
+        mState = STATE_LOCKED;
+        resetAccumulator();
+    }
+
+    // reset the sine wave detector
+    void resetAccumulator() {
+        mFramesAccumulated = 0;
+        mSinAccumulator = 0.0;
+        mCosAccumulator = 0.0;
+        mSumSquareSignal = 0.0;
+        mSumSquareNoise = 0.0;
+    }
+
+    void relock() {
+//        ALOGD("relock: %d because of a very long %d glitch", mFrameCounter, mGlitchLength);
+        mState = STATE_WAITING_FOR_LOCK;
+        resetAccumulator();
+    }
+
+    void reset() override {
+        LoopbackProcessor::reset();
+        mState = STATE_IDLE;
+        mDownCounter = IDLE_FRAME_COUNT;
+        resetAccumulator();
+    }
+
+    void prepareToTest() override {
+        LoopbackProcessor::prepareToTest();
+        mSinePeriod = getSampleRate() / kTargetGlitchFrequency;
+        mOutputPhase = 0.0f;
+        mInverseSinePeriod = 1.0 / mSinePeriod;
+        mPhaseIncrement = 2.0 * M_PI * mInverseSinePeriod;
+        mGlitchCount = 0;
+        mMaxGlitchDelta = 0.0;
+        for (int i = 0; i < NUM_STATES; i++) {
+            mStateFrameCounters[i] = 0;
+        }
+    }
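+    // For example, at the default 48000 Hz sample rate this gives mSinePeriod =
+    // 48000 / 607 = 79 frames, i.e. a test tone of roughly 607.6 Hz, and a lock
+    // window of PERIODS_NEEDED_FOR_LOCK * 79 = 632 frames (about 13 ms).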
+
+private:
+
+    // These must match the values in GlitchActivity.java
+    enum sine_state_t {
+        STATE_IDLE,               // beginning
+        STATE_IMMUNE,             // ignoring input, waiting for HW to settle
+        STATE_WAITING_FOR_SIGNAL, // looking for a loud signal
+        STATE_WAITING_FOR_LOCK,   // trying to lock onto the phase of the sine
+        STATE_LOCKED,             // locked on the sine wave, looking for glitches
+        STATE_GLITCHING,          // locked on the sine wave but glitching
+        NUM_STATES
+    };
+
+    enum constants {
+        // Arbitrary durations, assuming 48000 Hz
+        IDLE_FRAME_COUNT = 48 * 100,
+        IMMUNE_FRAME_COUNT = 48 * 100,
+        PERIODS_NEEDED_FOR_LOCK = 8,
+        MIN_SNR_DB = 65
+    };
+
+    static constexpr float kNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
+    static constexpr int kTargetGlitchFrequency = 607;
+    static constexpr double kMaxPhaseError = M_PI * 0.05;
+
+    float   mTolerance = 0.10; // scaled from 0.0 to 1.0
+    double  mThreshold = 0.005;
+    int     mSinePeriod = 1; // this will be set before use
+    double  mInverseSinePeriod = 1.0;
+
+    int32_t mStateFrameCounters[NUM_STATES] = {}; // zeroed again in prepareToTest()
+
+    double  mPhaseIncrement = 0.0;
+    double  mInputPhase = 0.0;
+    double  mOutputPhase = 0.0;
+    double  mMagnitude = 0.0;
+    int32_t mFramesAccumulated = 0;
+    double  mSinAccumulator = 0.0;
+    double  mCosAccumulator = 0.0;
+    double  mMaxGlitchDelta = 0.0;
+    int32_t mGlitchCount = 0;
+    int32_t mNonGlitchCount = 0;
+    int32_t mGlitchLength = 0;
+    // This is used for processing every frame so we cache it here.
+    double  mScaledTolerance = 0.0;
+    int     mDownCounter = IDLE_FRAME_COUNT;
+    int32_t mFrameCounter = 0;
+    double  mOutputAmplitude = 0.75;
+
+    int32_t mForceGlitchDuration = 0; // if > 0 then force a glitch for debugging
+    int32_t mForceGlitchCounter = 4 * 48000; // count down and trigger at zero
+
+    // measure background noise continuously as a deviation from the expected signal
+    double  mSumSquareSignal = 0.0;
+    double  mSumSquareNoise = 0.0;
+    double  mMeanSquareSignal = 0.0;
+    double  mMeanSquareNoise = 0.0;
+
+    PeakDetector  mPeakFollower;
+
+    PseudoRandom  mWhiteNoise;
+
+    sine_state_t  mState = STATE_IDLE;
+};
+
+
+#endif //ANALYZER_GLITCH_ANALYZER_H
diff --git a/media/libaaudio/examples/loopback/src/analyzer/LatencyAnalyzer.h b/media/libaaudio/examples/loopback/src/analyzer/LatencyAnalyzer.h
new file mode 100644
index 0000000..e506791
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/analyzer/LatencyAnalyzer.h
@@ -0,0 +1,606 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tools for measuring latency and for detecting glitches.
+ * These classes are pure math and can be used with any audio system.
+ */
+
+#ifndef ANALYZER_LATENCY_ANALYZER_H
+#define ANALYZER_LATENCY_ANALYZER_H
+
+#include <algorithm>
+#include <assert.h>
+#include <cctype>
+#include <iomanip>
+#include <iostream>
+#include <math.h>
+#include <memory>
+#include <sstream>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <vector>
+
+#include "PeakDetector.h"
+#include "PseudoRandom.h"
+#include "RandomPulseGenerator.h"
+
+// This is used when the code is in Oboe.
+#ifndef ALOGD
+#define ALOGD printf
+#define ALOGE printf
+#define ALOGW printf
+#endif
+
+#define LOOPBACK_RESULT_TAG  "RESULT: "
+
+static constexpr int32_t kDefaultSampleRate = 48000;
+static constexpr int32_t kMillisPerSecond   = 1000;
+static constexpr int32_t kMaxLatencyMillis  = 700;  // arbitrary and generous
+static constexpr double  kMinimumConfidence = 0.2;
+
+struct LatencyReport {
+    int32_t latencyInFrames = 0;
+    double confidence = 0.0;
+
+    void reset() {
+        latencyInFrames = 0;
+        confidence = 0.0;
+    }
+};
+
+// Calculate a normalized cross correlation.
+static double calculateNormalizedCorrelation(const float *a,
+                                             const float *b,
+                                             int windowSize) {
+    double correlation = 0.0;
+    double sumProducts = 0.0;
+    double sumSquares = 0.0;
+
+    // Correlate a against b.
+    for (int i = 0; i < windowSize; i++) {
+        float s1 = a[i];
+        float s2 = b[i];
+        // Use a normalized cross-correlation.
+        sumProducts += s1 * s2;
+        sumSquares += ((s1 * s1) + (s2 * s2));
+    }
+
+    if (sumSquares >= 1.0e-9) {
+        correlation = 2.0 * sumProducts / sumSquares;
+    }
+    return correlation;
+}
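+// With this normalization the result lies in [-1.0, 1.0]: identical windows score
+// +1.0, a window and its negation score -1.0, and unrelated signals score near 0.
+// The sumSquares >= 1.0e-9 guard avoids dividing by zero on silent input.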
+
+static double calculateRootMeanSquare(float *data, int32_t numSamples) {
+    double sum = 0.0;
+    for (int32_t i = 0; i < numSamples; i++) {
+        float sample = data[i];
+        sum += sample * sample;
+    }
+    return sqrt(sum / numSamples);
+}
+
+/**
+ * Monophonic recording with processing.
+ */
+class AudioRecording
+{
+public:
+
+    void allocate(int maxFrames) {
+        mData = std::make_unique<float[]>(maxFrames);
+        mMaxFrames = maxFrames;
+    }
+
+    // Write SHORT data from the first channel.
+    int32_t write(int16_t *inputData, int32_t inputChannelCount, int32_t numFrames) {
+        // stop at end of buffer
+        if ((mFrameCounter + numFrames) > mMaxFrames) {
+            numFrames = mMaxFrames - mFrameCounter;
+        }
+        for (int i = 0; i < numFrames; i++) {
+            mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
+        }
+        return numFrames;
+    }
+
+    // Write FLOAT data from the first channel.
+    int32_t write(float *inputData, int32_t inputChannelCount, int32_t numFrames) {
+        // stop at end of buffer
+        if ((mFrameCounter + numFrames) > mMaxFrames) {
+            numFrames = mMaxFrames - mFrameCounter;
+        }
+        for (int i = 0; i < numFrames; i++) {
+            mData[mFrameCounter++] = inputData[i * inputChannelCount];
+        }
+        return numFrames;
+    }
+
+    // Write FLOAT data from the first channel.
+    int32_t write(float sample) {
+        // stop at end of buffer
+        if (mFrameCounter < mMaxFrames) {
+            mData[mFrameCounter++] = sample;
+            return 1;
+        }
+        return 0;
+    }
+
+    void clear() {
+        mFrameCounter = 0;
+    }
+    int32_t size() const {
+        return mFrameCounter;
+    }
+
+    bool isFull() const {
+        return mFrameCounter >= mMaxFrames;
+    }
+
+    float *getData() const {
+        return mData.get();
+    }
+
+    void setSampleRate(int32_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    int32_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    /**
+     * Square the samples so they are all positive and so the peaks are emphasized.
+     */
+    void square() {
+        float *x = mData.get();
+        for (int i = 0; i < mFrameCounter; i++) {
+            x[i] *= x[i];
+        }
+    }
+
+    /**
+     * Amplify a signal so that the peak matches the specified target.
+     *
+     * @param target final max value
+     * @return gain applied to signal
+     */
+    float normalize(float target) {
+        float maxValue = 1.0e-9f;
+        for (int i = 0; i < mFrameCounter; i++) {
+            maxValue = std::max(maxValue, abs(mData[i]));
+        }
+        float gain = target / maxValue;
+        for (int i = 0; i < mFrameCounter; i++) {
+            mData[i] *= gain;
+        }
+        return gain;
+    }
+
+private:
+    std::unique_ptr<float[]> mData;
+    int32_t       mFrameCounter = 0;
+    int32_t       mMaxFrames = 0;
+    int32_t       mSampleRate = kDefaultSampleRate; // common default
+};
+
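+/**
+ * Slide the known pulse along the recording and compute a normalized correlation
+ * at each offset; the offset of the strongest peak is reported as the latency in
+ * frames, and the peak correlation value doubles as the confidence estimate.
+ *
+ * @return 0 on success, or a negative error code if the recording is too short
+ *         or contains no usable signal
+ */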
+static int measureLatencyFromPulse(AudioRecording &recorded,
+                                   AudioRecording &pulse,
+                                   LatencyReport *report) {
+
+    report->latencyInFrames = 0;
+    report->confidence = 0.0;
+
+    int numCorrelations = recorded.size() - pulse.size();
+    if (numCorrelations < 10) {
+        ALOGE("%s() recording too small = %d frames\n", __func__, recorded.size());
+        return -1;
+    }
+    std::unique_ptr<float[]> correlations = std::make_unique<float[]>(numCorrelations);
+
+    // Correlate pulse against the recorded data.
+    for (int i = 0; i < numCorrelations; i++) {
+        float correlation = (float) calculateNormalizedCorrelation(&recorded.getData()[i],
+                                                                   &pulse.getData()[0],
+                                                                   pulse.size());
+        correlations[i] = correlation;
+    }
+
+    // Find highest peak in correlation array.
+    float peakCorrelation = 0.0;
+    int peakIndex = -1;
+    for (int i = 0; i < numCorrelations; i++) {
+        float value = abs(correlations[i]);
+        if (value > peakCorrelation) {
+            peakCorrelation = value;
+            peakIndex = i;
+        }
+    }
+    if (peakIndex < 0) {
+        ALOGE("%s() no signal for correlation\n", __func__);
+        return -2;
+    }
+
+    report->latencyInFrames = peakIndex;
+    report->confidence = peakCorrelation;
+
+    return 0;
+}
+
+// ====================================================================================
+class LoopbackProcessor {
+public:
+    virtual ~LoopbackProcessor() = default;
+
+    enum result_code {
+        RESULT_OK = 0,
+        ERROR_NOISY = -99,
+        ERROR_VOLUME_TOO_LOW,
+        ERROR_VOLUME_TOO_HIGH,
+        ERROR_CONFIDENCE,
+        ERROR_INVALID_STATE,
+        ERROR_GLITCHES,
+        ERROR_NO_LOCK
+    };
+
+    virtual void prepareToTest() {
+        reset();
+    }
+
+    virtual void reset() {
+        mResult = 0;
+        mResetCount++;
+    }
+
+    virtual result_code processInputFrame(float *frameData, int channelCount) = 0;
+    virtual result_code processOutputFrame(float *frameData, int channelCount) = 0;
+
+    void process(float *inputData, int inputChannelCount, int numInputFrames,
+                 float *outputData, int outputChannelCount, int numOutputFrames) {
+        int numBoth = std::min(numInputFrames, numOutputFrames);
+        // Process one frame at a time.
+        for (int i = 0; i < numBoth; i++) {
+            processInputFrame(inputData, inputChannelCount);
+            inputData += inputChannelCount;
+            processOutputFrame(outputData, outputChannelCount);
+            outputData += outputChannelCount;
+        }
+        // If there is more input than output.
+        for (int i = numBoth; i < numInputFrames; i++) {
+            processInputFrame(inputData, inputChannelCount);
+            inputData += inputChannelCount;
+        }
+        // If there is more output than input.
+        for (int i = numBoth; i < numOutputFrames; i++) {
+            processOutputFrame(outputData, outputChannelCount);
+            outputData += outputChannelCount;
+        }
+    }
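+    // The input and output sides of a duplex stream may deliver different frame
+    // counts in one callback, so any surplus frames on either side are still
+    // processed above rather than dropped.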
+
+    virtual std::string analyze() = 0;
+
+    virtual void printStatus() {};
+
+    int32_t getResult() {
+        return mResult;
+    }
+
+    void setResult(int32_t result) {
+        mResult = result;
+    }
+
+    virtual bool isDone() {
+        return false;
+    }
+
+    virtual int save(const char *fileName) {
+        (void) fileName;
+        return -1;
+    }
+
+    virtual int load(const char *fileName) {
+        (void) fileName;
+        return -1;
+    }
+
+    virtual void setSampleRate(int32_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    int32_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    int32_t getResetCount() const {
+        return mResetCount;
+    }
+
+    /** Called when not enough input frames could be read after synchronization.
+     */
+    virtual void onInsufficientRead() {
+        reset();
+    }
+
+protected:
+    int32_t   mResetCount = 0;
+
+private:
+    int32_t mSampleRate = kDefaultSampleRate;
+    int32_t mResult = 0;
+};
+
+class LatencyAnalyzer : public LoopbackProcessor {
+public:
+
+    LatencyAnalyzer() : LoopbackProcessor() {}
+    virtual ~LatencyAnalyzer() = default;
+
+    virtual int32_t getProgress() const = 0;
+
+    virtual int getState() = 0;
+
+    // @return latency in frames
+    virtual int32_t getMeasuredLatency() = 0;
+
+    virtual double getMeasuredConfidence() = 0;
+
+    virtual double getBackgroundRMS() = 0;
+
+    virtual double getSignalRMS() = 0;
+
+};
+
+// ====================================================================================
+/**
+ * Measure the latency of a loopback stream.
+ * Use an encoded bit train as the sound source because it
+ * has an unambiguous correlation value.
+ * Uses a state machine to cycle through various stages.
+ *
+ */
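+/*
+ * Illustrative measurement flow (hypothetical caller; mono float streams assumed):
+ *
+ *     PulseLatencyAnalyzer analyzer;
+ *     analyzer.prepareToTest();
+ *     // Run the duplex stream, calling analyzer.process() from each callback,
+ *     // until analyzer.hasEnoughData() returns true, then:
+ *     printf("%s", analyzer.analyze().c_str());
+ *     int32_t latencyFrames = analyzer.getMeasuredLatency();
+ *     double  confidence    = analyzer.getMeasuredConfidence();
+ */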
+class PulseLatencyAnalyzer : public LatencyAnalyzer {
+public:
+
+    PulseLatencyAnalyzer() : LatencyAnalyzer() {
+        int32_t maxLatencyFrames = getSampleRate() * kMaxLatencyMillis / kMillisPerSecond;
+        int32_t numPulseBits = getSampleRate() * kPulseLengthMillis
+                / (kFramesPerEncodedBit * kMillisPerSecond);
+        int32_t  pulseLength = numPulseBits * kFramesPerEncodedBit;
+        mFramesToRecord = pulseLength + maxLatencyFrames;
+        mAudioRecording.allocate(mFramesToRecord);
+        mAudioRecording.setSampleRate(getSampleRate());
+        generateRandomPulse(pulseLength);
+    }
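+    // With the defaults (48000 Hz, kPulseLengthMillis = 500, kMaxLatencyMillis = 700,
+    // kFramesPerEncodedBit = 8) this works out to numPulseBits = 3000, a 24000 frame
+    // (0.5 s) pulse, and a 57600 frame (1.2 s) recording buffer.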
+
+    void generateRandomPulse(int32_t pulseLength) {
+        mPulse.allocate(pulseLength);
+        RandomPulseGenerator pulser(kFramesPerEncodedBit);
+        for (int i = 0; i < pulseLength; i++) {
+            mPulse.write(pulser.nextFloat());
+        }
+    }
+
+    int getState() override {
+        return mState;
+    }
+
+    void setSampleRate(int32_t sampleRate) override {
+        LoopbackProcessor::setSampleRate(sampleRate);
+        mAudioRecording.setSampleRate(sampleRate);
+    }
+
+    void reset() override {
+        LoopbackProcessor::reset();
+        mDownCounter = getSampleRate() / 2;
+        mLoopCounter = 0;
+
+        mPulseCursor = 0;
+        mBackgroundSumSquare = 0.0f;
+        mBackgroundSumCount = 0;
+        mBackgroundRMS = 0.0f;
+        mSignalRMS = 0.0f;
+
+        mState = STATE_MEASURE_BACKGROUND;
+        mAudioRecording.clear();
+        mLatencyReport.reset();
+    }
+
+    bool hasEnoughData() {
+        return mAudioRecording.isFull();
+    }
+
+    bool isDone() override {
+        return mState == STATE_DONE;
+    }
+
+    int32_t getProgress() const override {
+        return mAudioRecording.size();
+    }
+
+    std::string analyze() override {
+        std::stringstream report;
+        report << "PulseLatencyAnalyzer ---------------\n";
+        report << LOOPBACK_RESULT_TAG "test.state             = "
+                << std::setw(8) << mState << "\n";
+        report << LOOPBACK_RESULT_TAG "test.state.name        = "
+                << convertStateToText(mState) << "\n";
+        report << LOOPBACK_RESULT_TAG "background.rms         = "
+                << std::setw(8) << mBackgroundRMS << "\n";
+
+        int32_t newResult = RESULT_OK;
+        if (mState != STATE_GOT_DATA) {
+            report << "WARNING - Bad state. Check volume on device.\n";
+            // setResult(ERROR_INVALID_STATE);
+        } else {
+            float gain = mAudioRecording.normalize(1.0f);
+            measureLatencyFromPulse(mAudioRecording,
+                                    mPulse,
+                                    &mLatencyReport);
+
+            if (mLatencyReport.confidence < kMinimumConfidence) {
+                report << "   ERROR - confidence too low!";
+                newResult = ERROR_CONFIDENCE;
+            } else {
+                mSignalRMS = calculateRootMeanSquare(
+                        &mAudioRecording.getData()[mLatencyReport.latencyInFrames], mPulse.size())
+                                / gain;
+            }
+            double latencyMillis = kMillisPerSecond * (double) mLatencyReport.latencyInFrames
+                                   / getSampleRate();
+            report << LOOPBACK_RESULT_TAG "latency.frames         = " << std::setw(8)
+                   << mLatencyReport.latencyInFrames << "\n";
+            report << LOOPBACK_RESULT_TAG "latency.msec           = " << std::setw(8)
+                   << latencyMillis << "\n";
+            report << LOOPBACK_RESULT_TAG "latency.confidence     = " << std::setw(8)
+                   << mLatencyReport.confidence << "\n";
+        }
+        mState = STATE_DONE;
+        if (getResult() == RESULT_OK) {
+            setResult(newResult);
+        }
+
+        return report.str();
+    }
+
+    int32_t getMeasuredLatency() override {
+        return mLatencyReport.latencyInFrames;
+    }
+
+    double getMeasuredConfidence() override {
+        return mLatencyReport.confidence;
+    }
+
+    double getBackgroundRMS() override {
+        return mBackgroundRMS;
+    }
+
+    double getSignalRMS() override {
+        return mSignalRMS;
+    }
+
+    void printStatus() override {
+        ALOGD("st = %d", mState);
+    }
+
+    result_code processInputFrame(float *frameData, int channelCount) override {
+        echo_state nextState = mState;
+        mLoopCounter++;
+
+        switch (mState) {
+            case STATE_MEASURE_BACKGROUND:
+                // Measure background RMS on channel 0
+                mBackgroundSumSquare += frameData[0] * frameData[0];
+                mBackgroundSumCount++;
+                mDownCounter--;
+                if (mDownCounter <= 0) {
+                    mBackgroundRMS = sqrtf(mBackgroundSumSquare / mBackgroundSumCount);
+                    nextState = STATE_IN_PULSE;
+                    mPulseCursor = 0;
+                }
+                break;
+
+            case STATE_IN_PULSE:
+                // Record input until mAudioRecording is full.
+                mAudioRecording.write(frameData, channelCount, 1);
+                if (hasEnoughData()) {
+                    nextState = STATE_GOT_DATA;
+                }
+                break;
+
+            case STATE_GOT_DATA:
+            case STATE_DONE:
+            default:
+                break;
+        }
+
+        mState = nextState;
+        return RESULT_OK;
+    }
+
+    result_code processOutputFrame(float *frameData, int channelCount) override {
+        switch (mState) {
+            case STATE_IN_PULSE:
+                if (mPulseCursor < mPulse.size()) {
+                    float pulseSample = mPulse.getData()[mPulseCursor++];
+                    for (int i = 0; i < channelCount; i++) {
+                        frameData[i] = pulseSample;
+                    }
+                } else {
+                    for (int i = 0; i < channelCount; i++) {
+                        frameData[i] = 0;
+                    }
+                }
+                break;
+
+            case STATE_MEASURE_BACKGROUND:
+            case STATE_GOT_DATA:
+            case STATE_DONE:
+            default:
+                for (int i = 0; i < channelCount; i++) {
+                    frameData[i] = 0.0f; // silence
+                }
+                break;
+        }
+
+        return RESULT_OK;
+    }
+
+private:
+
+    enum echo_state {
+        STATE_MEASURE_BACKGROUND,
+        STATE_IN_PULSE,
+        STATE_GOT_DATA, // must match RoundTripLatencyActivity.java
+        STATE_DONE,
+    };
+
+    const char *convertStateToText(echo_state state) {
+        switch (state) {
+            case STATE_MEASURE_BACKGROUND:
+                return "INIT";
+            case STATE_IN_PULSE:
+                return "PULSE";
+            case STATE_GOT_DATA:
+                return "GOT_DATA";
+            case STATE_DONE:
+                return "DONE";
+        }
+        return "UNKNOWN";
+    }
+
+    int32_t         mDownCounter = 500;
+    int32_t         mLoopCounter = 0;
+    echo_state      mState = STATE_MEASURE_BACKGROUND;
+
+    static constexpr int32_t kFramesPerEncodedBit = 8; // multiple of 2
+    static constexpr int32_t kPulseLengthMillis = 500;
+
+    AudioRecording     mPulse;
+    int32_t            mPulseCursor = 0;
+
+    double             mBackgroundSumSquare = 0.0;
+    int32_t            mBackgroundSumCount = 0;
+    double             mBackgroundRMS = 0.0;
+    double             mSignalRMS = 0.0;
+    int32_t            mFramesToRecord = 0;
+
+    AudioRecording     mAudioRecording; // contains only the input after starting the pulse
+    LatencyReport      mLatencyReport;
+};
+
+#endif // ANALYZER_LATENCY_ANALYZER_H
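The PulseLatencyAnalyzer above is a small state machine: it measures the background level, plays a random rounded-Manchester pulse while recording the input, and cross-correlates the recording against the pulse in analyze(). Below is a minimal sketch of driving it with an ideal zero-latency loopback; it assumes the analyzer is default-constructible and that prepareToTest(), processOutputFrame(), processInputFrame() and analyze() behave as declared in this header, with an illustrative 48 kHz rate and frame budget.

// Sketch only (not part of this change): feed the analyzer's own output
// straight back as input, one mono frame at a time, then print its report.
#include <iostream>
#include "analyzer/LatencyAnalyzer.h"

int main() {
    PulseLatencyAnalyzer analyzer;
    analyzer.setSampleRate(48000);   // illustrative rate
    analyzer.prepareToTest();

    // First the background level is measured, then the pulse is emitted and
    // the "echo" is recorded until the internal AudioRecording buffer is full.
    for (int i = 0; i < 48000 * 4 && !analyzer.isDone(); i++) {
        float frame = 0.0f;
        analyzer.processOutputFrame(&frame, 1); // generate one output sample
        analyzer.processInputFrame(&frame, 1);  // loop it back with zero delay
    }
    std::cout << analyzer.analyze();            // latency, confidence, RMS report
    return 0;
}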
diff --git a/media/libaaudio/examples/loopback/src/analyzer/ManchesterEncoder.h b/media/libaaudio/examples/loopback/src/analyzer/ManchesterEncoder.h
new file mode 100644
index 0000000..0a4bd5b
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/analyzer/ManchesterEncoder.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_MANCHESTER_ENCODER_H
+#define ANALYZER_MANCHESTER_ENCODER_H
+
+#include <cstdint>
+
+/**
+ * Encode bytes using Manchester Coding scheme.
+ *
+ * Manchester Code is self-clocking.
+ * There is a transition in the middle of every bit.
+ * Zero is high then low.
+ * One is low then high.
+ *
+ * This avoids having long DC sections that would droop when
+ * passed through analog circuits with AC coupling.
+ *
+ * IEEE 802.3 compatible.
+ */
+
+class ManchesterEncoder {
+public:
+    ManchesterEncoder(int samplesPerPulse)
+            : mSamplesPerPulse(samplesPerPulse)
+            , mSamplesPerPulseHalf(samplesPerPulse / 2)
+            , mCursor(samplesPerPulse) {
+    }
+
+    virtual ~ManchesterEncoder() = default;
+
+    /**
+     * This will be called when the next byte is needed.
+     * @return the next byte to encode
+     */
+    virtual uint8_t onNextByte() = 0;
+
+    /**
+     * Generate the next floating-point sample.
+     * @return the next sample, either -1.0f or +1.0f
+     */
+    virtual float nextFloat() {
+        advanceSample();
+        if (mCurrentBit) {
+            return (mCursor < mSamplesPerPulseHalf) ? -1.0f : 1.0f; // one
+        } else {
+            return (mCursor < mSamplesPerPulseHalf) ? 1.0f : -1.0f; // zero
+        }
+    }
+
+protected:
+    /**
+     * This will be called when a new bit is ready to be encoded.
+     * It can be used to prepare the encoded samples.
+     * @param current the value of the new bit
+     */
+    virtual void onNextBit(bool /* current */) {};
+
+    void advanceSample() {
+        // Are we ready for a new bit?
+        if (++mCursor >= mSamplesPerPulse) {
+            mCursor = 0;
+            if (mBitsLeft == 0) {
+                mCurrentByte = onNextByte();
+                mBitsLeft = 8;
+            }
+            --mBitsLeft;
+            mCurrentBit = (mCurrentByte >> mBitsLeft) & 1;
+            onNextBit(mCurrentBit);
+        }
+    }
+
+    bool getCurrentBit() {
+        return mCurrentBit;
+    }
+
+    const int mSamplesPerPulse;
+    const int mSamplesPerPulseHalf;
+    int       mCursor;
+    int       mBitsLeft = 0;
+    uint8_t   mCurrentByte = 0;
+    bool      mCurrentBit = false;
+};
+#endif //ANALYZER_MANCHESTER_ENCODER_H
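Each bit is encoded as two half-pulses, so a tiny test subclass makes the waveform easy to inspect. The FixedByteEncoder below is hypothetical, written only to exercise the abstract onNextByte() hook; samplesPerPulse = 8 is an arbitrary choice.

// Sketch only: encode one repeated byte and print the raw (hard-edged)
// Manchester waveform, one encoded bit per printed line.
#include <cstdio>
#include "analyzer/ManchesterEncoder.h"

class FixedByteEncoder : public ManchesterEncoder {
public:
    FixedByteEncoder(int samplesPerPulse, uint8_t byte)
            : ManchesterEncoder(samplesPerPulse), mByte(byte) {}
    uint8_t onNextByte() override { return mByte; } // always encode the same byte
private:
    const uint8_t mByte;
};

int main() {
    const int samplesPerPulse = 8;
    FixedByteEncoder encoder(samplesPerPulse, 0xA5);
    for (int bit = 0; bit < 8; bit++) {            // one byte, MSB first
        for (int i = 0; i < samplesPerPulse; i++) {
            printf("%5.1f", encoder.nextFloat());  // zero: +1 then -1, one: -1 then +1
        }
        printf("\n");
    }
    return 0;
}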
diff --git a/media/libaaudio/examples/loopback/src/analyzer/PeakDetector.h b/media/libaaudio/examples/loopback/src/analyzer/PeakDetector.h
new file mode 100644
index 0000000..4b3b4e7
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/analyzer/PeakDetector.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_PEAK_DETECTOR_H
+#define ANALYZER_PEAK_DETECTOR_H
+
+#include <math.h>
+
+/**
+ * Measure a peak envelope by rising with the peaks,
+ * and decaying exponentially after each peak.
+ * The absolute value of the input signal is used.
+ */
+class PeakDetector {
+public:
+
+    void reset() {
+        mLevel = 0.0;
+    }
+
+    double process(double input) {
+        mLevel *= mDecay; // exponential decay
+        input = fabs(input);
+        // never fall below the input signal
+        if (input > mLevel) {
+            mLevel = input;
+        }
+        return mLevel;
+    }
+
+    double getLevel() const {
+        return mLevel;
+    }
+
+    double getDecay() const {
+        return mDecay;
+    }
+
+    /**
+     * Multiply the level by this amount on every iteration.
+     * This provides an exponential decay curve.
+     * A value just under 1.0 is best, for example, 0.99.
+     * @param decay scale factor applied to the level on each call
+     */
+    void setDecay(double decay) {
+        mDecay = decay;
+    }
+
+private:
+    static constexpr double kDefaultDecay = 0.99;
+
+    double mLevel = 0.0;
+    double mDecay = kDefaultDecay;
+};
+#endif //ANALYZER_PEAK_DETECTOR_H
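The detector holds the largest recent |input| and lets it decay geometrically between peaks. A short self-contained sketch (the decay value is chosen only so the effect is visible over a handful of samples):

// Sketch only: feed a few impulses and watch the envelope rise instantly
// on each peak and decay exponentially afterwards.
#include <cstdio>
#include "analyzer/PeakDetector.h"

int main() {
    PeakDetector detector;
    detector.setDecay(0.95); // illustrative; the default is 0.99
    const double inputs[] = {0.0, 1.0, 0.0, 0.0, 0.0, -0.8, 0.0, 0.0};
    for (double x : inputs) {
        printf("in = %5.2f   level = %6.3f\n", x, detector.process(x));
    }
    return 0;
}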
diff --git a/media/libaaudio/examples/loopback/src/analyzer/PseudoRandom.h b/media/libaaudio/examples/loopback/src/analyzer/PseudoRandom.h
new file mode 100644
index 0000000..1c4938c
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/analyzer/PseudoRandom.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANALYZER_PSEUDORANDOM_H
+#define ANALYZER_PSEUDORANDOM_H
+
+#include <cstdint>
+
+class PseudoRandom {
+public:
+    PseudoRandom(int64_t seed = 99887766)
+            :    mSeed(seed)
+    {}
+
+    /**
+     * Returns the next random double from -1.0 to 1.0
+     *
+     * @return value from -1.0 to 1.0
+     */
+    double nextRandomDouble() {
+        return nextRandomInteger() * (0.5 / (((int32_t)1) << 30));
+    }
+
+    /** Calculate a random 32-bit number using the linear-congruential method
+     * with known real-time performance.
+     */
+    int32_t nextRandomInteger() {
+#if __has_builtin(__builtin_mul_overflow) && __has_builtin(__builtin_add_overflow)
+        int64_t prod;
+        // Use values for 64-bit sequence from MMIX by Donald Knuth.
+        __builtin_mul_overflow(mSeed, (int64_t)6364136223846793005, &prod);
+        __builtin_add_overflow(prod, (int64_t)1442695040888963407, &mSeed);
+#else
+        mSeed = (mSeed * (int64_t)6364136223846793005) + (int64_t)1442695040888963407;
+#endif
+        return (int32_t) (mSeed >> 32); // The higher bits have a longer sequence.
+    }
+
+private:
+    int64_t mSeed;
+};
+
+#endif //ANALYZER_PSEUDORANDOM_H
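Because the generator is a plain linear congruential recurrence, the sequence is fully determined by the seed, which is what lets a noise signal be regenerated for correlation. A small sketch of that property:

// Sketch only: two generators with the same seed produce identical sequences,
// and nextRandomDouble() stays in [-1.0, 1.0).
#include <cassert>
#include <cstdio>
#include "analyzer/PseudoRandom.h"

int main() {
    PseudoRandom first(12345);
    PseudoRandom second(12345);
    for (int i = 0; i < 5; i++) {
        double x = first.nextRandomDouble();
        assert(x == second.nextRandomDouble()); // deterministic for a given seed
        assert(x >= -1.0 && x < 1.0);
        printf("%f\n", x);
    }
    return 0;
}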
diff --git a/media/libaaudio/examples/loopback/src/analyzer/RandomPulseGenerator.h b/media/libaaudio/examples/loopback/src/analyzer/RandomPulseGenerator.h
new file mode 100644
index 0000000..030050b
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/analyzer/RandomPulseGenerator.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_RANDOM_PULSE_GENERATOR_H
+#define ANALYZER_RANDOM_PULSE_GENERATOR_H
+
+#include <stdlib.h>
+#include "RoundedManchesterEncoder.h"
+
+/**
+ * Encode random ones and zeros using Manchester Code per IEEE 802.3.
+ */
+class RandomPulseGenerator : public RoundedManchesterEncoder {
+public:
+    RandomPulseGenerator(int samplesPerPulse)
+    : RoundedManchesterEncoder(samplesPerPulse) {
+    }
+
+    virtual ~RandomPulseGenerator() = default;
+
+    /**
+     * This will be called when the next byte is needed.
+     * @return random byte
+     */
+    uint8_t onNextByte() override {
+        return static_cast<uint8_t>(rand());
+    }
+};
+
+#endif //ANALYZER_RANDOM_PULSE_GENERATOR_H
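This is the concrete encoder the latency test uses to build its excitation signal, mirroring the mPulse.write(pulser.nextFloat()) loop in LatencyAnalyzer.h above. A sketch of building such a pulse follows; the srand() call and the specific lengths are assumptions of the example, not requirements of this change.

// Sketch only: build a rounded random pulse like the one written into mPulse.
#include <cstdlib>
#include <vector>
#include "analyzer/RandomPulseGenerator.h"

std::vector<float> makePulse(int sampleRate, int lengthMillis, int framesPerEncodedBit) {
    srand(12345); // make the rand()-driven bit stream reproducible (example choice)
    RandomPulseGenerator pulser(framesPerEncodedBit);
    std::vector<float> pulse(sampleRate * lengthMillis / 1000);
    for (float &sample : pulse) {
        sample = pulser.nextFloat();
    }
    return pulse;
}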
diff --git a/media/libaaudio/examples/loopback/src/analyzer/RoundedManchesterEncoder.h b/media/libaaudio/examples/loopback/src/analyzer/RoundedManchesterEncoder.h
new file mode 100644
index 0000000..f2eba84
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/analyzer/RoundedManchesterEncoder.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
+#define ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
+
+#include <math.h>
+#include <memory>
+#include <stdlib.h>
+#include "ManchesterEncoder.h"
+
+/**
+ * Encode bytes using Manchester Code.
+ * Round the edges using a half cosine to reduce ringing caused by a hard edge.
+ */
+
+class RoundedManchesterEncoder : public ManchesterEncoder {
+public:
+    RoundedManchesterEncoder(int samplesPerPulse)
+            : ManchesterEncoder(samplesPerPulse) {
+        int rampSize = samplesPerPulse / 4;
+        mZeroAfterZero = std::make_unique<float[]>(samplesPerPulse);
+        mZeroAfterOne = std::make_unique<float[]>(samplesPerPulse);
+
+        int sampleIndex = 0;
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            float phase = (rampIndex + 1) * M_PI / rampSize;
+            float sample = -cosf(phase);
+            mZeroAfterZero[sampleIndex] = sample;
+            mZeroAfterOne[sampleIndex] = 1.0f;
+            sampleIndex++;
+        }
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            mZeroAfterZero[sampleIndex] = 1.0f;
+            mZeroAfterOne[sampleIndex] = 1.0f;
+            sampleIndex++;
+        }
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            float phase = (rampIndex + 1) * M_PI / rampSize;
+            float sample = cosf(phase);
+            mZeroAfterZero[sampleIndex] = sample;
+            mZeroAfterOne[sampleIndex] = sample;
+            sampleIndex++;
+        }
+        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
+            mZeroAfterZero[sampleIndex] = -1.0f;
+            mZeroAfterOne[sampleIndex] = -1.0f;
+            sampleIndex++;
+        }
+    }
+
+    void onNextBit(bool current) override {
+        // Do we need to use the rounded edge?
+        mCurrentSamples = (current ^ mPreviousBit)
+                          ? mZeroAfterOne.get()
+                          : mZeroAfterZero.get();
+        mPreviousBit = current;
+    }
+
+    float nextFloat() override {
+        advanceSample();
+        float output = mCurrentSamples[mCursor];
+        if (getCurrentBit()) output = -output;
+        return output;
+    }
+
+private:
+
+    bool mPreviousBit = false;
+    float *mCurrentSamples = nullptr;
+    std::unique_ptr<float[]> mZeroAfterZero;
+    std::unique_ptr<float[]> mZeroAfterOne;
+};
+
+#endif //ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
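The half-cosine ramps replace the instantaneous +1/-1 transitions of the plain encoder, so consecutive samples never jump by the full 2.0 swing. A quick check of that, using the RandomPulseGenerator subclass above (samples-per-bit and bit count are arbitrary):

// Sketch only: the rounded waveform should have no hard edges, i.e. the
// largest step between consecutive samples stays well below 2.0.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include "analyzer/RandomPulseGenerator.h"

int main() {
    RandomPulseGenerator pulser(32);     // 32 samples per encoded bit
    float previous = pulser.nextFloat();
    float maxStep = 0.0f;
    for (int i = 0; i < 32 * 64; i++) {  // 64 random bits
        float current = pulser.nextFloat();
        maxStep = std::max(maxStep, std::fabs(current - previous));
        previous = current;
    }
    printf("largest sample-to-sample step = %f\n", maxStep);
    return 0;
}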
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 49d921f..0d2ec70 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -20,6 +20,8 @@
 #include <assert.h>
 #include <cctype>
 #include <errno.h>
+#include <iomanip>
+#include <iostream>
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -33,7 +35,9 @@
 #include "AAudioSimplePlayer.h"
 #include "AAudioSimpleRecorder.h"
 #include "AAudioExampleUtils.h"
-#include "LoopbackAnalyzer.h"
+
+#include "analyzer/GlitchAnalyzer.h"
+#include "analyzer/LatencyAnalyzer.h"
 #include "../../utils/AAudioExampleUtils.h"
 
 // V0.4.00 = rectify and low-pass filter the echos, auto-correlate entire echo
@@ -41,7 +45,8 @@
 //           fix -n option to set output buffer for -tm
 //           plot first glitch
 // V0.4.02 = allow -n0 for minimal buffer size
-#define APP_VERSION             "0.4.02"
+// V0.5.00 = use latency analyzer from OboeTester, uses random noise for latency
+#define APP_VERSION             "0.5.00"
 
 // Tag for machine readable results as property = value pairs
 #define RESULT_TAG              "RESULT: "
@@ -57,6 +62,20 @@
 constexpr int kDefaultHangTimeMillis = 50;
 constexpr int kMaxGlitchEventsToSave = 32;
 
+static void printAudioScope(float sample) {
+    const int maxStars = 80; // arbitrary, fits on one line
+    char c = '*';
+    if (sample < -1.0) {
+        sample = -1.0;
+        c = '$';
+    } else if (sample > 1.0) {
+        sample = 1.0;
+        c = '$';
+    }
+    int numSpaces = (int) (((sample + 1.0) * 0.5) * maxStars);
+    printf("%*c%c\n", numSpaces, ' ', c);
+}
+
 struct LoopbackData {
     AAudioStream      *inputStream = nullptr;
     AAudioStream      *outputStream = nullptr;
@@ -83,8 +102,8 @@
     aaudio_result_t    inputError = AAUDIO_OK;
     aaudio_result_t    outputError = AAUDIO_OK;
 
-    SineAnalyzer       sineAnalyzer;
-    EchoAnalyzer       echoAnalyzer;
+    GlitchAnalyzer     sineAnalyzer;
+    PulseLatencyAnalyzer echoAnalyzer;
     AudioRecording     audioRecording;
     LoopbackProcessor *loopbackProcessor;
 
@@ -254,17 +273,18 @@
             }
 
             // Analyze the data.
-            LoopbackProcessor::process_result procResult = myData->loopbackProcessor->process(myData->inputFloatData,
+            myData->loopbackProcessor->process(myData->inputFloatData,
                                                myData->actualInputChannelCount,
+                                               numFrames,
                                                outputData,
                                                myData->actualOutputChannelCount,
                                                numFrames);
-
-            if (procResult == LoopbackProcessor::PROCESS_RESULT_GLITCH) {
-                if (myData->numGlitchEvents < kMaxGlitchEventsToSave) {
-                    myData->glitchFrames[myData->numGlitchEvents++] = myData->audioRecording.size();
-                }
-            }
+//
+//            if (procResult == LoopbackProcessor::PROCESS_RESULT_GLITCH) {
+//                if (myData->numGlitchEvents < kMaxGlitchEventsToSave) {
+//                    myData->glitchFrames[myData->numGlitchEvents++] = myData->audioRecording.size();
+//                }
+//            }
 
             // Save for later.
             myData->audioRecording.write(myData->inputFloatData,
@@ -283,8 +303,8 @@
 }
 
 static void MyErrorCallbackProc(
-        AAudioStream *stream __unused,
-        void *userData __unused,
+        AAudioStream * /* stream */,
+        void * userData,
         aaudio_result_t error) {
     printf("Error Callback, error: %d\n",(int)error);
     LoopbackData *myData = (LoopbackData *) userData;
@@ -305,8 +325,8 @@
     printf("          l for _LATENCY\n");
     printf("          p for _POWER_SAVING\n");
     printf("      -t{test}          select test mode\n");
-    printf("          m for sine magnitude\n");
-    printf("          e for echo latency (default)\n");
+    printf("          g for Glitch detection\n");
+    printf("          l for round trip Latency (default)\n");
     printf("          f for file latency, analyzes %s\n\n", FILENAME_ECHOS);
     printf("      -X  use EXCLUSIVE mode for input\n");
     printf("Example:  aaudio_loopback -n2 -pl -Pl -x\n");
@@ -333,20 +353,22 @@
 }
 
 enum {
-    TEST_SINE_MAGNITUDE = 0,
-    TEST_ECHO_LATENCY,
+    TEST_GLITCHES = 0,
+    TEST_LATENCY,
     TEST_FILE_LATENCY,
 };
 
 static int parseTestMode(char c) {
-    int testMode = TEST_ECHO_LATENCY;
+    int testMode = TEST_LATENCY;
     c = tolower(c);
     switch (c) {
-        case 'm':
-            testMode = TEST_SINE_MAGNITUDE;
+        case 'm': // deprecated
+        case 'g':
+            testMode = TEST_GLITCHES;
             break;
-        case 'e':
-            testMode = TEST_ECHO_LATENCY;
+        case 'e': // deprecated
+        case 'l':
+            testMode = TEST_LATENCY;
             break;
         case 'f':
             testMode = TEST_FILE_LATENCY;
@@ -408,9 +430,10 @@
     int32_t               actualSampleRate           = 0;
     int                   written                    = 0;
 
-    int                   testMode                   = TEST_ECHO_LATENCY;
+    int                   testMode                   = TEST_LATENCY;
     double                gain                       = 1.0;
     int                   hangTimeMillis             = 0;
+    std::string           report;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
@@ -488,22 +511,21 @@
     int32_t requestedOutputBursts = argParser.getNumberOfBursts();
 
     switch(testMode) {
-        case TEST_SINE_MAGNITUDE:
+        case TEST_GLITCHES:
             loopbackData.loopbackProcessor = &loopbackData.sineAnalyzer;
             break;
-        case TEST_ECHO_LATENCY:
-            loopbackData.echoAnalyzer.setGain(gain);
+        case TEST_LATENCY:
+            // TODO loopbackData.echoAnalyzer.setGain(gain);
             loopbackData.loopbackProcessor = &loopbackData.echoAnalyzer;
             break;
         case TEST_FILE_LATENCY: {
-            loopbackData.echoAnalyzer.setGain(gain);
-
+            // TODO loopbackData.echoAnalyzer.setGain(gain);
             loopbackData.loopbackProcessor = &loopbackData.echoAnalyzer;
             int read = loopbackData.loopbackProcessor->load(FILENAME_ECHOS);
             printf("main() read %d mono samples from %s on Android device, rate = %d\n",
                    read, FILENAME_ECHOS,
                    loopbackData.loopbackProcessor->getSampleRate());
-            loopbackData.loopbackProcessor->report();
+            std::cout << loopbackData.loopbackProcessor->analyze();
             goto report_result;
         }
             break;
@@ -557,7 +579,7 @@
         int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
         (void) AAudioStream_setBufferSizeInFrames(inputStream, actualCapacity);
 
-        if (testMode == TEST_SINE_MAGNITUDE
+        if (testMode == TEST_GLITCHES
                 && requestedOutputBursts == AAUDIO_UNSPECIFIED) {
             result = AAudioStream_setBufferSizeInFrames(outputStream, actualCapacity);
             if (result < 0) {
@@ -594,10 +616,10 @@
     loopbackData.inputFloatData = new float[loopbackData.inputFramesMaximum *
                                               loopbackData.actualInputChannelCount]{};
 
-    loopbackData.loopbackProcessor->reset();
-
     loopbackData.hangTimeMillis = hangTimeMillis;
 
+    loopbackData.loopbackProcessor->prepareToTest();
+
     // Start OUTPUT first so INPUT does not overflow.
     result = player.start();
     if (result != AAUDIO_OK) {
@@ -669,7 +691,8 @@
 
     printf("input error = %d = %s\n",
            loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
-
+/*
+    // TODO Restore this code some day if we want to save files.
     written = loopbackData.loopbackProcessor->save(FILENAME_ECHOS);
     if (written > 0) {
         printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
@@ -681,9 +704,9 @@
         printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
                written, FILENAME_ALL);
     }
-
+*/
     if (loopbackData.inputError == AAUDIO_OK) {
-        if (testMode == TEST_SINE_MAGNITUDE) {
+        if (testMode == TEST_GLITCHES) {
             if (loopbackData.numGlitchEvents > 0) {
                 // Graph around the first glitch if there is one.
                 const int32_t start = loopbackData.glitchFrames[0] - 8;
@@ -697,7 +720,8 @@
             }
         }
 
-        loopbackData.loopbackProcessor->report();
+        std::cout << "Please wait several seconds for analysis to complete.\n";
+        std::cout << loopbackData.loopbackProcessor->analyze();
     }
 
     {
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 2494313..a438c77 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -896,7 +896,6 @@
 {
     // previous and new IAudioRecord sequence numbers are used to detect track re-creation
     uint32_t oldSequence = 0;
-    uint32_t newSequence;
 
     Proxy::Buffer buffer;
     status_t status = NO_ERROR;
@@ -914,7 +913,7 @@
             // start of lock scope
             AutoMutex lock(mLock);
 
-            newSequence = mSequence;
+            uint32_t newSequence = mSequence;
             // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
             if (status == DEAD_OBJECT) {
                 // re-create track, unless someone else has already done so
@@ -951,6 +950,7 @@
     audioBuffer->frameCount = buffer.mFrameCount;
     audioBuffer->size = buffer.mFrameCount * mFrameSize;
     audioBuffer->raw = buffer.mRaw;
+    audioBuffer->sequence = oldSequence;
     if (nonContig != NULL) {
         *nonContig = buffer.mNonContig;
     }
@@ -971,6 +971,12 @@
     buffer.mRaw = audioBuffer->raw;
 
     AutoMutex lock(mLock);
+    if (audioBuffer->sequence != mSequence) {
+        // This Buffer came from a different IAudioRecord instance, so ignore the releaseBuffer
+        ALOGD("%s is no-op due to IAudioRecord sequence mismatch %u != %u",
+                __func__, audioBuffer->sequence, mSequence);
+        return;
+    }
     mInOverrun = false;
     mProxy->releaseBuffer(&buffer);
 
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index ad39abe..941cf54 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1491,7 +1491,14 @@
             }
         }
     }
-    ALOGE("invalid attributes %s when converting to stream",  toString(attr).c_str());
+    switch (attr.usage) {
+        case AUDIO_USAGE_VIRTUAL_SOURCE:
+            // virtual source is not expected to have an associated product strategy
+            break;
+        default:
+            ALOGE("invalid attributes %s when converting to stream",  toString(attr).c_str());
+            break;
+    }
     return AUDIO_STREAM_MUSIC;
 }
 
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index db90e6a..b510378 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -92,6 +92,11 @@
             int8_t*     i8;         // unsigned 8-bit, offset by 0x80
                                     // input to obtainBuffer(): unused, output: pointer to buffer
         };
+
+        uint32_t    sequence;       // IAudioRecord instance sequence number, as of obtainBuffer().
+                                    // It is set by obtainBuffer() and confirmed by releaseBuffer().
+                                    // Not "user-serviceable".
+                                    // TODO Consider sp<IMemory> instead, or in addition to this.
     };
 
     /* As a convenience, if a callback is supplied, a handler thread
@@ -420,14 +425,17 @@
      *  frameCount  number of frames requested
      *  size        ignored
      *  raw         ignored
+     *  sequence    ignored
      * After error return:
      *  frameCount  0
      *  size        0
      *  raw         undefined
+     *  sequence    undefined
      * After successful return:
      *  frameCount  actual number of frames available, <= number requested
      *  size        actual number of bytes available
      *  raw         pointer to the buffer
+     *  sequence    IAudioRecord instance sequence number, as of obtainBuffer()
      */
 
             status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
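The new sequence field closes a small race: obtainBuffer() stamps each Buffer with the IAudioRecord sequence it came from, and releaseBuffer() now silently ignores a buffer whose sequence no longer matches because the track was invalidated and re-created in between. A client-side sketch of the pull-mode obtain/copy/release pattern this protects (error handling trimmed; everything except the AudioRecord API names is illustrative):

// Sketch only: read one contiguous chunk from an AudioRecord in pull mode.
// If the track is re-created between obtainBuffer() and releaseBuffer(),
// the stale release becomes a no-op thanks to the sequence check above.
#include <cstring>
#include <media/AudioRecord.h>

ssize_t drainOnce(const android::sp<android::AudioRecord>& record,
                  void* dest, size_t maxBytes) {
    android::AudioRecord::Buffer buffer;
    buffer.frameCount = maxBytes / record->frameSize(); // frames requested
    android::status_t status = record->obtainBuffer(&buffer, 1 /* waitCount */);
    if (status != android::NO_ERROR) {
        return status;
    }
    memcpy(dest, buffer.raw, buffer.size);  // copy the contiguous part
    const size_t bytesRead = buffer.size;
    record->releaseBuffer(&buffer);         // checked against buffer.sequence
    return bytesRead;
}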
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index f39eb0c..218249d 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -100,6 +100,7 @@
         {AUDIO_STREAM_ENFORCED_AUDIBLE, "enforced_audible"},
         {AUDIO_STREAM_DTMF, "dtmf"},
         {AUDIO_STREAM_TTS, "tts"},
+        {AUDIO_STREAM_ASSISTANT, "assistant"},
 };
 
 /** All input stream types which support effects.
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index aa54b82..fc037a6 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -312,6 +312,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
     MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
     MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ASSISTANT),
     MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
     MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
     TERMINATOR
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
index 4aceac5..a3c2f1a 100644
--- a/media/libmediametrics/MediaMetrics.cpp
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -31,28 +31,31 @@
 // ALL functions returning a char * give responsibility for the allocated buffer
 // to the caller. The caller is responsible to call free() on that pointer.
 //
+//
+
+using namespace android::mediametrics;
 
 // manage the overall record
 mediametrics_handle_t mediametrics_create(mediametricskey_t key) {
-    android::mediametrics::Item *item = android::mediametrics::Item::create(key);
+    Item *item = Item::create(key);
     return (mediametrics_handle_t) item;
 }
 
 void mediametrics_delete(mediametrics_handle_t handle) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return;
     delete item;
 }
 
 mediametricskey_t mediametrics_getKey(mediametrics_handle_t handle) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return NULL;
     return strdup(item->getKey().c_str());
 }
 
 // nuplayer, et al use it when acting as proxies
 void mediametrics_setUid(mediametrics_handle_t handle, uid_t uid) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->setUid(uid);
 }
 
@@ -61,31 +64,31 @@
 
 void mediametrics_setInt32(mediametrics_handle_t handle, attr_t attr,
                                 int32_t value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->setInt32(attr, value);
 }
 
 void mediametrics_setInt64(mediametrics_handle_t handle, attr_t attr,
                                 int64_t value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->setInt64(attr, value);
 }
 
 void mediametrics_setDouble(mediametrics_handle_t handle, attr_t attr,
                                  double value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->setDouble(attr, value);
 }
 
 void mediametrics_setRate(mediametrics_handle_t handle, attr_t attr,
                                int64_t count, int64_t duration) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->setRate(attr, count, duration);
 }
 
 void mediametrics_setCString(mediametrics_handle_t handle, attr_t attr,
                                  const char *value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->setCString(attr, value);
 }
 
@@ -94,25 +97,25 @@
 
 void mediametrics_addInt32(mediametrics_handle_t handle, attr_t attr,
                                 int32_t value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->addInt32(attr, value);
 }
 
 void mediametrics_addInt64(mediametrics_handle_t handle, attr_t attr,
                                 int64_t value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->addInt64(attr, value);
 }
 
 void mediametrics_addDouble(mediametrics_handle_t handle, attr_t attr,
                                  double value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->addDouble(attr, value);
 }
 
 void mediametrics_addRate(mediametrics_handle_t handle, attr_t attr,
                                int64_t count, int64_t duration) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item != NULL) item->addRate(attr, count, duration);
 }
 
@@ -123,28 +126,28 @@
 
 bool mediametrics_getInt32(mediametrics_handle_t handle, attr_t attr,
                                 int32_t * value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return false;
     return item->getInt32(attr, value);
 }
 
 bool mediametrics_getInt64(mediametrics_handle_t handle, attr_t attr,
                                 int64_t * value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return false;
     return item->getInt64(attr, value);
 }
 
 bool mediametrics_getDouble(mediametrics_handle_t handle, attr_t attr,
                                  double *value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return false;
     return item->getDouble(attr, value);
 }
 
 bool mediametrics_getRate(mediametrics_handle_t handle, attr_t attr,
                                int64_t * count, int64_t * duration, double *rate) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return false;
     return item->getRate(attr, count, duration, rate);
 }
@@ -152,7 +155,7 @@
 // NB: caller owns the string that comes back, is responsible for freeing it
 bool mediametrics_getCString(mediametrics_handle_t handle, attr_t attr,
                                  char **value) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return false;
 
     return item->getCString(attr, value);
@@ -164,36 +167,36 @@
 }
 
 bool mediametrics_selfRecord(mediametrics_handle_t handle) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return false;
     return item->selfrecord();
 }
 
 mediametrics_handle_t mediametrics_dup(mediametrics_handle_t handle) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
-    if (item == NULL) return android::mediametrics::Item::convert(item);
-    return android::mediametrics::Item::convert(item->dup());
+    Item *item = (Item *) handle;
+    if (item == NULL) return Item::convert(item);
+    return Item::convert(item->dup());
 }
 
 const char *mediametrics_readable(mediametrics_handle_t handle) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return "";
     return item->toCString();
 }
 
 int32_t mediametrics_count(mediametrics_handle_t handle) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return 0;
     return item->count();
 }
 
 bool mediametrics_isEnabled() {
     // static, so doesn't need an instance
-    return android::mediametrics::Item::isEnabled();
+    return Item::isEnabled();
 }
 
 bool mediametrics_getAttributes(mediametrics_handle_t handle, char **buffer, size_t *length) {
-    android::mediametrics::Item *item = (android::mediametrics::Item *) handle;
+    Item *item = (Item *) handle;
     if (item == NULL) return false;
     return item->writeToByteString(buffer, length) == android::NO_ERROR;
 
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index 98158d2..62af0f7 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -63,51 +63,6 @@
     if (DEBUG_ALLOCATIONS) {
         ALOGD("Destroy  mediametrics::Item @ %p", this);
     }
-    clear();
-}
-
-void mediametrics::Item::clear() {
-
-    // clean allocated storage from key
-    mKey.clear();
-
-    // clean attributes
-    // contents of the attributes
-    for (size_t i = 0 ; i < mPropCount; i++ ) {
-        mProps[i].clear();
-    }
-    // the attribute records themselves
-    if (mProps != NULL) {
-        free(mProps);
-        mProps = NULL;
-    }
-    mPropSize = 0;
-    mPropCount = 0;
-
-    return;
-}
-
-// make a deep copy of myself
-mediametrics::Item *mediametrics::Item::dup() {
-    mediametrics::Item *dst = new mediametrics::Item(this->mKey);
-
-    if (dst != NULL) {
-        // key as part of constructor
-        dst->mPid = this->mPid;
-        dst->mUid = this->mUid;
-        dst->mPkgName = this->mPkgName;
-        dst->mPkgVersionCode = this->mPkgVersionCode;
-        dst->mTimestamp = this->mTimestamp;
-
-        // properties aka attributes
-        dst->growProps(this->mPropCount);
-        for(size_t i=0;i<mPropCount;i++) {
-            dst->mProps[i] = this->mProps[i];
-        }
-        dst->mPropCount = this->mPropCount;
-    }
-
-    return dst;
 }
 
 mediametrics::Item &mediametrics::Item::setTimestamp(nsecs_t ts) {
@@ -151,84 +106,12 @@
     return mPkgVersionCode;
 }
 
-
-// find the proper entry in the list
-size_t mediametrics::Item::findPropIndex(const char *name) const
-{
-    size_t i = 0;
-    for (; i < mPropCount; i++) {
-        if (mProps[i].isNamed(name)) break;
-    }
-    return i;
-}
-
-mediametrics::Item::Prop *mediametrics::Item::findProp(const char *name) const {
-    const size_t i = findPropIndex(name);
-    if (i < mPropCount) {
-        return &mProps[i];
-    }
-    return nullptr;
-}
-
-// consider this "find-or-allocate".
-// caller validates type and uses clearPropValue() accordingly
-mediametrics::Item::Prop *mediametrics::Item::allocateProp(const char *name) {
-    const size_t i = findPropIndex(name);
-    if (i < mPropCount) {
-        return &mProps[i]; // already have it, return
-    }
-
-    Prop *prop = allocateProp(); // get a new prop
-    if (prop == nullptr) return nullptr;
-    prop->setName(name);
-    return prop;
-}
-
-mediametrics::Item::Prop *mediametrics::Item::allocateProp() {
-    if (mPropCount == mPropSize && growProps() == false) {
-        ALOGE("%s: failed allocation for new properties", __func__);
-        return nullptr;
-    }
-    return &mProps[mPropCount++];
-}
-
-// used within the summarizers; return whether property existed
-bool mediametrics::Item::removeProp(const char *name) {
-    const size_t i = findPropIndex(name);
-    if (i < mPropCount) {
-        mProps[i].clear();
-        if (i != mPropCount-1) {
-            // in the middle, bring last one down to fill gap
-            mProps[i].swap(mProps[mPropCount-1]);
-        }
-        mPropCount--;
-        return true;
-    }
-    return false;
-}
-
 // remove indicated keys and their values
 // return value is # keys removed
 size_t mediametrics::Item::filter(size_t n, const char *attrs[]) {
     size_t zapped = 0;
     for (size_t i = 0; i < n; ++i) {
-        const char *name = attrs[i];
-        size_t j = findPropIndex(name);
-        if (j >= mPropCount) {
-            // not there
-            continue;
-        } else if (j + 1 == mPropCount) {
-            // last one, shorten
-            zapped++;
-            mProps[j].clear();
-            mPropCount--;
-        } else {
-            // in the middle, bring last one down and shorten
-            zapped++;
-            mProps[j].clear();
-            mProps[j] = mProps[mPropCount-1];
-            mPropCount--;
-        }
+        zapped += mProps.erase(attrs[i]);
     }
     return zapped;
 }
@@ -238,49 +121,17 @@
 size_t mediametrics::Item::filterNot(size_t n, const char *attrs[]) {
     std::set<std::string> check(attrs, attrs + n);
     size_t zapped = 0;
-    for (size_t j = 0; j < mPropCount;) {
-        if (check.find(mProps[j].getName()) != check.end()) {
-            ++j;
-            continue;
-        }
-        if (j + 1 == mPropCount) {
-            // last one, shorten
-            zapped++;
-            mProps[j].clear();
-            mPropCount--;
-            break;
+    for (auto it = mProps.begin(); it != mProps.end();) {
+        if (check.find(it->first) != check.end()) {
+            ++it;
         } else {
-            // in the middle, bring last one down and shorten
-            zapped++;
-            mProps[j].clear();
-            mProps[j] = mProps[mPropCount-1];
-            mPropCount--;
+            it = mProps.erase(it);
+            ++zapped;
         }
     }
     return zapped;
 }
 
-bool mediametrics::Item::growProps(int increment)
-{
-    if (increment <= 0) {
-        increment = kGrowProps;
-    }
-    int nsize = mPropSize + increment;
-    Prop *ni = (Prop *)realloc(mProps, sizeof(Prop) * nsize);
-
-    if (ni != NULL) {
-        for (int i = mPropSize; i < nsize; i++) {
-            new (&ni[i]) Prop(); // placement new
-        }
-        mProps = ni;
-        mPropSize = nsize;
-        return true;
-    } else {
-        ALOGW("mediametrics::Item::growProps fails");
-        return false;
-    }
-}
-
 // Parcel / serialize things for binder calls
 //
 
@@ -315,10 +166,11 @@
     if (count < 0) return BAD_VALUE;
     mPkgVersionCode = version;
     mTimestamp = timestamp;
-    for (int i = 0; i < count ; i++) {
-        Prop *prop = allocateProp();
-        status_t status = prop->readFromParcel(data);
+    for (int i = 0; i < count; i++) {
+        Prop prop;
+        status_t status = prop.readFromParcel(data);
         if (status != NO_ERROR) return status;
+        mProps[prop.getName()] = std::move(prop);
     }
     return NO_ERROR;
 }
@@ -349,9 +201,9 @@
         ?: data->writeInt64(mTimestamp);
     if (status != NO_ERROR) return status;
 
-    data->writeInt32((int32_t)mPropCount);
-    for (size_t i = 0 ; i < mPropCount; ++i) {
-        status = mProps[i].writeToParcel(data);
+    data->writeInt32((int32_t)mProps.size());
+    for (auto &prop : *this) {
+        status = prop.writeToParcel(data);
         if (status != NO_ERROR) return status;
     }
     return NO_ERROR;
@@ -376,10 +228,10 @@
 
     snprintf(buffer, sizeof(buffer), "[%d:%s:%d:%d:%lld:%s:%zu:",
             version, mKey.c_str(), mPid, mUid, (long long)mTimestamp,
-            mPkgName.c_str(), mPropCount);
+            mPkgName.c_str(), mProps.size());
     result.append(buffer);
-    for (size_t i = 0 ; i < mPropCount; ++i) {
-        mProps[i].toString(buffer, sizeof(buffer));
+    for (auto &prop : *this) {
+        prop.toStringBuffer(buffer, sizeof(buffer));
         result.append(buffer);
     }
     result.append("]");
@@ -390,7 +242,7 @@
 // calls the appropriate daemon
 bool mediametrics::Item::selfrecord() {
     ALOGD_IF(DEBUG_API, "%s: delivering %s", __func__, this->toString().c_str());
-    sp<IMediaMetricsService> svc = getInstance();
+    sp<IMediaMetricsService> svc = getService();
     if (svc != NULL) {
         status_t status = svc->submit(this);
         if (status != NO_ERROR) {
@@ -460,7 +312,7 @@
     */
 
     ALOGD_IF(DEBUG_API, "%s: delivering %zu bytes", __func__, size);
-    sp<IMediaMetricsService> svc = getInstance();
+    sp<IMediaMetricsService> svc = getService();
     if (svc != nullptr) {
         const status_t status = svc->submitBuffer(buffer, size);
         if (status != NO_ERROR) {
@@ -473,7 +325,7 @@
 }
 
 //static
-sp<IMediaMetricsService> BaseItem::getInstance() {
+sp<IMediaMetricsService> BaseItem::getService() {
     static const char *servicename = "media.metrics";
     static const bool enabled = isEnabled(); // singleton initialized
 
@@ -512,115 +364,6 @@
 }
 
 
-// merge the info from 'incoming' into this record.
-// we finish with a union of this+incoming and special handling for collisions
-bool mediametrics::Item::merge(mediametrics::Item *incoming) {
-
-    // if I don't have key or session id, take them from incoming
-    // 'this' should never be missing both of them...
-    if (mKey.empty()) {
-        mKey = incoming->mKey;
-    }
-
-    // for each attribute from 'incoming', resolve appropriately
-    int nattr = incoming->mPropCount;
-    for (int i = 0 ; i < nattr; i++ ) {
-        Prop *iprop = &incoming->mProps[i];
-        const char *p = iprop->mName;
-        size_t len = strlen(p);
-
-        // should ignore a zero length name...
-        if (len == 0) {
-            continue;
-        }
-
-        Prop *oprop = findProp(iprop->mName);
-
-        if (oprop == NULL) {
-            // no oprop, so we insert the new one
-            oprop = allocateProp(p);
-            if (oprop != NULL) {
-                *oprop = *iprop;
-            } else {
-                ALOGW("dropped property '%s'", iprop->mName);
-            }
-        } else {
-            *oprop = *iprop;
-        }
-    }
-
-    // not sure when we'd return false...
-    return true;
-}
-
-namespace {
-
-template <typename T>
-status_t insert(const T& val, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t size = sizeof(val);
-    if (*bufferpptr + size > bufferptrmax) {
-        ALOGE("%s: buffer exceeded with size %zu", __func__, size);
-        return BAD_VALUE;
-    }
-    memcpy(*bufferpptr, &val, size);
-    *bufferpptr += size;
-    return NO_ERROR;
-}
-
-template <>
-status_t insert(const char * const& val, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t size = strlen(val) + 1;
-    if (size > UINT16_MAX || *bufferpptr + size > bufferptrmax) {
-        ALOGE("%s: buffer exceeded with size %zu", __func__, size);
-        return BAD_VALUE;
-    }
-    memcpy(*bufferpptr, val, size);
-    *bufferpptr += size;
-    return NO_ERROR;
-}
-
-template <>
- __unused
-status_t insert(char * const& val, char **bufferpptr, char *bufferptrmax)
-{
-    return insert((const char *)val, bufferpptr, bufferptrmax);
-}
-
-template <typename T>
-status_t extract(T *val, const char **bufferpptr, const char *bufferptrmax)
-{
-    const size_t size = sizeof(*val);
-    if (*bufferpptr + size > bufferptrmax) {
-        ALOGE("%s: buffer exceeded with size %zu", __func__, size);
-        return BAD_VALUE;
-    }
-    memcpy(val, *bufferpptr, size);
-    *bufferpptr += size;
-    return NO_ERROR;
-}
-
-template <>
-status_t extract(char **val, const char **bufferpptr, const char *bufferptrmax)
-{
-    const char *ptr = *bufferpptr;
-    while (*ptr != 0) {
-        if (ptr >= bufferptrmax) {
-            ALOGE("%s: buffer exceeded", __func__);
-            return BAD_VALUE;
-        }
-        ++ptr;
-    }
-    const size_t size = (ptr - *bufferpptr) + 1;
-    *val = (char *)malloc(size);
-    memcpy(*val, *bufferpptr, size);
-    *bufferpptr += size;
-    return NO_ERROR;
-}
-
-} // namespace
-
 status_t mediametrics::Item::writeToByteString(char **pbuffer, size_t *plength) const
 {
     if (pbuffer == nullptr || plength == nullptr)
@@ -647,14 +390,14 @@
     uint32_t size = header_size
         + sizeof(uint32_t) // # properties
         ;
-    for (size_t i = 0 ; i < mPropCount; ++i) {
-        const size_t propSize = mProps[i].getByteStringSize();
+    for (auto &prop : *this) {
+        const size_t propSize = prop.getByteStringSize();
         if (propSize > UINT16_MAX) {
-            ALOGW("%s: prop %zu size %zu too large", __func__, i, propSize);
+            ALOGW("%s: prop %s size %zu too large", __func__, prop.getName(), propSize);
             return INVALID_OPERATION;
         }
         if (__builtin_add_overflow(size, propSize, &size)) {
-            ALOGW("%s: item size overflow at property %zu", __func__, i);
+            ALOGW("%s: item size overflow at property %s", __func__, prop.getName());
             return INVALID_OPERATION;
         }
     }
@@ -674,16 +417,16 @@
             || insert((int32_t)mPid, &filling, buildmax) != NO_ERROR
             || insert((int32_t)mUid, &filling, buildmax) != NO_ERROR
             || insert((int64_t)mTimestamp, &filling, buildmax) != NO_ERROR
-            || insert((uint32_t)mPropCount, &filling, buildmax) != NO_ERROR) {
+            || insert((uint32_t)mProps.size(), &filling, buildmax) != NO_ERROR) {
         ALOGE("%s:could not write header", __func__);  // shouldn't happen
         free(build);
         return INVALID_OPERATION;
     }
-    for (size_t i = 0 ; i < mPropCount; ++i) {
-        if (mProps[i].writeToByteString(&filling, buildmax) != NO_ERROR) {
+    for (auto &prop : *this) {
+        if (prop.writeToByteString(&filling, buildmax) != NO_ERROR) {
             free(build);
             // shouldn't happen
-            ALOGE("%s:could not write prop %zu of %zu", __func__, i, mPropCount);
+            ALOGE("%s:could not write prop %s", __func__, prop.getName());
             return INVALID_OPERATION;
         }
     }
@@ -710,7 +453,7 @@
     uint32_t header_size;
     uint16_t version;
     uint16_t key_size;
-    char *key = nullptr;
+    std::string key;
     int32_t pid;
     int32_t uid;
     int64_t timestamp;
@@ -724,14 +467,12 @@
             || extract(&uid, &read, readend) != NO_ERROR
             || extract(&timestamp, &read, readend) != NO_ERROR
             || size > length
-            || strlen(key) + 1 != key_size
+            || key.size() + 1 != key_size
             || header_size > size) {
-        free(key);
         ALOGW("%s: invalid header", __func__);
         return INVALID_OPERATION;
     }
-    mKey = key;
-    free(key);
+    mKey = std::move(key);
     const size_t pos = read - bufferptr;
     if (pos > header_size) {
         ALOGW("%s: invalid header pos:%zu > header_size:%u",
@@ -750,45 +491,16 @@
     mUid = uid;
     mTimestamp = timestamp;
     for (size_t i = 0; i < propCount; ++i) {
-        Prop *prop = allocateProp();
-        if (prop->readFromByteString(&read, readend) != NO_ERROR) {
+        Prop prop;
+        if (prop.readFromByteString(&read, readend) != NO_ERROR) {
             ALOGW("%s: cannot read prop %zu", __func__, i);
             return INVALID_OPERATION;
         }
+        mProps[prop.getName()] = std::move(prop);
     }
     return NO_ERROR;
 }
 
-status_t mediametrics::Item::Prop::writeToParcel(Parcel *data) const
-{
-   switch (mType) {
-   case kTypeInt32:
-       return data->writeCString(mName)
-               ?: data->writeInt32(mType)
-               ?: data->writeInt32(u.int32Value);
-   case kTypeInt64:
-       return data->writeCString(mName)
-               ?: data->writeInt32(mType)
-               ?: data->writeInt64(u.int64Value);
-   case kTypeDouble:
-       return data->writeCString(mName)
-               ?: data->writeInt32(mType)
-               ?: data->writeDouble(u.doubleValue);
-   case kTypeRate:
-       return data->writeCString(mName)
-               ?: data->writeInt32(mType)
-               ?: data->writeInt64(u.rate.first)
-               ?: data->writeInt64(u.rate.second);
-   case kTypeCString:
-       return data->writeCString(mName)
-               ?: data->writeInt32(mType)
-               ?: data->writeCString(u.CStringValue);
-   default:
-       ALOGE("%s: found bad type: %d, name %s", __func__, mType, mName);
-       return BAD_VALUE;
-   }
-}
-
 status_t mediametrics::Item::Prop::readFromParcel(const Parcel& data)
 {
     const char *key = data.readCString();
@@ -797,239 +509,99 @@
     status_t status = data.readInt32(&type);
     if (status != NO_ERROR) return status;
     switch (type) {
-    case kTypeInt32:
-        status = data.readInt32(&u.int32Value);
-        break;
-    case kTypeInt64:
-        status = data.readInt64(&u.int64Value);
-        break;
-    case kTypeDouble:
-        status = data.readDouble(&u.doubleValue);
-        break;
-    case kTypeCString: {
+    case mediametrics::kTypeInt32: {
+        int32_t value;
+        status = data.readInt32(&value);
+        if (status != NO_ERROR) return status;
+        mElem = value;
+    } break;
+    case mediametrics::kTypeInt64: {
+        int64_t value;
+        status = data.readInt64(&value);
+        if (status != NO_ERROR) return status;
+        mElem = value;
+    } break;
+    case mediametrics::kTypeDouble: {
+        double value;
+        status = data.readDouble(&value);
+        if (status != NO_ERROR) return status;
+        mElem = value;
+    } break;
+    case mediametrics::kTypeCString: {
         const char *s = data.readCString();
         if (s == nullptr) return BAD_VALUE;
-        set(s);
-        break;
-        }
-    case kTypeRate: {
+        mElem = s;
+    } break;
+    case mediametrics::kTypeRate: {
         std::pair<int64_t, int64_t> rate;
         status = data.readInt64(&rate.first)
                 ?: data.readInt64(&rate.second);
-        if (status == NO_ERROR) {
-            set(rate);
-        }
-        break;
-        }
+        if (status != NO_ERROR) return status;
+        mElem = rate;
+    } break;
+    case mediametrics::kTypeNone: {
+        mElem = std::monostate{};
+    } break;
     default:
-        ALOGE("%s: reading bad item type: %d", __func__, mType);
+        ALOGE("%s: reading bad item type: %d", __func__, type);
         return BAD_VALUE;
     }
-    if (status == NO_ERROR) {
-        setName(key);
-        mType = (Type)type;
-    }
-    return status;
-}
-
-void mediametrics::Item::Prop::toString(char *buffer, size_t length) const
-{
-    switch (mType) {
-    case kTypeInt32:
-        snprintf(buffer, length, "%s=%d:", mName, u.int32Value);
-        break;
-    case mediametrics::Item::kTypeInt64:
-        snprintf(buffer, length, "%s=%lld:", mName, (long long)u.int64Value);
-        break;
-    case mediametrics::Item::kTypeDouble:
-        snprintf(buffer, length, "%s=%e:", mName, u.doubleValue);
-        break;
-    case mediametrics::Item::kTypeRate:
-        snprintf(buffer, length, "%s=%lld/%lld:",
-                mName, (long long)u.rate.first, (long long)u.rate.second);
-        break;
-    case mediametrics::Item::kTypeCString:
-        // TODO sanitize string for ':' '='
-        snprintf(buffer, length, "%s=%s:", mName, u.CStringValue);
-        break;
-    default:
-        ALOGE("%s: bad item type: %d for %s", __func__, mType, mName);
-        if (length > 0) buffer[0] = 0;
-        break;
-    }
-}
-
-size_t mediametrics::Item::Prop::getByteStringSize() const
-{
-    const size_t header =
-        sizeof(uint16_t)      // length
-        + sizeof(uint8_t)     // type
-        + strlen(mName) + 1;  // mName + 0 termination
-    size_t payload = 0;
-    switch (mType) {
-    case mediametrics::Item::kTypeInt32:
-        payload = sizeof(u.int32Value);
-        break;
-    case mediametrics::Item::kTypeInt64:
-        payload = sizeof(u.int64Value);
-        break;
-    case mediametrics::Item::kTypeDouble:
-        payload = sizeof(u.doubleValue);
-        break;
-    case mediametrics::Item::kTypeRate:
-        payload = sizeof(u.rate.first) + sizeof(u.rate.second);
-        break;
-    case mediametrics::Item::kTypeCString:
-        payload = strlen(u.CStringValue) + 1;
-        break;
-    default:
-        ALOGE("%s: found bad prop type: %d, name %s",
-                __func__, mType, mName); // no payload computed
-        break;
-    }
-    return header + payload;
-}
-
-
-// TODO: fold into a template later.
-status_t BaseItem::writeToByteString(
-        const char *name, int32_t value, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t len = 2 + 1 + strlen(name) + 1 + sizeof(value);
-    if (len > UINT16_MAX) return BAD_VALUE;
-    return insert((uint16_t)len, bufferpptr, bufferptrmax)
-            ?: insert((uint8_t)kTypeInt32, bufferpptr, bufferptrmax)
-            ?: insert(name, bufferpptr, bufferptrmax)
-            ?: insert(value, bufferpptr, bufferptrmax);
-}
-
-status_t BaseItem::writeToByteString(
-        const char *name, int64_t value, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t len = 2 + 1 + strlen(name) + 1 + sizeof(value);
-    if (len > UINT16_MAX) return BAD_VALUE;
-    return insert((uint16_t)len, bufferpptr, bufferptrmax)
-            ?: insert((uint8_t)kTypeInt64, bufferpptr, bufferptrmax)
-            ?: insert(name, bufferpptr, bufferptrmax)
-            ?: insert(value, bufferpptr, bufferptrmax);
-}
-
-status_t BaseItem::writeToByteString(
-        const char *name, double value, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t len = 2 + 1 + strlen(name) + 1 + sizeof(value);
-    if (len > UINT16_MAX) return BAD_VALUE;
-    return insert((uint16_t)len, bufferpptr, bufferptrmax)
-            ?: insert((uint8_t)kTypeDouble, bufferpptr, bufferptrmax)
-            ?: insert(name, bufferpptr, bufferptrmax)
-            ?: insert(value, bufferpptr, bufferptrmax);
-}
-
-status_t BaseItem::writeToByteString(
-        const char *name, const std::pair<int64_t, int64_t> &value, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t len = 2 + 1 + strlen(name) + 1 + 8 + 8;
-    if (len > UINT16_MAX) return BAD_VALUE;
-    return insert((uint16_t)len, bufferpptr, bufferptrmax)
-            ?: insert((uint8_t)kTypeRate, bufferpptr, bufferptrmax)
-            ?: insert(name, bufferpptr, bufferptrmax)
-            ?: insert(value.first, bufferpptr, bufferptrmax)
-            ?: insert(value.second, bufferpptr, bufferptrmax);
-}
-
-status_t BaseItem::writeToByteString(
-        const char *name, char * const &value, char **bufferpptr, char *bufferptrmax)
-{
-    return writeToByteString(name, (const char *)value, bufferpptr, bufferptrmax);
-}
-
-status_t BaseItem::writeToByteString(
-        const char *name, const char * const &value, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t len = 2 + 1 + strlen(name) + 1 + strlen(value) + 1;
-    if (len > UINT16_MAX) return BAD_VALUE;
-    return insert((uint16_t)len, bufferpptr, bufferptrmax)
-            ?: insert((uint8_t)kTypeCString, bufferpptr, bufferptrmax)
-            ?: insert(name, bufferpptr, bufferptrmax)
-            ?: insert(value, bufferpptr, bufferptrmax);
-}
-
-
-status_t BaseItem::writeToByteString(
-        const char *name, const none_t &, char **bufferpptr, char *bufferptrmax)
-{
-    const size_t len = 2 + 1 + strlen(name) + 1;
-    if (len > UINT16_MAX) return BAD_VALUE;
-    return insert((uint16_t)len, bufferpptr, bufferptrmax)
-            ?: insert((uint8_t)kTypeCString, bufferpptr, bufferptrmax)
-            ?: insert(name, bufferpptr, bufferptrmax);
-}
-
-
-status_t mediametrics::Item::Prop::writeToByteString(
-        char **bufferpptr, char *bufferptrmax) const
-{
-    switch (mType) {
-    case kTypeInt32:
-        return BaseItem::writeToByteString(mName, u.int32Value, bufferpptr, bufferptrmax);
-    case kTypeInt64:
-        return BaseItem::writeToByteString(mName, u.int64Value, bufferpptr, bufferptrmax);
-    case kTypeDouble:
-        return BaseItem::writeToByteString(mName, u.doubleValue, bufferpptr, bufferptrmax);
-    case kTypeRate:
-        return BaseItem::writeToByteString(mName, u.rate, bufferpptr, bufferptrmax);
-    case kTypeCString:
-        return BaseItem::writeToByteString(mName, u.CStringValue, bufferpptr, bufferptrmax);
-    case kTypeNone:
-        return BaseItem::writeToByteString(mName, none_t{}, bufferpptr, bufferptrmax);
-    default:
-        ALOGE("%s: found bad prop type: %d, name %s",
-                __func__, mType, mName);  // no payload sent
-        return BAD_VALUE;
-    }
+    setName(key);
+    return NO_ERROR;
 }
 
 status_t mediametrics::Item::Prop::readFromByteString(
         const char **bufferpptr, const char *bufferptrmax)
 {
     uint16_t len;
-    char *name;
+    std::string name;
     uint8_t type;
     status_t status = extract(&len, bufferpptr, bufferptrmax)
             ?: extract(&type, bufferpptr, bufferptrmax)
             ?: extract(&name, bufferpptr, bufferptrmax);
     if (status != NO_ERROR) return status;
-    if (mName != nullptr) {
-        free(mName);
-    }
-    mName = name;
-    if (mType == kTypeCString) {
-        free(u.CStringValue);
-        u.CStringValue = nullptr;
-    }
-    mType = (Type)type;
-    switch (mType) {
-    case kTypeInt32:
-        return extract(&u.int32Value, bufferpptr, bufferptrmax);
-    case kTypeInt64:
-        return extract(&u.int64Value, bufferpptr, bufferptrmax);
-    case kTypeDouble:
-        return extract(&u.doubleValue, bufferpptr, bufferptrmax);
-    case kTypeRate:
-        return extract(&u.rate.first, bufferpptr, bufferptrmax)
-                ?: extract(&u.rate.second, bufferpptr, bufferptrmax);
-    case kTypeCString:
-        status = extract(&u.CStringValue, bufferpptr, bufferptrmax);
-        if (status != NO_ERROR) mType = kTypeNone;
-        return status;
-    case kTypeNone:
-        return NO_ERROR;
+    switch (type) {
+    case mediametrics::kTypeInt32: {
+        int32_t value;
+        status = extract(&value, bufferpptr, bufferptrmax);
+        if (status != NO_ERROR) return status;
+        mElem = value;
+    } break;
+    case mediametrics::kTypeInt64: {
+        int64_t value;
+        status = extract(&value, bufferpptr, bufferptrmax);
+        if (status != NO_ERROR) return status;
+        mElem = value;
+    } break;
+    case mediametrics::kTypeDouble: {
+        double value;
+        status = extract(&value, bufferpptr, bufferptrmax);
+        if (status != NO_ERROR) return status;
+        mElem = value;
+    } break;
+    case mediametrics::kTypeRate: {
+        std::pair<int64_t, int64_t> value;
+        status = extract(&value.first, bufferpptr, bufferptrmax)
+                ?: extract(&value.second, bufferpptr, bufferptrmax);
+        if (status != NO_ERROR) return status;
+        mElem = value;
+    } break;
+    case mediametrics::kTypeCString: {
+        std::string value;
+        status = extract(&value, bufferpptr, bufferptrmax);
+        if (status != NO_ERROR) return status;
+        mElem = std::move(value);
+    } break;
+    case mediametrics::kTypeNone: {
+        mElem = std::monostate{};
+    } break;
     default:
-        mType = kTypeNone;
         ALOGE("%s: found bad prop type: %d, name %s",
-                __func__, mType, mName);  // no payload sent
+                __func__, (int)type, name.c_str());  // no payload sent
         return BAD_VALUE;
     }
+    mName = name;
+    return NO_ERROR;
 }
 
 } // namespace android::mediametrics
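The rewritten Prop code above stores its value directly in a std::variant and lets the held alternative stand in for the old type tag. A minimal standalone sketch of that pattern (illustrative only, not the library code):

#include <cstdint>
#include <iostream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>

// Same alternatives as Prop::Elem in MediaMetricsItem.h.
using Elem = std::variant<
        std::monostate,                // kTypeNone
        int32_t,                       // kTypeInt32
        int64_t,                       // kTypeInt64
        double,                        // kTypeDouble
        std::string,                   // kTypeCString
        std::pair<int64_t, int64_t>>;  // kTypeRate

int main() {
    Elem e = std::string("codec=aac");
    // The held alternative replaces the manual union/type-tag bookkeeping.
    std::visit([](const auto &value) {
        using T = std::decay_t<decltype(value)>;
        if constexpr (std::is_same_v<T, std::monostate>) {
            std::cout << "none\n";
        } else if constexpr (std::is_same_v<T, std::string>) {
            std::cout << "cstring: " << value << '\n';
        } else if constexpr (std::is_same_v<T, std::pair<int64_t, int64_t>>) {
            std::cout << "rate: " << value.first << '/' << value.second << '\n';
        } else {
            std::cout << "numeric: " << value << '\n';
        }
    }, e);
    return 0;
}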
diff --git a/media/libmediametrics/include/MediaMetricsItem.h b/media/libmediametrics/include/MediaMetricsItem.h
index 5f33650..f844c82 100644
--- a/media/libmediametrics/include/MediaMetricsItem.h
+++ b/media/libmediametrics/include/MediaMetricsItem.h
@@ -14,15 +14,18 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_MEDIA_MEDIAANALYTICSITEM_H
-#define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
+#ifndef ANDROID_MEDIA_MEDIAMETRICSITEM_H
+#define ANDROID_MEDIA_MEDIAMETRICSITEM_H
 
 #include "MediaMetrics.h"
 
 #include <algorithm>
+#include <map>
 #include <string>
 #include <sys/types.h>
+#include <variant>
 
+#include <binder/Parcel.h>
 #include <cutils/properties.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
@@ -38,7 +41,12 @@
 /*
  * MediaMetrics Item
  *
- * Byte string format.
+ * The MediaMetrics Item allows get/set operations and recording to the service.
+ *
+ * The MediaMetrics LogItem is a faster logging variant. It allows set operations only,
+ * and then recording to the service.
+ *
+ * The Byte String format is as follows:
  *
  * For Java
  *  int64 corresponds to long
@@ -46,10 +54,16 @@
  *  uint16 corresponds to char
  *  uint8, int8 corresponds to byte
  *
- * Hence uint8 and uint32 values are limited to INT8_MAX and INT32_MAX.
+ * For items transmitted from Java, uint8 and uint32 values are limited
+ * to INT8_MAX and INT32_MAX.  This constrains the size of large items
+ * to 2GB, which is consistent with ByteBuffer max size. A native item
+ * can conceivably have a size of 4GB.
  *
  * Physical layout of integers and doubles within the MediaMetrics byte string
- * is in Native / host order, which is nearly always little endian.
+ * is in Native / host order, which is usually little endian.
+ *
+ * Note that primitive data (ints, doubles) within a Byte String has
+ * no extra padding or alignment requirements, the same as ByteBuffer.
  *
  * -- begin of item
  * -- begin of header
@@ -57,7 +71,7 @@
  * (uint32) header size, including the item size and header size fields.
  * (uint16) version: exactly 0
  * (uint16) key size, that is key strlen + 1 for zero termination.
- * (int8)+ key string which is 0 terminated
+ * (int8)+ key, a string which is 0 terminated (UTF-8).
  * (int32) pid
  * (int32) uid
  * (int64) timestamp
@@ -72,10 +86,12 @@
  *       (int32)
  *       (int64)
  *       (double)
- *       (int8)+ for cstring, including 0 termination
+ *       (int8)+ for TYPE_CSTRING, including 0 termination
  *       (int64, int64) for rate
  * -- end body
  * -- end of item
+ *
+ * The Byte String format must match MediaMetrics.java.
  */
 
 namespace mediametrics {
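As a rough illustration of the item layout documented above, a standalone header parse could look like the following. readLE() and parseItemHeader() are hypothetical helpers written for this sketch; the real code uses BaseItem::extract().

#include <cstdint>
#include <cstring>
#include <string>

template <typename T>
static bool readLE(const char **p, const char *end, T *out) {
    if (*p + sizeof(T) > end) return false;   // bounds check
    memcpy(out, *p, sizeof(T));               // native/host order, no padding
    *p += sizeof(T);
    return true;
}

static bool parseItemHeader(const char *buffer, size_t size, std::string *key) {
    const char *p = buffer;
    const char *end = buffer + size;
    uint32_t itemSize, headerSize, propertyCount;
    uint16_t version, keySize;
    int32_t pid, uid;
    int64_t timestamp;
    if (!readLE(&p, end, &itemSize) || !readLE(&p, end, &headerSize)
            || !readLE(&p, end, &version) || !readLE(&p, end, &keySize)) return false;
    if (keySize == 0 || p + keySize > end) return false;
    key->assign(p, keySize - 1);              // key size includes the 0 terminator
    p += keySize;
    return readLE(&p, end, &pid) && readLE(&p, end, &uid)
            && readLE(&p, end, &timestamp) && readLE(&p, end, &propertyCount);
}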
@@ -90,6 +106,26 @@
     kTypeRate = 5,
 };
 
+/**
+ * The MediaMetrics Item has special Item properties,
+ * derived internally or through dedicated setters.
+ *
+ * For consistency we use the following keys to represent
+ * these special Item properties when in a generic Bundle
+ * or in a std::map.
+ *
+ * These values must match MediaMetrics.java
+ */
+static inline constexpr const char *BUNDLE_TOTAL_SIZE = "_totalSize";
+static inline constexpr const char *BUNDLE_HEADER_SIZE = "_headerSize";
+static inline constexpr const char *BUNDLE_VERSION = "_version";
+static inline constexpr const char *BUNDLE_KEY_SIZE = "_keySize";
+static inline constexpr const char *BUNDLE_KEY = "_key";
+static inline constexpr const char *BUNDLE_PID = "_pid";
+static inline constexpr const char *BUNDLE_UID = "_uid";
+static inline constexpr const char *BUNDLE_TIMESTAMP = "_timestamp";
+static inline constexpr const char *BUNDLE_PROPERTY_COUNT = "_propertyCount";
+
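For instance, a consumer that has already parsed an item header might expose those fields in a generic map under the shared key names (sketch only; pid, uid, timestampNs and propertyCount are assumed to come from the parsed header):

std::map<std::string, int64_t> header;
header[BUNDLE_PID] = pid;
header[BUNDLE_UID] = uid;
header[BUNDLE_TIMESTAMP] = timestampNs;
header[BUNDLE_PROPERTY_COUNT] = propertyCount;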
 template<size_t N>
 static inline bool startsWith(const std::string &s, const char (&comp)[N]) {
     return !strncmp(s.c_str(), comp, N-1);
@@ -106,8 +142,9 @@
     friend class MediaMetricsDeathNotifier; // for dropInstance
     // enabled 1, disabled 0
 public:
-    // are we collecting analytics data
+    // are we collecting metrics data
     static bool isEnabled();
+    static sp<IMediaMetricsService> getService();
 
 protected:
     static constexpr const char * const EnabledProperty = "media.metrics.enabled";
@@ -116,43 +153,253 @@
 
     // let's reuse a binder connection
     static sp<IMediaMetricsService> sMediaMetricsService;
-    static sp<IMediaMetricsService> getInstance();
+
     static void dropInstance();
     static bool submitBuffer(const char *buffer, size_t len);
 
-    static status_t writeToByteString(
-            const char *name, int32_t value, char **bufferpptr, char *bufferptrmax);
-    static status_t writeToByteString(
-            const char *name, int64_t value, char **bufferpptr, char *bufferptrmax);
-    static status_t writeToByteString(
-            const char *name, double value, char **bufferpptr, char *bufferptrmax);
-    static status_t writeToByteString(
-            const char *name, const std::pair<int64_t, int64_t> &value,
-            char **bufferpptr, char *bufferptrmax);
-    static status_t writeToByteString(
-            const char *name, char * const &value, char **bufferpptr, char *bufferptrmax);
-    static status_t writeToByteString(
-            const char *name, const char * const &value, char **bufferpptr, char *bufferptrmax);
-    struct none_t {}; // for kTypeNone
-    static status_t writeToByteString(
-            const char *name, const none_t &, char **bufferpptr, char *bufferptrmax);
+    template <typename T>
+    struct is_item_type {
+        static constexpr inline bool value =
+             std::is_same<T, int32_t>::value
+             || std::is_same<T, int64_t>::value
+             || std::is_same<T, double>::value
+             || std::is_same<T, std::pair<int64_t, int64_t>>:: value
+             || std::is_same<T, std::string>::value
+             || std::is_same<T, std::monostate>::value;
+    };
 
-    template<typename T>
-    static status_t sizeOfByteString(const char *name, const T& value) {
+    template <typename T>
+    struct get_type_of {
+        static_assert(is_item_type<T>::value);
+        static constexpr inline Type value =
+             std::is_same<T, int32_t>::value ? kTypeInt32
+             : std::is_same<T, int64_t>::value ? kTypeInt64
+             : std::is_same<T, double>::value ? kTypeDouble
+             : std::is_same<T, std::pair<int64_t, int64_t>>:: value ? kTypeRate
+             : std::is_same<T, std::string>::value ? kTypeCString
+             : std::is_same<T, std::monostate>::value ? kTypeNone
+         : kTypeNone;
+    };
+
+    template <typename T>
+    static size_t sizeOfByteString(const char *name, const T& value) {
+        static_assert(is_item_type<T>::value);
         return 2 + 1 + strlen(name) + 1 + sizeof(value);
     }
-    template<> // static
-    status_t sizeOfByteString(const char *name, char * const &value) {
-        return 2 + 1 + strlen(name) + 1 + strlen(value) + 1;
+    template <> // static
+    size_t sizeOfByteString(const char *name, const std::string& value) {
+        return 2 + 1 + strlen(name) + 1 + value.size() + 1;
     }
-    template<> // static
-    status_t sizeOfByteString(const char *name, const char * const &value) {
-        return 2 + 1 + strlen(name) + 1 + strlen(value) + 1;
-    }
-    template<> // static
-    status_t sizeOfByteString(const char *name, const none_t &) {
+    template <> // static
+    size_t sizeOfByteString(const char *name, const std::monostate&) {
          return 2 + 1 + strlen(name) + 1;
     }
+    // for speed
+    static size_t sizeOfByteString(const char *name, const char *value) {
+        return 2 + 1 + strlen(name) + 1 + strlen(value) + 1;
+    }
+
+    template <typename T>
+    static status_t insert(const T& val, char **bufferpptr, char *bufferptrmax) {
+        static_assert(std::is_trivially_constructible<T>::value);
+        const size_t size = sizeof(val);
+        if (*bufferpptr + size > bufferptrmax) {
+            ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+            return BAD_VALUE;
+        }
+        memcpy(*bufferpptr, &val, size);
+        *bufferpptr += size;
+        return NO_ERROR;
+    }
+    template <> // static
+    status_t insert(const std::string& val, char **bufferpptr, char *bufferptrmax) {
+        const size_t size = val.size() + 1;
+        if (size > UINT16_MAX || *bufferpptr + size > bufferptrmax) {
+            ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+            return BAD_VALUE;
+        }
+        memcpy(*bufferpptr, val.c_str(), size);
+        *bufferpptr += size;
+        return NO_ERROR;
+    }
+    template <> // static
+    status_t insert(const std::pair<int64_t, int64_t>& val,
+            char **bufferpptr, char *bufferptrmax) {
+        const size_t size = sizeof(val.first) + sizeof(val.second);
+        if (*bufferpptr + size > bufferptrmax) {
+            ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+            return BAD_VALUE;
+        }
+        memcpy(*bufferpptr, &val.first, sizeof(val.first));
+        memcpy(*bufferpptr + sizeof(val.first), &val.second, sizeof(val.second));
+        *bufferpptr += size;
+        return NO_ERROR;
+    }
+    template <> // static
+    status_t insert(const std::monostate&, char **, char *) {
+        return NO_ERROR;
+    }
+    // for speed
+    static status_t insert(const char *val, char **bufferpptr, char *bufferptrmax) {
+        const size_t size = strlen(val) + 1;
+        if (size > UINT16_MAX || *bufferpptr + size > bufferptrmax) {
+            ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+            return BAD_VALUE;
+        }
+        memcpy(*bufferpptr, val, size);
+        *bufferpptr += size;
+        return NO_ERROR;
+    }
+
+    template <typename T>
+    static status_t writeToByteString(
+            const char *name, const T& value, char **bufferpptr, char *bufferptrmax) {
+        static_assert(is_item_type<T>::value);
+        const size_t len = sizeOfByteString(name, value);
+        if (len > UINT16_MAX) return BAD_VALUE;
+        return insert((uint16_t)len, bufferpptr, bufferptrmax)
+                ?: insert((uint8_t)get_type_of<T>::value, bufferpptr, bufferptrmax)
+                ?: insert(name, bufferpptr, bufferptrmax)
+                ?: insert(value, bufferpptr, bufferptrmax);
+    }
+    // for speed
+    static status_t writeToByteString(
+            const char *name, const char *value, char **bufferpptr, char *bufferptrmax) {
+        const size_t len = sizeOfByteString(name, value);
+        if (len > UINT16_MAX) return BAD_VALUE;
+        return insert((uint16_t)len, bufferpptr, bufferptrmax)
+                ?: insert((uint8_t)kTypeCString, bufferpptr, bufferptrmax)
+                ?: insert(name, bufferpptr, bufferptrmax)
+                ?: insert(value, bufferpptr, bufferptrmax);
+    }
+
+    template <typename T>
+    static void toStringBuffer(
+            const char *name, const T& value, char *buffer, size_t length) = delete;
+    template <> // static
+    void toStringBuffer(
+            const char *name, const int32_t& value, char *buffer, size_t length) {
+        snprintf(buffer, length, "%s=%d:", name, value);
+    }
+    template <> // static
+    void toStringBuffer(
+            const char *name, const int64_t& value, char *buffer, size_t length) {
+        snprintf(buffer, length, "%s=%lld:", name, (long long)value);
+    }
+    template <> // static
+    void toStringBuffer(
+            const char *name, const double& value, char *buffer, size_t length) {
+        snprintf(buffer, length, "%s=%e:", name, value);
+    }
+    template <> // static
+    void toStringBuffer(
+            const char *name, const std::pair<int64_t, int64_t>& value,
+            char *buffer, size_t length) {
+        snprintf(buffer, length, "%s=%lld/%lld:",
+                name, (long long)value.first, (long long)value.second);
+    }
+    template <> // static
+    void toStringBuffer(
+            const char *name, const std::string& value, char *buffer, size_t length) {
+        // TODO sanitize string for ':' '='
+        snprintf(buffer, length, "%s=%s:", name, value.c_str());
+    }
+    template <> // static
+    void toStringBuffer(
+            const char *name, const std::monostate&, char *buffer, size_t length) {
+        snprintf(buffer, length, "%s=():", name);
+    }
+
+    template <typename T>
+    static status_t writeToParcel(
+            const char *name, const T& value, Parcel *parcel) = delete;
+    template <> // static
+    status_t writeToParcel(
+            const char *name, const int32_t& value, Parcel *parcel) {
+        return parcel->writeCString(name)
+               ?: parcel->writeInt32(get_type_of<int32_t>::value)
+               ?: parcel->writeInt32(value);
+    }
+    template <> // static
+    status_t writeToParcel(
+            const char *name, const int64_t& value, Parcel *parcel) {
+        return parcel->writeCString(name)
+               ?: parcel->writeInt32(get_type_of<int64_t>::value)
+               ?: parcel->writeInt64(value);
+    }
+    template <> // static
+    status_t writeToParcel(
+            const char *name, const double& value, Parcel *parcel) {
+        return parcel->writeCString(name)
+               ?: parcel->writeInt32(get_type_of<double>::value)
+               ?: parcel->writeDouble(value);
+    }
+    template <> // static
+    status_t writeToParcel(
+            const char *name, const std::pair<int64_t, int64_t>& value, Parcel *parcel) {
+        return parcel->writeCString(name)
+               ?: parcel->writeInt32(get_type_of< std::pair<int64_t, int64_t>>::value)
+               ?: parcel->writeInt64(value.first)
+               ?: parcel->writeInt64(value.second);
+    }
+    template <> // static
+    status_t writeToParcel(
+            const char *name, const std::string& value, Parcel *parcel) {
+        return parcel->writeCString(name)
+               ?: parcel->writeInt32(get_type_of<std::string>::value)
+               ?: parcel->writeCString(value.c_str());
+    }
+    template <> // static
+    status_t writeToParcel(
+            const char *name, const std::monostate&, Parcel *parcel) {
+        return parcel->writeCString(name)
+               ?: parcel->writeInt32(get_type_of<std::monostate>::value);
+    }
+
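On the read side, each property arrives as the (name, type, value) triple written above; a reader first pulls the name and type, then dispatches on the type, as readFromParcel() does in the .cpp change earlier. A hypothetical standalone helper (assuming the usual binder and utils headers are available):

static status_t readPropHeader(const Parcel &data, std::string *name, int32_t *type) {
    const char *s = data.readCString();
    if (s == nullptr) return BAD_VALUE;
    *name = s;
    return data.readInt32(type);   // caller switches on *type to read the payload
}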
+    template <typename T>
+    static status_t extract(T *val, const char **bufferpptr, const char *bufferptrmax) {
+        static_assert(std::is_trivially_constructible<T>::value);
+        const size_t size = sizeof(*val);
+        if (*bufferpptr + size > bufferptrmax) {
+            ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+            return BAD_VALUE;
+        }
+        memcpy(val, *bufferpptr, size);
+        *bufferpptr += size;
+        return NO_ERROR;
+    }
+    template <> // static
+    status_t extract(std::string *val, const char **bufferpptr, const char *bufferptrmax) {
+        const char *ptr = *bufferpptr;
+        // check the bound before dereferencing so we never read past the buffer
+        while (ptr < bufferptrmax && *ptr != 0) {
+            ++ptr;
+        }
+        if (ptr >= bufferptrmax) {
+            ALOGE("%s: buffer exceeded", __func__);
+            return BAD_VALUE;
+        }
+        const size_t size = (ptr - *bufferpptr) + 1;
+        *val = *bufferpptr;
+        *bufferpptr += size;
+        return NO_ERROR;
+    }
+    template <> // static
+    status_t extract(std::pair<int64_t, int64_t> *val,
+            const char **bufferpptr, const char *bufferptrmax) {
+        const size_t size = sizeof(val->first) + sizeof(val->second);
+        if (*bufferpptr + size > bufferptrmax) {
+            ALOGE("%s: buffer exceeded with size %zu", __func__, size);
+            return BAD_VALUE;
+        }
+        memcpy(&val->first, *bufferpptr, sizeof(val->first));
+        memcpy(&val->second, *bufferpptr + sizeof(val->first), sizeof(val->second));
+        *bufferpptr += size;
+        return NO_ERROR;
+    }
+    template <> // static
+    status_t extract(std::monostate *, const char **, const char *) {
+        return NO_ERROR;
+    }
 };
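The helpers above define the on-wire form of a single property: a uint16 total length, a uint8 type, the 0-terminated name, then the payload. A small sketch of writing one int32 property with local helpers (not the protected BaseItem API):

#include <cstdint>
#include <cstring>

static void putBytes(char **p, const void *src, size_t n) {
    memcpy(*p, src, n);
    *p += n;
}

static size_t writeInt32Prop(const char *name, int32_t value, char *out) {
    const uint16_t len = (uint16_t)(2 + 1 + strlen(name) + 1 + sizeof(value));
    const uint8_t type = 1;                    // kTypeInt32
    char *p = out;
    putBytes(&p, &len, sizeof(len));           // (uint16) total property size
    putBytes(&p, &type, sizeof(type));         // (uint8) type
    putBytes(&p, name, strlen(name) + 1);      // 0-terminated name
    putBytes(&p, &value, sizeof(value));       // payload
    return p - out;
}

Reading it back follows the same order, which is exactly how Prop::readFromByteString() dispatches on the type byte in the .cpp change above.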
 
 /**
@@ -329,7 +576,7 @@
 };
 
 /**
- * MediaMetrics LogItem is a stack allocated media analytics item used for
+ * MediaMetrics LogItem is a stack allocated mediametrics item used for
  * fast logging.  It falls over to a malloc if needed.
  *
  * This is templated with a buffer size to allocate on the stack.
@@ -370,23 +617,8 @@
  * The Item is designed for the service as it has getters.
  */
 class Item : public mediametrics::BaseItem {
-    friend class MediaMetricsJNI;           // TODO: remove this access
-
 public:
 
-     // TODO: remove this duplicate definition when frameworks base is updated.
-            enum Type {
-                kTypeNone = 0,
-                kTypeInt32 = 1,
-                kTypeInt64 = 2,
-                kTypeDouble = 3,
-                kTypeCString = 4,
-                kTypeRate = 5,
-            };
-
-    static constexpr const char * const kKeyNone = "none";
-    static constexpr const char * const kKeyAny = "any";
-
         enum {
             PROTO_V0 = 0,
             PROTO_FIRST = PROTO_V0,
@@ -400,21 +632,14 @@
         : mKey(key) { }
     Item() = default;
 
-    Item(const Item&) = delete;
-    Item &operator=(const Item&) = delete;
-
     bool operator==(const Item& other) const {
-        if (mPropCount != other.mPropCount
-            || mPid != other.mPid
+        if (mPid != other.mPid
             || mUid != other.mUid
             || mPkgName != other.mPkgName
             || mPkgVersionCode != other.mPkgVersionCode
             || mKey != other.mKey
-            || mTimestamp != other.mTimestamp) return false;
-         for (size_t i = 0; i < mPropCount; ++i) {
-             Prop *p = other.findProp(mProps[i].getName());
-             if (p == nullptr || mProps[i] != *p) return false;
-         }
+            || mTimestamp != other.mTimestamp
+            || mProps != other.mProps) return false;
          return true;
     }
     bool operator!=(const Item& other) const {
@@ -435,9 +660,17 @@
         // access functions for the class
         ~Item();
 
-        // reset all contents, discarding any extra data
-        void clear();
-        Item *dup();
+    void clear() {
+        mPid = -1;
+        mUid = -1;
+        mPkgName.clear();
+        mPkgVersionCode = 0;
+        mTimestamp = 0;
+        mKey.clear();
+        mProps.clear();
+    }
+
+    Item *dup() const { return new Item(*this); }
 
     Item &setKey(const char *key) {
         mKey = key;
@@ -446,11 +679,11 @@
     const std::string& getKey() const { return mKey; }
 
     // # of properties in the record
-    size_t count() const { return mPropCount; }
+    size_t count() const { return mProps.size(); }
 
     template<typename S, typename T>
     Item &set(S key, T value) {
-        allocateProp(key)->set(value);
+        findOrAllocateProp(key).set(value);
         return *this;
     }
 
@@ -475,7 +708,7 @@
     // type-mismatch counts as "wasn't there".
     template<typename S, typename T>
     Item &add(S key, T value) {
-        allocateProp(key)->add(value);
+        findOrAllocateProp(key).add(value);
         return *this;
     }
 
@@ -497,7 +730,7 @@
     // NULL parameter value suppresses storage of value.
     template<typename S, typename T>
     bool get(S key, T *value) const {
-        Prop *prop = findProp(key);
+        const Prop *prop = findProp(key);
         return prop != nullptr && prop->get(value);
     }
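A usage sketch of the set/add/get surface, showing the "type mismatch counts as not present" rule (assumes the android::mediametrics namespace is in scope):

mediametrics::Item item("audiotrack");
item.set("framecount", (int64_t)1024)
    .set("samplerate", (int32_t)48000)
    .add("framecount", (int64_t)512);          // accumulates to 1536

int64_t frames = 0;
bool found = item.get("framecount", &frames);  // true, frames == 1536

int32_t wrongType = 0;
bool ok = item.get("framecount", &wrongType);  // false: stored as int64_t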
 
@@ -526,9 +759,9 @@
     }
     // Caller owns the returned string
     bool getCString(const char *key, char **value) const {
-        const char *cs;
-        if (get(key, &cs)) {
-            *value = cs != nullptr ? strdup(cs) : nullptr;
+        std::string s;
+        if (get(key, &s)) {
+            *value = strdup(s.c_str());
             return true;
         }
         return false;
@@ -582,362 +815,192 @@
         const char *toCString();
         const char *toCString(int version);
 
-    protected:
-
-        // merge fields from arg into this
-        // with rules for first/last/add, etc
-        // XXX: document semantics and how they are indicated
-        // caller continues to own 'incoming'
-        bool merge(Item *incoming);
-
 private:
     // handle Parcel version 0
     int32_t writeToParcel0(Parcel *) const;
     int32_t readFromParcel0(const Parcel&);
 
-
-
-    // checks equality even with nullptr.
-    static bool stringEquals(const char *a, const char *b) {
-        if (a == nullptr) {
-            return b == nullptr;
-        } else {
-            return b != nullptr && strcmp(a, b) == 0;
-        }
-    }
-
 public:
 
     class Prop {
-    friend class MediaMetricsJNI;           // TODO: remove this access
     public:
+        using Elem = std::variant<
+                std::monostate,               // kTypeNone
+                int32_t,                      // kTypeInt32
+                int64_t,                      // kTypeInt64
+                double,                       // kTypeDouble
+                std::string,                  // kTypeCString
+                std::pair<int64_t, int64_t>   // kTypeRate
+                >;
+
         Prop() = default;
         Prop(const Prop& other) {
            *this = other;
         }
         Prop& operator=(const Prop& other) {
-            if (other.mName != nullptr) {
-                mName = strdup(other.mName);
-            } else {
-                mName = nullptr;
-            }
-            mType = other.mType;
-            switch (mType) {
-            case kTypeInt32:
-                u.int32Value = other.u.int32Value;
-                break;
-            case kTypeInt64:
-                u.int64Value = other.u.int64Value;
-                break;
-            case kTypeDouble:
-                u.doubleValue = other.u.doubleValue;
-                break;
-            case kTypeCString:
-                u.CStringValue = strdup(other.u.CStringValue);
-                break;
-            case kTypeRate:
-                u.rate = other.u.rate;
-                break;
-            case kTypeNone:
-                break;
-            default:
-                // abort?
-                break;
-            }
+            mName = other.mName;
+            mElem = other.mElem;
             return *this;
         }
+        Prop(Prop&& other) {
+            *this = std::move(other);
+        }
+        Prop& operator=(Prop&& other) {
+            mName = std::move(other.mName);
+            mElem = std::move(other.mElem);
+            return *this;
+        }
+
         bool operator==(const Prop& other) const {
-            if (!stringEquals(mName, other.mName)
-                    || mType != other.mType) return false;
-            switch (mType) {
-            case kTypeInt32:
-                return u.int32Value == other.u.int32Value;
-            case kTypeInt64:
-                return u.int64Value == other.u.int64Value;
-            case kTypeDouble:
-                return u.doubleValue == other.u.doubleValue;
-            case kTypeCString:
-                return stringEquals(u.CStringValue, other.u.CStringValue);
-            case kTypeRate:
-                return u.rate == other.u.rate;
-            case kTypeNone:
-            default:
-                return true;
-            }
+            return mName == other.mName && mElem == other.mElem;
         }
         bool operator!=(const Prop& other) const {
             return !(*this == other);
         }
 
         void clear() {
-            free(mName);
-            mName = nullptr;
-            clearValue();
+            mName.clear();
+            mElem = std::monostate{};
         }
         void clearValue() {
-            if (mType == kTypeCString) {
-                free(u.CStringValue);
-                u.CStringValue = nullptr;
-            }
-            mType = kTypeNone;
-        }
-
-        Type getType() const {
-            return mType;
+            mElem = std::monostate{};
         }
 
         const char *getName() const {
-            return mName;
+            return mName.c_str();
         }
 
         void swap(Prop& other) {
             std::swap(mName, other.mName);
-            std::swap(mType, other.mType);
-            std::swap(u, other.u);
+            std::swap(mElem, other.mElem);
         }
 
         void setName(const char *name) {
-            free(mName);
-            if (name != nullptr) {
-                mName = strdup(name);
-            } else {
-                mName = nullptr;
-            }
+            mName = name;
         }
 
         bool isNamed(const char *name) const {
-            return stringEquals(name, mName);
+            return mName == name;
         }
 
         template <typename T> void visit(T f) const {
-            switch (mType) {
-            case Item::kTypeInt32:
-                f(u.int32Value);
-                return;
-            case Item::kTypeInt64:
-                f(u.int64Value);
-                return;
-            case Item::kTypeDouble:
-                f(u.doubleValue);
-                return;
-            case Item::kTypeRate:
-                f(u.rate);
-                return;
-            case Item::kTypeCString:
-                f(u.CStringValue);
-                return;
-            default:
-                return;
+            std::visit(f, mElem);
+        }
+
+        template <typename T> bool get(T *value) const {
+            auto pval = std::get_if<T>(&mElem);
+            if (pval != nullptr) {
+                *value = *pval;
+                return true;
+            }
+            return false;
+        }
+
+        template <typename T> void set(const T& value) {
+            mElem = value;
+        }
+
+        template <typename T> void add(const T& value) {
+            auto pval = std::get_if<T>(&mElem);
+            if (pval != nullptr) {
+                *pval += value;
+            } else {
+                mElem = value;
             }
         }
 
-        template <typename T> bool get(T *value) const = delete;
-        template <>
-        bool get(int32_t *value) const {
-           if (mType != kTypeInt32) return false;
-           if (value != nullptr) *value = u.int32Value;
-           return true;
-        }
-        template <>
-        bool get(int64_t *value) const {
-           if (mType != kTypeInt64) return false;
-           if (value != nullptr) *value = u.int64Value;
-           return true;
-        }
-        template <>
-        bool get(double *value) const {
-           if (mType != kTypeDouble) return false;
-           if (value != nullptr) *value = u.doubleValue;
-           return true;
-        }
-        template <>
-        bool get(const char** value) const {
-            if (mType != kTypeCString) return false;
-            if (value != nullptr) *value = u.CStringValue;
-            return true;
-        }
-        template <>
-        bool get(std::string* value) const {
-            if (mType != kTypeCString) return false;
-            if (value != nullptr) *value = u.CStringValue;
-            return true;
-        }
-        template <>
-        bool get(std::pair<int64_t, int64_t> *value) const {
-           if (mType != kTypeRate) return false;
-           if (value != nullptr) {
-               *value = u.rate;
-           }
-           return true;
-        }
-
-        template <typename T> void set(const T& value) = delete;
-        template <>
-        void set(const int32_t& value) {
-            mType = kTypeInt32;
-            u.int32Value = value;
-        }
-        template <>
-        void set(const int64_t& value) {
-            mType = kTypeInt64;
-            u.int64Value = value;
-        }
-        template <>
-        void set(const double& value) {
-            mType = kTypeDouble;
-            u.doubleValue = value;
-        }
-        template <>
-        void set(const char* const& value) {
-            if (mType == kTypeCString) {
-                free(u.CStringValue);
+        template <> void add(const std::pair<int64_t, int64_t>& value) {
+            auto pval = std::get_if<std::pair<int64_t, int64_t>>(&mElem);
+            if (pval != nullptr) {
+                pval->first += value.first;
+                pval->second += value.second;
             } else {
-                mType = kTypeCString;
-            }
-            if (value == nullptr) {
-                u.CStringValue = nullptr;
-            } else {
-                size_t len = strlen(value);
-                if (len > UINT16_MAX - 1) {
-                    len = UINT16_MAX - 1;
-                }
-                u.CStringValue = (char *)malloc(len + 1);
-                strncpy(u.CStringValue, value, len);
-                u.CStringValue[len] = 0;
-            }
-        }
-        template <>
-        void set(const std::pair<int64_t, int64_t> &value) {
-            mType = kTypeRate;
-            u.rate = {value.first, value.second};
-        }
-
-        template <typename T> void add(const T& value) = delete;
-        template <>
-        void add(const int32_t& value) {
-            if (mType == kTypeInt32) {
-                u.int32Value += value;
-            } else {
-                mType = kTypeInt32;
-                u.int32Value = value;
-            }
-        }
-        template <>
-        void add(const int64_t& value) {
-            if (mType == kTypeInt64) {
-                u.int64Value += value;
-            } else {
-                mType = kTypeInt64;
-                u.int64Value = value;
-            }
-        }
-        template <>
-        void add(const double& value) {
-            if (mType == kTypeDouble) {
-                u.doubleValue += value;
-            } else {
-                mType = kTypeDouble;
-                u.doubleValue = value;
-            }
-        }
-        template <>
-        void add(const std::pair<int64_t, int64_t>& value) {
-            if (mType == kTypeRate) {
-                u.rate.first += value.first;
-                u.rate.second += value.second;
-            } else {
-                mType = kTypeRate;
-                u.rate = value;
+                mElem = value;
             }
         }
 
-        status_t writeToParcel(Parcel *data) const;
+        status_t writeToParcel(Parcel *parcel) const {
+            return std::visit([this, parcel](auto &value) {
+                    return BaseItem::writeToParcel(mName.c_str(), value, parcel);}, mElem);
+        }
+
+        void toStringBuffer(char *buffer, size_t length) const {
+            return std::visit([this, buffer, length](auto &value) {
+                BaseItem::toStringBuffer(mName.c_str(), value, buffer, length);}, mElem);
+        }
+
+        size_t getByteStringSize() const {
+            return std::visit([this](auto &value) {
+                return BaseItem::sizeOfByteString(mName.c_str(), value);}, mElem);
+        }
+
+        status_t writeToByteString(char **bufferpptr, char *bufferptrmax) const {
+            return std::visit([this, bufferpptr, bufferptrmax](auto &value) {
+                return BaseItem::writeToByteString(mName.c_str(), value, bufferpptr, bufferptrmax);
+            }, mElem);
+        }
+
         status_t readFromParcel(const Parcel& data);
-        void toString(char *buffer, size_t length) const;
-        size_t getByteStringSize() const;
-        status_t writeToByteString(char **bufferpptr, char *bufferptrmax) const;
+
         status_t readFromByteString(const char **bufferpptr, const char *bufferptrmax);
 
-    // TODO: make private (and consider converting to std::variant)
-    // private:
-        char *mName = nullptr;
-        Type mType = kTypeNone;
-        union u__ {
-            u__() { zero(); }
-            u__(u__ &&other) {
-                *this = std::move(other);
-            }
-            u__& operator=(u__ &&other) {
-                memcpy(this, &other, sizeof(*this));
-                other.zero();
-                return *this;
-            }
-            void zero() { memset(this, 0, sizeof(*this)); }
-
-            int32_t int32Value;
-            int64_t int64Value;
-            double doubleValue;
-            char *CStringValue;
-            std::pair<int64_t, int64_t> rate;
-        } u;
+    private:
+        std::string mName;
+        Elem mElem;
     };
 
+    // Iteration of props within item
     class iterator {
     public:
-       iterator(size_t pos, const Item &_item)
-           : i(std::min(pos, _item.count()))
-           , item(_item) { }
-       iterator &operator++() {
-           i = std::min(i + 1, item.count());
-           return *this;
-       }
-       bool operator!=(iterator &other) const {
-           return i != other.i;
-       }
-       Prop &operator*() const {
-           return item.mProps[i];
-       }
+        iterator(const std::map<std::string, Prop>::const_iterator &_it) : it(_it) { }
+        iterator &operator++() {
+            ++it;
+            return *this;
+        }
+        bool operator!=(iterator &other) const {
+            return it != other.it;
+        }
+        const Prop &operator*() const {
+            return it->second;
+        }
 
     private:
-      size_t i;
-      const Item &item;
+        std::map<std::string, Prop>::const_iterator it;
     };
 
     iterator begin() const {
-        return iterator(0, *this);
+        return iterator(mProps.cbegin());
     }
+
     iterator end() const {
-        return iterator(SIZE_MAX, *this);
+        return iterator(mProps.cend());
     }
 
 private:
 
-    // TODO: make prop management class
-    size_t findPropIndex(const char *name) const;
-    Prop *findProp(const char *name) const;
-    Prop *allocateProp();
+    const Prop *findProp(const char *key) const {
+        auto it = mProps.find(key);
+        return it != mProps.end() ? &it->second : nullptr;
+    }
 
-        enum {
-            kGrowProps = 10
-        };
-        bool growProps(int increment = kGrowProps);
-        Prop *allocateProp(const char *name);
-        bool removeProp(const char *name);
-    Prop *allocateProp(const std::string& name) { return allocateProp(name.c_str()); }
-
-        size_t mPropCount = 0;
-        size_t mPropSize = 0;
-        Prop *mProps = nullptr;
+    Prop &findOrAllocateProp(const char *key) {
+        auto it = mProps.find(key);
+        if (it != mProps.end()) return it->second;
+        Prop &prop = mProps[key];
+        prop.setName(key);
+        return prop;
+    }
 
     pid_t         mPid = -1;
     uid_t         mUid = -1;
     std::string   mPkgName;
     int64_t       mPkgVersionCode = 0;
-    std::string   mKey{kKeyNone};
+    std::string   mKey;
     nsecs_t       mTimestamp = 0;
+    std::map<std::string, Prop> mProps;
 };
 
 } // namespace mediametrics
 } // namespace android
 
-#endif
+#endif // ANDROID_MEDIA_MEDIAMETRICSITEM_H
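Iteration sketch for the map-backed props above: range-for works through Item::begin()/end(), properties come back in key order, and each Prop can format itself via the std::visit-based toStringBuffer() (item is assumed to be a populated mediametrics::Item):

for (const auto &prop : item) {
    char buffer[256];
    prop.toStringBuffer(buffer, sizeof(buffer));   // e.g. "framecount=1536:"
    ALOGV("%s", buffer);
}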
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 001dccf..c10e6e0 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -2002,7 +2002,6 @@
         (*meta)->setInt32(kKeyTimeScale, mMovieTimeScale);
     }
     if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
-        (*meta)->setInt32(kKey64BitFileOffset, mUse64BitFileOffset);
         if (mTrackEveryTimeDurationUs > 0) {
             (*meta)->setInt64(kKeyTrackTimeStatus, mTrackEveryTimeDurationUs);
         }
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index ef9548e..743a3e4 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -31,6 +31,7 @@
 #include <utils/Log.h>
 
 #include <functional>
+#include <fcntl.h>
 
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -64,9 +65,6 @@
 namespace android {
 
 static const int64_t kMinStreamableFileSizeInBytes = 5 * 1024 * 1024;
-static const int64_t kMax32BitFileSize = 0x00ffffffffLL; // 2^32-1 : max FAT32
-                                                         // filesystem file size
-                                                         // used by most SD cards
 static const uint8_t kNalUnitTypeSeqParamSet = 0x07;
 static const uint8_t kNalUnitTypePicParamSet = 0x08;
 static const int64_t kInitialDelayTimeUs     = 700000LL;
@@ -118,7 +116,7 @@
     int64_t getDurationUs() const;
     int64_t getEstimatedTrackSizeBytes() const;
     int32_t getMetaSizeIncrease(int32_t angle, int32_t trackCount) const;
-    void writeTrackHeader(bool use32BitOffset = true);
+    void writeTrackHeader();
     int64_t getMinCttsOffsetTimeUs();
     void bufferChunk(int64_t timestampUs);
     bool isAvc() const { return mIsAvc; }
@@ -136,6 +134,7 @@
     static const char *getFourCCForMime(const char *mime);
     const char *getTrackType() const;
     void resetInternal();
+    int64_t trackMetaDataSize();
 
 private:
     // A helper class to handle faster write box with table entries
@@ -295,20 +294,16 @@
     int64_t mTrackDurationUs;
     int64_t mMaxChunkDurationUs;
     int64_t mLastDecodingTimeUs;
-
     int64_t mEstimatedTrackSizeBytes;
     int64_t mMdatSizeBytes;
     int32_t mTimeScale;
 
     pthread_t mThread;
 
-
     List<MediaBuffer *> mChunkSamples;
 
-    bool                mSamplesHaveSameSize;
+    bool mSamplesHaveSameSize;
     ListTableEntries<uint32_t, 1> *mStszTableEntries;
-
-    ListTableEntries<uint32_t, 1> *mStcoTableEntries;
     ListTableEntries<off64_t, 1> *mCo64TableEntries;
     ListTableEntries<uint32_t, 3> *mStscTableEntries;
     ListTableEntries<uint32_t, 1> *mStssTableEntries;
@@ -352,6 +347,8 @@
     int64_t mStartTimestampUs;
     int64_t mStartTimeRealUs;
     int64_t mFirstSampleTimeRealUs;
+    // Captures the negative start offset of a track (track start time < 0).
+    int64_t mFirstSampleStartOffsetUs;
     int64_t mPreviousTrackTimeUs;
     int64_t mTrackEveryTimeDurationUs;
 
@@ -410,10 +407,8 @@
     void updateTrackSizeEstimate();
     void addOneStscTableEntry(size_t chunkId, size_t sampleId);
     void addOneStssTableEntry(size_t sampleId);
-
-    // Duration is time scale based
-    void addOneSttsTableEntry(size_t sampleCount, int32_t timescaledDur);
-    void addOneCttsTableEntry(size_t sampleCount, int32_t timescaledDur);
+    void addOneSttsTableEntry(size_t sampleCount, int32_t delta /* media time scale based */);
+    void addOneCttsTableEntry(size_t sampleCount, int32_t sampleOffset);
     void addOneElstTableEntry(uint32_t segmentDuration, int32_t mediaTime,
         int16_t mediaRate, int16_t mediaRateFraction);
 
@@ -421,7 +416,7 @@
     void sendTrackSummary(bool hasMultipleTracks);
 
     // Write the boxes
-    void writeStcoBox(bool use32BitOffset);
+    void writeCo64Box();
     void writeStscBox();
     void writeStszBox();
     void writeStssBox();
@@ -447,7 +442,7 @@
     void writeAudioFourCCBox();
     void writeVideoFourCCBox();
     void writeMetadataFourCCBox();
-    void writeStblBox(bool use32BitOffset);
+    void writeStblBox();
     void writeEdtsBox();
 
     Track(const Track &);
@@ -489,14 +484,17 @@
     mStarted = false;
     mWriterThreadStarted = false;
     mSendNotify = false;
+    mWriteSeekErr = false;
+    mFallocateErr = false;
 
     // Reset following variables for all the sessions and they will be
     // initialized in start(MetaData *param).
     mIsRealTimeRecording = true;
     mUse4ByteNalLength = true;
-    mUse32BitOffset = true;
     mOffset = 0;
+    mPreAllocateFileEndOffset = 0;
     mMdatOffset = 0;
+    mMdatEndOffset = 0;
     mInMemoryCache = NULL;
     mInMemoryCacheOffset = 0;
     mInMemoryCacheSize = 0;
@@ -505,10 +503,13 @@
     mStreamableFile = false;
     mTimeScale = -1;
     mHasFileLevelMeta = false;
+    mFileLevelMetaDataSize = 0;
     mPrimaryItemId = 0;
     mAssociationEntryCount = 0;
     mNumGrids = 0;
     mHasRefs = false;
+    mPreAllocFirstTime = true;
+    mPrevAllTracksTotalMetaDataSizeEstimate = 0;
 
     // Following variables only need to be set for the first recording session.
     // And they will stay the same for all the recording sessions.
@@ -530,6 +531,15 @@
         ALOGE("cannot seek mFd: %s (%d) %lld", strerror(errno), errno, (long long)mFd);
         release();
     }
+
+    if (fallocate(mFd, 0, 0, 1) == 0) {
+        ALOGD("PreAllocation enabled");
+        mPreAllocationEnabled = true;
+    } else {
+        ALOGD("PreAllocation disabled. fallocate : %s, %d", strerror(errno), errno);
+        mPreAllocationEnabled = false;
+    }
+
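A sketch of the preallocation lifecycle introduced here, with local helpers that are not the writer's own methods: probe support with a 1-byte fallocate(), reserve space ahead of the write offset while recording, then trim and sync when the file is finalized (compare release() and truncatePreAllocation() later in this change).

#include <fcntl.h>
#include <unistd.h>

static bool supportsPreAllocation(int fd) {
    return fallocate(fd, 0 /* mode */, 0 /* offset */, 1 /* len */) == 0;
}

static bool preAllocate(int fd, off_t currentEnd, off_t extraBytes) {
    return fallocate(fd, 0, currentEnd, extraBytes) == 0;   // reserve ahead of writes
}

static void finishFile(int fd, off_t finalSize) {
    ftruncate(fd, finalSize);   // drop unused reserved space
    fsync(fd);                  // flush before close
}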
     for (List<Track *>::iterator it = mTracks.begin();
          it != mTracks.end(); ++it) {
         (*it)->resetInternal();
@@ -736,9 +746,8 @@
 
     // If the estimation is wrong, we will pay the price of wasting
     // some reserved space. This should not happen so often statistically.
-    static const int32_t factor = mUse32BitOffset? 1: 2;
-    static const int64_t MIN_MOOV_BOX_SIZE = 3 * 1024;  // 3 KB
-    static const int64_t MAX_MOOV_BOX_SIZE = (180 * 3000000 * 6LL / 8000);
+    static const int64_t MIN_MOOV_BOX_SIZE = 3 * 1024;                      // 3 KiB
+    static const int64_t MAX_MOOV_BOX_SIZE = (180 * 3000000 * 6LL / 8000);  // ~395.5 KiB
     int64_t size = MIN_MOOV_BOX_SIZE;
 
     // Max file size limit is set
@@ -781,10 +790,7 @@
          " estimated moov size %" PRId64 " bytes",
          mMaxFileSizeLimitBytes, mMaxFileDurationLimitUs, bitRate, size);
 
-    int64_t estimatedSize = factor * size;
-    CHECK_GE(estimatedSize, 8);
-
-    return estimatedSize;
+    return size;
 }
 
 status_t MPEG4Writer::start(MetaData *param) {
@@ -794,36 +800,24 @@
     mStartMeta = param;
 
     /*
-     * Check mMaxFileSizeLimitBytes at the beginning
-     * since mMaxFileSizeLimitBytes may be implicitly
-     * changed later for 32-bit file offset even if
-     * user does not ask to set it explicitly.
+     * Check mMaxFileSizeLimitBytes at the beginning, since mMaxFileSizeLimitBytes may be implicitly
+     * lowered later based on the filesystem's file size bits even if the user does not set it explicitly.
      */
     if (mMaxFileSizeLimitBytes != 0) {
         mIsFileSizeLimitExplicitlyRequested = true;
     }
 
-    int32_t use64BitOffset;
-    if (param &&
-        param->findInt32(kKey64BitFileOffset, &use64BitOffset) &&
-        use64BitOffset) {
-        mUse32BitOffset = false;
-    }
-
-    if (mUse32BitOffset) {
-        // Implicit 32 bit file size limit
-        if (mMaxFileSizeLimitBytes == 0) {
-            mMaxFileSizeLimitBytes = kMax32BitFileSize;
-        }
-
-        // If file size is set to be larger than the 32 bit file
-        // size limit, treat it as an error.
-        if (mMaxFileSizeLimitBytes > kMax32BitFileSize) {
-            ALOGW("32-bit file size limit (%" PRId64 " bytes) too big. "
-                 "It is changed to %" PRId64 " bytes",
-                mMaxFileSizeLimitBytes, kMax32BitFileSize);
-            mMaxFileSizeLimitBytes = kMax32BitFileSize;
-        }
+    int32_t fileSizeBits = fpathconf(mFd, _PC_FILESIZEBITS);
+    ALOGD("fpathconf _PC_FILESIZEBITS:%" PRId32, fileSizeBits);
+    fileSizeBits = std::min(fileSizeBits, 52 /* cap it at around 4 PiB */);
+    int64_t maxFileSizeBytes = ((int64_t)1 << fileSizeBits) - 1;
+    if (mMaxFileSizeLimitBytes > maxFileSizeBytes) {
+        ALOGD("File size limit (%" PRId64 " bytes) too big. It is changed to %" PRId64 " bytes",
+              mMaxFileSizeLimitBytes, maxFileSizeBytes);
+        mMaxFileSizeLimitBytes = maxFileSizeBytes;
+    } else if (mMaxFileSizeLimitBytes == 0) {
+        mMaxFileSizeLimitBytes = maxFileSizeBytes;
+        ALOGD("File size limit set to %" PRId64 " bytes implicitly", maxFileSizeBytes);
     }
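The implicit limit computed above comes from the filesystem itself: fpathconf(_PC_FILESIZEBITS) reports how many bits the filesystem uses for file sizes, so the largest representable file is (1 << bits) - 1 bytes, capped at 52 bits here. A sketch (the 32-bit fallback is an assumption for illustration):

#include <unistd.h>
#include <algorithm>
#include <cstdint>

static int64_t maxFileSizeFor(int fd) {
    long bits = fpathconf(fd, _PC_FILESIZEBITS);
    if (bits <= 0) bits = 32;                 // conservative fallback on error
    bits = std::min<long>(bits, 52);          // ~4 PiB, well inside int64_t
    return ((int64_t)1 << bits) - 1;
}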
 
     int32_t use2ByteNalLength;
@@ -850,7 +844,8 @@
 
     if (!param ||
         !param->findInt32(kKeyTimeScale, &mTimeScale)) {
-        mTimeScale = 1000;
+        // Increased by a factor of 10 to improve precision of segment duration in edit list entry.
+        mTimeScale = 10000;
     }
     CHECK_GT(mTimeScale, 0);
     ALOGV("movie time scale: %d", mTimeScale);
@@ -915,7 +910,8 @@
     if (mInMemoryCacheSize == 0) {
         int32_t bitRate = -1;
         if (mHasFileLevelMeta) {
-            mInMemoryCacheSize += estimateFileLevelMetaSize(param);
+            mFileLevelMetaDataSize = estimateFileLevelMetaSize(param);
+            mInMemoryCacheSize += mFileLevelMetaDataSize;
         }
         if (mHasMoovBox) {
             if (param) {
@@ -926,7 +922,7 @@
     }
     if (mStreamableFile) {
         // Reserve a 'free' box only for streamable file
-        lseek64(mFd, mFreeBoxOffset, SEEK_SET);
+        seekOrPostError(mFd, mFreeBoxOffset, SEEK_SET);
         writeInt32(mInMemoryCacheSize);
         write("free", 4);
         mMdatOffset = mFreeBoxOffset + mInMemoryCacheSize;
@@ -935,18 +931,16 @@
     }
 
     mOffset = mMdatOffset;
-    lseek64(mFd, mMdatOffset, SEEK_SET);
-    if (mUse32BitOffset) {
-        write("????mdat", 8);
-    } else {
-        write("\x00\x00\x00\x01mdat????????", 16);
-    }
+    seekOrPostError(mFd, mMdatOffset, SEEK_SET);
+    write("\x00\x00\x00\x01mdat????????", 16);
 
     status_t err = startWriterThread();
     if (err != OK) {
         return err;
     }
 
+    setupAndStartLooper();
+
     err = startTracks(param);
     if (err != OK) {
         return err;
@@ -956,32 +950,30 @@
     return OK;
 }
 
-bool MPEG4Writer::use32BitFileOffset() const {
-    return mUse32BitOffset;
-}
-
 status_t MPEG4Writer::pause() {
     ALOGW("MPEG4Writer: pause is not supported");
     return ERROR_UNSUPPORTED;
 }
 
-void MPEG4Writer::stopWriterThread() {
-    ALOGD("Stopping writer thread");
+status_t MPEG4Writer::stopWriterThread() {
+    ALOGV("Stopping writer thread");
     if (!mWriterThreadStarted) {
-        return;
+        return OK;
     }
-
     {
         Mutex::Autolock autolock(mLock);
-
         mDone = true;
         mChunkReadyCondition.signal();
     }
 
     void *dummy;
-    pthread_join(mThread, &dummy);
+    status_t err = pthread_join(mThread, &dummy);
+    WARN_UNLESS(err == 0, "stopWriterThread pthread_join err: %d", err);
+
+    err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
     mWriterThreadStarted = false;
-    ALOGD("Writer thread stopped");
+    WARN_UNLESS(err == 0, "stopWriterThread pthread_join retVal: %d, writer thread stopped", err);
+    return err;
 }
 
 /*
@@ -1037,12 +1029,21 @@
 }
 
 void MPEG4Writer::release() {
-    close(mFd);
+    ALOGD("release()");
+    if (mPreAllocationEnabled) {
+        truncatePreAllocation();
+    }
+    int retVal = fsync(mFd);
+    WARN_UNLESS(retVal == 0, "fsync retVal:%d", retVal);
+    retVal = close(mFd);
+    WARN_UNLESS(retVal == 0, "close mFd retVal :%d", retVal);
     mFd = -1;
     if (mNextFd != -1) {
-        close(mNextFd);
+        retVal = close(mNextFd);
         mNextFd = -1;
+        WARN_UNLESS(retVal == 0, "close mNextFd retVal :%d", retVal);
     }
+    stopAndReleaseLooper();
     mInitCheck = NO_INIT;
     mStarted = false;
     free(mInMemoryCache);
@@ -1073,16 +1074,19 @@
 }
 
 status_t MPEG4Writer::reset(bool stopSource) {
+    ALOGD("reset()");
+    std::lock_guard<std::mutex> l(mResetMutex);
     if (mInitCheck != OK) {
         return OK;
     } else {
         if (!mWriterThreadStarted ||
             !mStarted) {
+            status_t err = OK;
             if (mWriterThreadStarted) {
-                stopWriterThread();
+                err = stopWriterThread();
             }
             release();
-            return OK;
+            return err;
         }
     }
 
@@ -1092,9 +1096,10 @@
     int32_t nonImageTrackCount = 0;
     for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
-        status_t status = (*it)->stop(stopSource);
-        if (err == OK && status != OK) {
-            err = status;
+        status_t trackErr = (*it)->stop(stopSource);
+        if (err == OK && trackErr != OK) {
+            ALOGW("%s track stopped with an error", (*it)->getTrackType());
+            err = trackErr;
         }
 
         // skip image tracks
@@ -1110,12 +1115,18 @@
         }
     }
 
+
     if (nonImageTrackCount > 1) {
         ALOGD("Duration from tracks range is [%" PRId64 ", %" PRId64 "] us",
             minDurationUs, maxDurationUs);
     }
 
-    stopWriterThread();
+    status_t writerErr = stopWriterThread();
+
+    // TODO: which error to propagate, writerErr or trackErr?
+    if (err == OK && writerErr != OK) {
+        err = writerErr;
+    }
 
     // Do not write out movie header on error.
     if (err != OK) {
@@ -1124,17 +1135,12 @@
     }
 
     // Fix up the size of the 'mdat' chunk.
-    if (mUse32BitOffset) {
-        lseek64(mFd, mMdatOffset, SEEK_SET);
-        uint32_t size = htonl(static_cast<uint32_t>(mOffset - mMdatOffset));
-        ::write(mFd, &size, 4);
-    } else {
-        lseek64(mFd, mMdatOffset + 8, SEEK_SET);
-        uint64_t size = mOffset - mMdatOffset;
-        size = hton64(size);
-        ::write(mFd, &size, 8);
-    }
-    lseek64(mFd, mOffset, SEEK_SET);
+    seekOrPostError(mFd, mMdatOffset + 8, SEEK_SET);
+    uint64_t size = mOffset - mMdatOffset;
+    size = hton64(size);
+    writeOrPostError(mFd, &size, 8);
+    seekOrPostError(mFd, mOffset, SEEK_SET);
+    mMdatEndOffset = mOffset;
 
     // Construct file-level meta and moov box now
     mInMemoryCacheOffset = 0;
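The fixup above relies on the ISO BMFF large-size convention: a box whose 32-bit size field is 1 stores its real size in a 64-bit field right after the type, which is why the writer emits the 16-byte "\x00\x00\x00\x01mdat????????" placeholder and later patches offset mMdatOffset + 8. A sketch with local helpers (not MPEG4Writer methods):

#include <unistd.h>
#include <cstdint>

static void putBE64(uint8_t out[8], uint64_t v) {
    for (int i = 7; i >= 0; --i) {
        out[i] = (uint8_t)(v & 0xff);   // big-endian regardless of host order
        v >>= 8;
    }
}

static void patchMdatSize(int fd, off_t mdatOffset, off_t endOffset) {
    uint8_t size[8];
    putBE64(size, (uint64_t)(endOffset - mdatOffset));
    pwrite(fd, size, sizeof(size), mdatOffset + 8);   // 8 = 32-bit size (=1) + 'mdat'
}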
@@ -1198,12 +1204,12 @@
     CHECK_LE(mInMemoryCacheOffset + 8, mInMemoryCacheSize);
 
     // Cached box
-    lseek64(mFd, mFreeBoxOffset, SEEK_SET);
+    seekOrPostError(mFd, mFreeBoxOffset, SEEK_SET);
     mOffset = mFreeBoxOffset;
     write(mInMemoryCache, 1, mInMemoryCacheOffset);
 
     // Free box
-    lseek64(mFd, mOffset, SEEK_SET);
+    seekOrPostError(mFd, mOffset, SEEK_SET);
     mFreeBoxOffset = mOffset;
     writeInt32(mInMemoryCacheSize - mInMemoryCacheOffset);
     write("free", 4);
@@ -1272,20 +1278,19 @@
                 std::min(minCttsOffsetTimeUs, (*it)->getMinCttsOffsetTimeUs());
         }
     }
-    ALOGI("Ajust the moov start time from %lld us -> %lld us",
-            (long long)mStartTimestampUs,
-            (long long)(mStartTimestampUs + minCttsOffsetTimeUs - kMaxCttsOffsetTimeUs));
-    // Adjust the global start time.
+    ALOGI("Adjust the moov start time from %lld us -> %lld us", (long long)mStartTimestampUs,
+          (long long)(mStartTimestampUs + minCttsOffsetTimeUs - kMaxCttsOffsetTimeUs));
+    // Adjust movie start time.
     mStartTimestampUs += minCttsOffsetTimeUs - kMaxCttsOffsetTimeUs;
 
-    // Add mStartTimeOffsetBFramesUs(-ve or zero) to the duration of first entry in STTS.
+    // Add mStartTimeOffsetBFramesUs(-ve or zero) to the start offset of tracks.
     mStartTimeOffsetBFramesUs = minCttsOffsetTimeUs - kMaxCttsOffsetTimeUs;
     ALOGV("mStartTimeOffsetBFramesUs :%" PRId32, mStartTimeOffsetBFramesUs);
 
     for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
         if (!(*it)->isHeic()) {
-            (*it)->writeTrackHeader(mUse32BitOffset);
+            (*it)->writeTrackHeader();
         }
     }
     endBox();  // moov
@@ -1376,17 +1381,15 @@
     } else {
         if (tiffHdrOffset > 0) {
             tiffHdrOffset = htonl(tiffHdrOffset);
-            ::write(mFd, &tiffHdrOffset, 4); // exif_tiff_header_offset field
+            writeOrPostError(mFd, &tiffHdrOffset, 4);  // exif_tiff_header_offset field
             mOffset += 4;
         }
 
-        ::write(mFd,
-              (const uint8_t *)buffer->data() + buffer->range_offset(),
-              buffer->range_length());
+        writeOrPostError(mFd, (const uint8_t*)buffer->data() + buffer->range_offset(),
+                         buffer->range_length());
 
         mOffset += buffer->range_length();
     }
-
     *bytesWritten = mOffset - old_offset;
     return old_offset;
 }
@@ -1431,30 +1434,27 @@
 
 void MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
     size_t length = buffer->range_length();
-
     if (mUse4ByteNalLength) {
         uint8_t x = length >> 24;
-        ::write(mFd, &x, 1);
+        writeOrPostError(mFd, &x, 1);
         x = (length >> 16) & 0xff;
-        ::write(mFd, &x, 1);
+        writeOrPostError(mFd, &x, 1);
         x = (length >> 8) & 0xff;
-        ::write(mFd, &x, 1);
+        writeOrPostError(mFd, &x, 1);
         x = length & 0xff;
-        ::write(mFd, &x, 1);
+        writeOrPostError(mFd, &x, 1);
 
-        ::write(mFd,
-              (const uint8_t *)buffer->data() + buffer->range_offset(),
-              length);
+        writeOrPostError(mFd, (const uint8_t*)buffer->data() + buffer->range_offset(), length);
 
         mOffset += length + 4;
     } else {
         CHECK_LT(length, 65536u);
 
         uint8_t x = length >> 8;
-        ::write(mFd, &x, 1);
+        writeOrPostError(mFd, &x, 1);
         x = length & 0xff;
-        ::write(mFd, &x, 1);
-        ::write(mFd, (const uint8_t *)buffer->data() + buffer->range_offset(), length);
+        writeOrPostError(mFd, &x, 1);
+        writeOrPostError(mFd, (const uint8_t*)buffer->data() + buffer->range_offset(), length);
         mOffset += length + 2;
     }
 }
@@ -1476,9 +1476,9 @@
                  it != mBoxes.end(); ++it) {
                 (*it) += mOffset;
             }
-            lseek64(mFd, mOffset, SEEK_SET);
-            ::write(mFd, mInMemoryCache, mInMemoryCacheOffset);
-            ::write(mFd, ptr, bytes);
+            seekOrPostError(mFd, mOffset, SEEK_SET);
+            writeOrPostError(mFd, mInMemoryCache, mInMemoryCacheOffset);
+            writeOrPostError(mFd, ptr, bytes);
             mOffset += (bytes + mInMemoryCacheOffset);
 
             // All subsequent boxes will be written to the end of the file.
@@ -1488,13 +1488,54 @@
             mInMemoryCacheOffset += bytes;
         }
     } else {
-        ::write(mFd, ptr, size * nmemb);
+        writeOrPostError(mFd, ptr, bytes);
         mOffset += bytes;
     }
     return bytes;
 }
 
+void MPEG4Writer::writeOrPostError(int fd, const void* buf, size_t count) {
+    if (mWriteSeekErr == true)
+        return;
+    ssize_t bytesWritten = ::write(fd, buf, count);
+    /* Write as much as possible during stop() execution when there was an error
+     * (mWriteSeekErr == true) in the previous call to write() or lseek64().
+     */
+    if (bytesWritten == count)
+        return;
+    mWriteSeekErr = true;
+    // Note that errno is not changed even when bytesWritten < count.
+    ALOGE("writeOrPostError bytesWritten:%zd, count:%zu, error:%s(%d)", bytesWritten, count,
+          std::strerror(errno), errno);
+
+    // Can't guarantee that the file is usable or that further writes would succeed, so signal a stop.
+    sp<AMessage> msg = new AMessage(kWhatHandleIOError, mReflector);
+    status_t err = msg->post();
+    ALOGE("writeOrPostError post:%d", err);
+}
+
+void MPEG4Writer::seekOrPostError(int fd, off64_t offset, int whence) {
+    if (mWriteSeekErr == true)
+        return;
+    off64_t resOffset = lseek64(fd, offset, whence);
+    /* Allow to seek during stop() execution even when there was an error
+     * (mWriteSeekErr == true) in the previous call to write() or lseek64().
+     */
+    if (resOffset == offset)
+        return;
+    mWriteSeekErr = true;
+    ALOGE("seekOrPostError resOffset:%" PRId64 ", offset:%" PRId64 ", error:%s(%d)", resOffset,
+          offset, std::strerror(errno), errno);
+
+    // Can't guarantee that the file is usable or that further seeks would succeed, so signal a stop.
+    sp<AMessage> msg = new AMessage(kWhatHandleIOError, mReflector);
+    status_t err = msg->post();
+    ALOGE("seekOrPostError post:%d", err);
+}
+
 void MPEG4Writer::beginBox(uint32_t id) {
+    ALOGV("beginBox:%" PRIu32, id);
+
     mBoxes.push_back(mWriteBoxToMemory?
             mInMemoryCacheOffset: mOffset);
 
@@ -1503,6 +1544,7 @@
 }
 
 void MPEG4Writer::beginBox(const char *fourcc) {
+    ALOGV("beginBox:%s", fourcc);
     CHECK_EQ(strlen(fourcc), 4u);
 
     mBoxes.push_back(mWriteBoxToMemory?
@@ -1522,10 +1564,11 @@
         int32_t x = htonl(mInMemoryCacheOffset - offset);
         memcpy(mInMemoryCache + offset, &x, 4);
     } else {
-        lseek64(mFd, offset, SEEK_SET);
+        seekOrPostError(mFd, offset, SEEK_SET);
         writeInt32(mOffset - offset);
+        ALOGV("box size:%" PRIu64, mOffset - offset);
         mOffset -= 4;
-        lseek64(mFd, mOffset, SEEK_SET);
+        seekOrPostError(mFd, mOffset, SEEK_SET);
     }
 }
 
@@ -1679,6 +1722,79 @@
     return mStreamableFile;
 }
 
+bool MPEG4Writer::preAllocate(uint64_t wantSize) {
+    if (!mPreAllocationEnabled)
+        return true;
+
+    std::lock_guard<std::mutex> l(mFallocMutex);
+
+    if (mFallocateErr == true)
+        return false;
+
+    // approxMOOVHeadersSize has to be updated whenever it's needed in the future.
+    uint64_t approxMOOVHeadersSize = 500;
+    // approxTrackHeadersSize has to be updated whenever it's needed in the future.
+    const uint64_t approxTrackHeadersSize = 800;
+
+    uint64_t approxMOOVBoxSize = 0;
+    if (mPreAllocFirstTime) {
+        mPreAllocFirstTime = false;
+        approxMOOVBoxSize = approxMOOVHeadersSize + mFileLevelMetaDataSize + mMoovExtraSize +
+                            (approxTrackHeadersSize * numTracks());
+        ALOGV("firstTimeAllocation approxMOOVBoxSize:%" PRIu64, approxMOOVBoxSize);
+    }
+
+    uint64_t allTracksTotalMetaDataSizeEstimate = 0;
+    for (List<Track *>::iterator it = mTracks.begin(); it != mTracks.end(); ++it) {
+        allTracksTotalMetaDataSizeEstimate += ((*it)->trackMetaDataSize());
+    }
+    ALOGV(" allTracksTotalMetaDataSizeEstimate:%" PRIu64, allTracksTotalMetaDataSizeEstimate);
+
+    /* The moov box size increases whenever a sample gets written to the file. After the very
+     * first allocation, it is enough to reserve only the estimated delta increase for each
+     * new sample.
+     */
+    uint64_t approxMetaDataSizeIncrease =
+            allTracksTotalMetaDataSizeEstimate - mPrevAllTracksTotalMetaDataSizeEstimate;
+    ALOGV("approxMetaDataSizeIncrease:%" PRIu64  " wantSize:%" PRIu64, approxMetaDataSizeIncrease,
+          wantSize);
+    mPrevAllTracksTotalMetaDataSizeEstimate = allTracksTotalMetaDataSizeEstimate;
+    ALOGV("mPreAllocateFileEndOffset:%" PRIu64 " mOffset:%" PRIu64, mPreAllocateFileEndOffset,
+          mOffset);
+    off64_t lastFileEndOffset = std::max(mPreAllocateFileEndOffset, mOffset);
+    uint64_t preAllocateSize = wantSize + approxMOOVBoxSize + approxMetaDataSizeIncrease;
+    ALOGV("preAllocateSize :%" PRIu64 " lastFileEndOffset:%" PRIu64, preAllocateSize,
+          lastFileEndOffset);
+
+    int res = fallocate(mFd, 0, lastFileEndOffset, preAllocateSize);
+    if (res == -1) {
+        ALOGE("fallocate err:%s, %d, fd:%d", strerror(errno), errno, mFd);
+        sp<AMessage> msg = new AMessage(kWhatHandleFallocateError, mReflector);
+        status_t err = msg->post();
+        mFallocateErr = true;
+        ALOGD("preAllocation post:%d", err);
+    } else {
+        mPreAllocateFileEndOffset = lastFileEndOffset + preAllocateSize;
+        ALOGV("mPreAllocateFileEndOffset:%" PRIu64, mPreAllocateFileEndOffset);
+    }
+    return (res == -1) ? false : true;
+}
+
+bool MPEG4Writer::truncatePreAllocation() {
+    bool status = true;
+    off64_t endOffset = std::max(mMdatEndOffset, mOffset);
+    /* If mPreAllocateFileEndOffset >= endOffset (diff >= 0), the preallocation logic is working
+     * as intended. Otherwise, the logic needs to be revisited.
+     */
+    ALOGD("ftruncate mPreAllocateFileEndOffset:%" PRId64 " mOffset:%" PRIu64
+          " mMdatEndOffset:%" PRIu64 " diff:%" PRId64, mPreAllocateFileEndOffset, mOffset,
+          mMdatEndOffset, mPreAllocateFileEndOffset - endOffset);
+    if (ftruncate(mFd, endOffset) == -1) {
+        ALOGE("ftruncate err:%s, %d, fd:%d", strerror(errno), errno, mFd);
+        status = false;
+    }
+    return status;
+}
+
 bool MPEG4Writer::exceedsFileSizeLimit() {
     // No limit
     if (mMaxFileSizeLimitBytes == 0) {
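Editor's note: the preAllocate()/truncatePreAllocation() hunk above implements a reserve-then-trim scheme: reserve room for the next sample plus an estimate of the moov growth before writing, then trim the unused tail when the session stops. A minimal standalone sketch of that pattern follows; it is not the MPEG4Writer code, and the path, sample size, and moov-growth estimate are illustrative.

```
#include <fcntl.h>
#include <unistd.h>
#include <algorithm>
#include <cerrno>
#include <cstdio>
#include <cstring>

int main() {
    const char *path = "prealloc_demo.mp4";   // illustrative output path
    int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);
    if (fd < 0) { perror("open"); return 1; }

    off_t writtenEnd = 0;                      // end of real data written so far
    off_t preAllocEnd = 0;                     // end of the reserved region

    // Reserve room for the next sample plus an estimated moov-size increase.
    // fallocate() is Linux-specific (declared in <fcntl.h>).
    auto reserve = [&](off_t sampleSize, off_t moovGrowth) -> bool {
        off_t base = std::max(preAllocEnd, writtenEnd);
        if (fallocate(fd, 0, base, sampleSize + moovGrowth) == -1) {
            fprintf(stderr, "fallocate: %s\n", strerror(errno));
            return false;                      // the writer stops the session in this case
        }
        preAllocEnd = base + sampleSize + moovGrowth;
        return true;
    };

    char sample[4096] = {};
    if (reserve(sizeof(sample), 800) &&
        write(fd, sample, sizeof(sample)) == (ssize_t)sizeof(sample)) {
        writtenEnd += sizeof(sample);
    }

    // At stop: drop whatever reserved space was never used.
    if (ftruncate(fd, writtenEnd) == -1) {
        perror("ftruncate");
    }
    close(fd);
    return 0;
}
```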
@@ -1764,6 +1880,9 @@
     return mStartTimestampUs;
 }
 
+/* Returns a negative value when frame reordering (B frames) requires a start offset, or zero
+ * otherwise. Tracks with B frames offset their CTTS values by this negative amount.
+ */
 int32_t MPEG4Writer::getStartTimeOffsetBFramesUs() {
     Mutex::Autolock autoLock(mLock);
     return mStartTimeOffsetBFramesUs;
@@ -1792,7 +1911,6 @@
       mEstimatedTrackSizeBytes(0),
       mSamplesHaveSameSize(true),
       mStszTableEntries(new ListTableEntries<uint32_t, 1>(1000)),
-      mStcoTableEntries(new ListTableEntries<uint32_t, 1>(1000)),
       mCo64TableEntries(new ListTableEntries<off64_t, 1>(1000)),
       mStscTableEntries(new ListTableEntries<uint32_t, 3>(1000)),
       mStssTableEntries(new ListTableEntries<uint32_t, 1>(1000)),
@@ -1807,6 +1925,8 @@
       mGotAllCodecSpecificData(false),
       mReachedEOS(false),
       mStartTimestampUs(-1),
+      mFirstSampleTimeRealUs(0),
+      mFirstSampleStartOffsetUs(0),
       mRotation(0),
       mDimgRefs("dimg"),
       mImageItemId(0),
@@ -1872,15 +1992,11 @@
     mIsMalformed = false;
     mTrackDurationUs = 0;
     mEstimatedTrackSizeBytes = 0;
-    mSamplesHaveSameSize = 0;
+    mSamplesHaveSameSize = false;
     if (mStszTableEntries != NULL) {
         delete mStszTableEntries;
         mStszTableEntries = new ListTableEntries<uint32_t, 1>(1000);
     }
-    if (mStcoTableEntries != NULL) {
-        delete mStcoTableEntries;
-        mStcoTableEntries = new ListTableEntries<uint32_t, 1>(1000);
-    }
     if (mCo64TableEntries != NULL) {
         delete mCo64TableEntries;
         mCo64TableEntries = new ListTableEntries<off64_t, 1>(1000);
@@ -1908,25 +2024,24 @@
     mReachedEOS = false;
 }
 
+int64_t MPEG4Writer::Track::trackMetaDataSize() {
+    int64_t co64BoxSizeBytes = mCo64TableEntries->count() * 8;
+    int64_t stszBoxSizeBytes = mStszTableEntries->count() * 4;
+    int64_t trackMetaDataSize = mStscTableEntries->count() * 12 +  // stsc box size
+                                mStssTableEntries->count() * 4 +   // stss box size
+                                mSttsTableEntries->count() * 8 +   // stts box size
+                                mCttsTableEntries->count() * 8 +   // ctts box size
+                                mElstTableEntries->count() * 12 +  // elst box size
+                                co64BoxSizeBytes +                 // stco box size
+                                stszBoxSizeBytes;                  // stsz box size
+    return trackMetaDataSize;
+}
+
+
 void MPEG4Writer::Track::updateTrackSizeEstimate() {
     mEstimatedTrackSizeBytes = mMdatSizeBytes;  // media data size
-
     if (!isHeic() && !mOwner->isFileStreamable()) {
-        uint32_t stcoBoxCount = (mOwner->use32BitFileOffset()
-                                ? mStcoTableEntries->count()
-                                : mCo64TableEntries->count());
-        int64_t stcoBoxSizeBytes = stcoBoxCount * 4;
-        int64_t stszBoxSizeBytes = mSamplesHaveSameSize? 4: (mStszTableEntries->count() * 4);
-
-        // Reserved free space is not large enough to hold
-        // all meta data and thus wasted.
-        mEstimatedTrackSizeBytes += mStscTableEntries->count() * 12 +  // stsc box size
-                                    mStssTableEntries->count() * 4 +   // stss box size
-                                    mSttsTableEntries->count() * 8 +   // stts box size
-                                    mCttsTableEntries->count() * 8 +   // ctts box size
-                                    mElstTableEntries->count() * 12 +   // elst box size
-                                    stcoBoxSizeBytes +           // stco box size
-                                    stszBoxSizeBytes;            // stsz box size
+        mEstimatedTrackSizeBytes += trackMetaDataSize();
     }
 }
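Editor's note: for reference, the trackMetaDataSize() helper above sums fixed per-entry sizes for each sample table. A small standalone check of that arithmetic, with illustrative entry counts:

```
#include <cstdint>
#include <cstdio>

int main() {
    // Per-entry sizes used by trackMetaDataSize(): stsc 12, stss 4, stts 8,
    // ctts 8, elst 12, co64 8 and stsz 4 bytes per entry (counts are illustrative).
    int64_t stsc = 2, stss = 30, stts = 900, ctts = 900, elst = 1, co64 = 60, stsz = 900;
    int64_t bytes = stsc * 12 + stss * 4 + stts * 8 + ctts * 8 + elst * 12 + co64 * 8 + stsz * 4;
    printf("estimated track metadata size: %lld bytes\n", (long long)bytes);  // 18636 bytes
    return 0;
}
```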
 
@@ -1941,24 +2056,20 @@
     mStssTableEntries->add(htonl(sampleId));
 }
 
-void MPEG4Writer::Track::addOneSttsTableEntry(
-        size_t sampleCount, int32_t duration) {
-
-    if (duration == 0) {
+void MPEG4Writer::Track::addOneSttsTableEntry(size_t sampleCount, int32_t delta) {
+    if (delta == 0) {
         ALOGW("0-duration samples found: %zu", sampleCount);
     }
     mSttsTableEntries->add(htonl(sampleCount));
-    mSttsTableEntries->add(htonl(duration));
+    mSttsTableEntries->add(htonl(delta));
 }
 
-void MPEG4Writer::Track::addOneCttsTableEntry(
-        size_t sampleCount, int32_t duration) {
-
+void MPEG4Writer::Track::addOneCttsTableEntry(size_t sampleCount, int32_t sampleOffset) {
     if (!mIsVideo) {
         return;
     }
     mCttsTableEntries->add(htonl(sampleCount));
-    mCttsTableEntries->add(htonl(duration));
+    mCttsTableEntries->add(htonl(sampleOffset));
 }
 
 void MPEG4Writer::Track::addOneElstTableEntry(
@@ -1971,16 +2082,30 @@
     mElstTableEntries->add(htonl((((uint32_t)mediaRate) << 16) | (uint32_t)mediaRateFraction));
 }
 
-status_t MPEG4Writer::setNextFd(int fd) {
-    ALOGV("addNextFd");
-    Mutex::Autolock l(mLock);
-    if (mLooper == NULL) {
-        mReflector = new AHandlerReflector<MPEG4Writer>(this);
+void MPEG4Writer::setupAndStartLooper() {
+    if (mLooper == nullptr) {
         mLooper = new ALooper;
-        mLooper->registerHandler(mReflector);
+        mLooper->setName("MP4WriterLooper");
         mLooper->start();
+        mReflector = new AHandlerReflector<MPEG4Writer>(this);
+        mLooper->registerHandler(mReflector);
     }
+}
 
+void MPEG4Writer::stopAndReleaseLooper() {
+    if (mLooper != nullptr) {
+        if (mReflector != nullptr) {
+            ALOGD("unregisterHandler");
+            mLooper->unregisterHandler(mReflector->id());
+            mReflector.clear();
+        }
+        mLooper->stop();
+        mLooper.clear();
+    }
+}
+
+status_t MPEG4Writer::setNextFd(int fd) {
+    Mutex::Autolock l(mLock);
     if (mNextFd != -1) {
         // No need to set a new FD yet.
         return INVALID_OPERATION;
@@ -2021,12 +2146,7 @@
 
 void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
     CHECK(!mIsHeic);
-    if (mOwner->use32BitFileOffset()) {
-        uint32_t value = offset;
-        mStcoTableEntries->add(htonl(value));
-    } else {
-        mCo64TableEntries->add(hton64(offset));
-    }
+    mCo64TableEntries->add(hton64(offset));
 }
 
 void MPEG4Writer::Track::addItemOffsetAndSize(off64_t offset, size_t size, bool isExif) {
@@ -2195,6 +2315,22 @@
             notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_NEXT_OUTPUT_FILE_STARTED, 0);
             break;
         }
+        // ::write() or lseek64() failed; the file could be malformed.
+        case kWhatHandleIOError: {
+            ALOGE("kWhatHandleIOError");
+            // Stop tracks' threads and main writer thread.
+            notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_RECORDER_ERROR_UNKNOWN, ERROR_MALFORMED);
+            stop();
+            break;
+        }
+        // fallocate() failed, hence notify app about it and stop().
+        case kWhatHandleFallocateError: {
+            ALOGE("kWhatHandleFallocateError");
+            // TODO: introduce a new MEDIA_RECORDER_INFO_STOPPED instead of MEDIA_RECORDER_INFO_UNKNOWN?
+            notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_UNKNOWN, ERROR_IO);
+            stop();
+            break;
+        }
         default:
         TRESPASS();
     }
@@ -2236,7 +2372,6 @@
     stop();
 
     delete mStszTableEntries;
-    delete mStcoTableEntries;
     delete mCo64TableEntries;
     delete mStscTableEntries;
     delete mSttsTableEntries;
@@ -2245,7 +2380,6 @@
     delete mElstTableEntries;
 
     mStszTableEntries = NULL;
-    mStcoTableEntries = NULL;
     mCo64TableEntries = NULL;
     mStscTableEntries = NULL;
     mSttsTableEntries = NULL;
@@ -2385,7 +2519,6 @@
             if (interChunkTimeUs > it->mPrevChunkTimestampUs) {
                 it->mMaxInterChunkDurUs = interChunkTimeUs;
             }
-
             return true;
         }
     }
@@ -2544,10 +2677,11 @@
     mDone = true;
 
     void *dummy;
-    pthread_join(mThread, &dummy);
-    status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
-
-    ALOGD("%s track stopped. %s source", getTrackType(), stopSource ? "Stop" : "Not Stop");
+    status_t err = pthread_join(mThread, &dummy);
+    WARN_UNLESS(err == 0, "track::stop: pthread_join status:%d", err);
+    err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+    WARN_UNLESS(err == 0, "%s track stopped. Status :%d. %s source", getTrackType(), err,
+                stopSource ? "Stop" : "Not Stop");
     return err;
 }
 
@@ -2959,11 +3093,11 @@
     uint32_t lastSamplesPerChunk = 0;
 
     if (mIsAudio) {
-        prctl(PR_SET_NAME, (unsigned long)"AudioTrackEncoding", 0, 0, 0);
+        prctl(PR_SET_NAME, (unsigned long)"AudioTrackWriterThread", 0, 0, 0);
     } else if (mIsVideo) {
-        prctl(PR_SET_NAME, (unsigned long)"VideoTrackEncoding", 0, 0, 0);
+        prctl(PR_SET_NAME, (unsigned long)"VideoTrackWriterThread", 0, 0, 0);
     } else {
-        prctl(PR_SET_NAME, (unsigned long)"MetadataTrackEncoding", 0, 0, 0);
+        prctl(PR_SET_NAME, (unsigned long)"MetadataTrackWriterThread", 0, 0, 0);
     }
 
     if (mOwner->isRealTimeRecording()) {
@@ -2983,6 +3117,7 @@
             continue;
         }
 
+
         // If the codec specific data has not been received yet, delay pause.
         // After the codec specific data is received, discard what we received
         // when the track is to be paused.
@@ -2993,7 +3128,6 @@
         }
 
         ++count;
-
         int32_t isCodecConfig;
         if (buffer->meta_data().findInt32(kKeyIsCodecConfig, &isCodecConfig)
                 && isCodecConfig) {
@@ -3060,6 +3194,18 @@
             }
         }
 
+        /*
+         * Reserve space in the file for the current sample and for the moov box that will be
+         * written later. If reservation for a new sample fails, preAllocate(...) stops the muxing
+         * session completely; stop() can still write the moov box successfully because space for
+         * it was reserved by the prior call. Release the current buffer/sample only in this path.
+         */
+        if (!mOwner->preAllocate(buffer->range_length())) {
+            buffer->release();
+            buffer = nullptr;
+            break;
+        }
+
         ++nActualFrames;
 
         // Make a deep copy of the MediaBuffer and Metadata and release
@@ -3071,7 +3217,6 @@
         meta_data = new MetaData(buffer->meta_data());
         buffer->release();
         buffer = NULL;
-
         if (isExif) {
             copy->meta_data().setInt32(kKeyExifTiffOffset, tiffHdrOffset);
         }
@@ -3119,10 +3264,10 @@
         if (mOwner->approachingFileSizeLimit()) {
             mOwner->notifyApproachingLimit();
         }
-
         int32_t isSync = false;
         meta_data->findInt32(kKeyIsSyncFrame, &isSync);
         CHECK(meta_data->findInt64(kKeyTime, &timestampUs));
+        timestampUs += mFirstSampleStartOffsetUs;
 
         // For video, skip the first several non-key frames until getting the first key frame.
         if (mIsVideo && !mGotStartKeyFrame && !isSync) {
@@ -3134,10 +3279,13 @@
             mGotStartKeyFrame = true;
         }
 ////////////////////////////////////////////////////////////////////////////////
-
         if (!mIsHeic) {
             if (mStszTableEntries->count() == 0) {
                 mFirstSampleTimeRealUs = systemTime() / 1000;
+                if (timestampUs < 0 && mFirstSampleStartOffsetUs == 0) {
+                    mFirstSampleStartOffsetUs = -timestampUs;
+                    timestampUs = 0;
+                }
                 mOwner->setStartTimestampUs(timestampUs);
                 mStartTimestampUs = timestampUs;
                 previousPausedDurationUs = mStartTimestampUs;
@@ -3309,17 +3457,17 @@
                 }
             }
             mStszTableEntries->add(htonl(sampleSize));
+
             if (mStszTableEntries->count() > 2) {
 
                 // Force the first sample to have its own stts entry so that
                 // we can adjust its value later to maintain the A/V sync.
-                if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
+                if (lastDurationTicks && currDurationTicks != lastDurationTicks) {
                     addOneSttsTableEntry(sampleCount, lastDurationTicks);
                     sampleCount = 1;
                 } else {
                     ++sampleCount;
                 }
-
             }
             if (mSamplesHaveSameSize) {
                 if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
@@ -3352,11 +3500,7 @@
             if (mIsHeic) {
                 addItemOffsetAndSize(offset, bytesWritten, isExif);
             } else {
-                uint32_t count = (mOwner->use32BitFileOffset()
-                            ? mStcoTableEntries->count()
-                            : mCo64TableEntries->count());
-
-                if (count == 0) {
+                if (mCo64TableEntries->count() == 0) {
                     addChunkOffset(offset);
                 }
             }
@@ -3392,9 +3536,7 @@
                 }
             }
         }
-
     }
-
     if (isTrackMalFormed()) {
         dumpTimeStamps();
         err = ERROR_MALFORMED;
@@ -3426,17 +3568,10 @@
             ++sampleCount;  // Count for the last sample
         }
 
-        if (mStszTableEntries->count() <= 2) {
-            addOneSttsTableEntry(1, lastDurationTicks);
-            if (sampleCount - 1 > 0) {
-                addOneSttsTableEntry(sampleCount - 1, lastDurationTicks);
-            }
-        } else {
-            addOneSttsTableEntry(sampleCount, lastDurationTicks);
-        }
+        addOneSttsTableEntry(sampleCount, lastDurationTicks);
 
-        // The last ctts box may not have been written yet, and this
-        // is to make sure that we write out the last ctts box.
+        // The last ctts box entry may not have been written yet, and this
+        // is to make sure that we write out the last ctts box entry.
         if (currCttsOffsetTimeTicks == lastCttsOffsetTimeTicks) {
             if (cttsSampleCount > 0) {
                 addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
@@ -3687,7 +3822,7 @@
                       "Metadata";
 }
 
-void MPEG4Writer::Track::writeTrackHeader(bool use32BitOffset) {
+void MPEG4Writer::Track::writeTrackHeader() {
     uint32_t now = getMpeg4Time();
     mOwner->beginBox("trak");
         writeTkhdBox(now);
@@ -3704,7 +3839,7 @@
                     writeNmhdBox();
                 }
                 writeDinfBox();
-                writeStblBox(use32BitOffset);
+                writeStblBox();
             mOwner->endBox();  // minf
         mOwner->endBox();  // mdia
     mOwner->endBox();  // trak
@@ -3720,7 +3855,7 @@
     return mMinCttsOffsetTimeUs;
 }
 
-void MPEG4Writer::Track::writeStblBox(bool use32BitOffset) {
+void MPEG4Writer::Track::writeStblBox() {
     mOwner->beginBox("stbl");
     mOwner->beginBox("stsd");
     mOwner->writeInt32(0);               // version=0, flags=0
@@ -3740,7 +3875,7 @@
     }
     writeStszBox();
     writeStscBox();
-    writeStcoBox(use32BitOffset);
+    writeCo64Box();
     mOwner->endBox();  // stbl
 }
 
@@ -4099,19 +4234,122 @@
     mOwner->endBox();
 }
 
-void MPEG4Writer::Track::writeEdtsBox(){
+void MPEG4Writer::Track::writeEdtsBox() {
     ALOGV("%s : getStartTimeOffsetTimeUs of track:%" PRId64 " us", getTrackType(),
         getStartTimeOffsetTimeUs());
 
-    // Prepone video playback.
-    if (mMinCttsOffsetTicks != mMaxCttsOffsetTicks) {
-        int32_t mvhdTimeScale = mOwner->getTimeScale();
-        uint32_t tkhdDuration = (getDurationUs() * mvhdTimeScale + 5E5) / 1E6;
-        int64_t mediaTime = ((kMaxCttsOffsetTimeUs - getMinCttsOffsetTimeUs())
-            * mTimeScale + 5E5) / 1E6;
-        if (tkhdDuration > 0 && mediaTime > 0) {
-            addOneElstTableEntry(tkhdDuration, mediaTime, 1, 0);
+    int32_t mvhdTimeScale = mOwner->getTimeScale();
+    ALOGV("mvhdTimeScale:%" PRId32, mvhdTimeScale);
+    /* trackStartOffsetUs of this track is the sum of the longest offset needed by any track with
+     * B frames in this movie and this track's own start offset.
+     */
+    int64_t trackStartOffsetUs = getStartTimeOffsetTimeUs();
+    ALOGV("trackStartOffsetUs:%" PRId64, trackStartOffsetUs);
+
+    // Longest offset needed by a track among all tracks with B frames.
+    int32_t movieStartOffsetBFramesUs = mOwner->getStartTimeOffsetBFramesUs();
+    ALOGV("movieStartOffsetBFramesUs:%" PRId32, movieStartOffsetBFramesUs);
+
+    // This media/track's real duration (sum of duration of all samples in this track).
+    uint32_t tkhdDurationTicks = (mTrackDurationUs * mvhdTimeScale + 5E5) / 1E6;
+    ALOGV("mTrackDurationUs:%" PRId64 "us", mTrackDurationUs);
+
+    int64_t movieStartTimeUs = mOwner->getStartTimestampUs();
+    ALOGV("movieStartTimeUs:%" PRId64, movieStartTimeUs);
+
+    int64_t trackStartTimeUs = movieStartTimeUs + trackStartOffsetUs;
+    ALOGV("trackStartTimeUs:%" PRId64, trackStartTimeUs);
+
+    if (movieStartOffsetBFramesUs == 0) {
+        // No B frames in any tracks.
+        if (trackStartOffsetUs > 0) {
+            // Track with positive start offset.
+            uint32_t segDuration = (trackStartOffsetUs * mvhdTimeScale + 5E5) / 1E6;
+            ALOGV("segDuration:%" PRId64 "us", trackStartOffsetUs);
+            /* The first entry is an empty edit (indicated by media_time equal to -1), and its
+             * duration (segment_duration) equals the difference between the presentation time of
+             * this track's earliest media sample and that of the earliest media sample among all
+             * tracks.
+             */
+            ALOGV("Empty edit list entry");
+            addOneElstTableEntry(segDuration, -1, 1, 0);
+            addOneElstTableEntry(tkhdDurationTicks, 0, 1, 0);
+        } else if (mFirstSampleStartOffsetUs > 0) {
+            // Track with start time < 0 / negative start offset.
+            ALOGV("Normal edit list entry");
+            int32_t mediaTime = (mFirstSampleStartOffsetUs * mTimeScale + 5E5) / 1E6;
+            int32_t firstSampleOffsetTicks =
+                    (mFirstSampleStartOffsetUs * mvhdTimeScale + 5E5) / 1E6;
+            // Samples before time 0 don't count toward the duration, hence subtract firstSampleOffsetTicks.
+            addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTime, 1, 0);
+        } else {
+            // Track starting at zero.
+            ALOGV("No edit list entry required for this track");
         }
+    } else if (movieStartOffsetBFramesUs < 0) {
+        // B frames present in at least one of the tracks.
+        ALOGV("writeEdtsBox - Reordered frames(B frames) present");
+        if (trackStartOffsetUs == std::abs(movieStartOffsetBFramesUs)) {
+            // Track starting at 0, no start offset.
+            // TODO: handle mFirstSampleStartOffsetUs > 0 and trackStartOffsetUs > 0 separately.
+            if (mMinCttsOffsetTicks == mMaxCttsOffsetTicks) {
+                // Video with no B frame or non-video track.
+                if (mFirstSampleStartOffsetUs > 0) {
+                    // Track with start time < 0 / negative start offset.
+                    ALOGV("Normal edit list entry");
+                    ALOGV("mFirstSampleStartOffsetUs:%" PRId64 "us", mFirstSampleStartOffsetUs);
+                    int32_t mediaTimeTicks = (mFirstSampleStartOffsetUs * mTimeScale + 5E5) / 1E6;
+                    int32_t firstSampleOffsetTicks =
+                            (mFirstSampleStartOffsetUs * mvhdTimeScale + 5E5) / 1E6;
+                    // Samples before 0 don't count for duration, subtract firstSampleOffsetTicks.
+                    addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTimeTicks,
+                                         1, 0);
+                }
+            } else {
+                // Track with B Frames.
+                int32_t mediaTimeTicks = (trackStartOffsetUs * mTimeScale + 5E5) / 1E6;
+                ALOGV("mediaTime:%" PRId64 "us", trackStartOffsetUs);
+                ALOGV("Normal edit list entry to negate start offset by B Frames in other tracks");
+                addOneElstTableEntry(tkhdDurationTicks, mediaTimeTicks, 1, 0);
+            }
+        } else if (trackStartOffsetUs > std::abs(movieStartOffsetBFramesUs)) {
+            // Track with start offset.
+            ALOGV("Tracks starting > 0");
+            int32_t editDurationTicks = 0;
+            if (mMinCttsOffsetTicks == mMaxCttsOffsetTicks) {
+                // Video with no B frame or non-video track.
+                editDurationTicks =
+                        ((trackStartOffsetUs + movieStartOffsetBFramesUs) * mvhdTimeScale + 5E5) /
+                        1E6;
+                ALOGV("editDuration:%" PRId64 "us", (trackStartOffsetUs + movieStartOffsetBFramesUs));
+            } else {
+                // Track with B frame.
+                int32_t trackStartOffsetBFramesUs = getMinCttsOffsetTimeUs() - kMaxCttsOffsetTimeUs;
+                ALOGV("trackStartOffsetBFramesUs:%" PRId32, trackStartOffsetBFramesUs);
+                editDurationTicks =
+                        ((trackStartOffsetUs + movieStartOffsetBFramesUs +
+                          trackStartOffsetBFramesUs) * mvhdTimeScale + 5E5) / 1E6;
+                ALOGV("editDuration:%" PRId64 "us", (trackStartOffsetUs + movieStartOffsetBFramesUs + trackStartOffsetBFramesUs));
+            }
+            ALOGV("editDurationTicks:%" PRId32, editDurationTicks);
+            if (editDurationTicks > 0) {
+                ALOGV("Empty edit list entry");
+                addOneElstTableEntry(editDurationTicks, -1, 1, 0);
+                addOneElstTableEntry(tkhdDurationTicks, 0, 1, 0);
+            } else if (editDurationTicks < 0) {
+                // Only video tracks with B Frames would hit this case.
+                ALOGV("Edit list entry to negate start offset by B frames in other tracks");
+                addOneElstTableEntry(tkhdDurationTicks, std::abs(editDurationTicks), 1, 0);
+            } else {
+                ALOGV("No edit list entry needed for this track");
+            }
+        } else {
+            // Not expecting this case as we adjust negative start timestamps to zero.
+            ALOGW("trackStartOffsetUs < std::abs(movieStartOffsetBFramesUs)");
+        }
+    } else {
+        // Unexpected: movieStartOffsetBFramesUs should be negative (B frames present) or zero (absent).
+        ALOGW("movieStartOffsetBFramesUs > 0");
     }
 
     if (mElstTableEntries->count() == 0) {
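Editor's note: the writeEdtsBox() hunk above converts microsecond offsets and durations into movie- and media-timescale ticks before adding elst entries. A standalone sketch of that conversion and of the two edit-list shapes it produces (empty edit + media edit for a late-starting track, single edit with a positive media_time for a track whose first sample is before time zero); the timescales and offsets are illustrative, not values from the change.

```
#include <cstdint>
#include <cstdio>

// Convert microseconds to timescale ticks, rounding to nearest; this mirrors the
// (x * timeScale + 5E5) / 1E6 arithmetic used in writeEdtsBox() above.
static int64_t usToTicks(int64_t us, int32_t timeScale) {
    return (us * timeScale + 500000LL) / 1000000LL;
}

int main() {
    int32_t mvhdTimeScale = 1000;        // movie timescale (illustrative)
    int32_t trackTimeScale = 90000;      // media timescale (illustrative)
    int64_t trackStartOffsetUs = 66000;  // track starts 66 ms after the movie start
    int64_t trackDurationUs = 10000000;  // 10 s of media

    // Late-starting track: an empty edit covering the gap, then a normal edit.
    int64_t emptyEditTicks = usToTicks(trackStartOffsetUs, mvhdTimeScale);  // 66
    int64_t durationTicks = usToTicks(trackDurationUs, mvhdTimeScale);      // 10000
    printf("elst: (%lld, media_time=-1), (%lld, media_time=0)\n",
           (long long)emptyEditTicks, (long long)durationTicks);

    // Track whose first sample has a negative timestamp: one edit whose media_time
    // (in the media timescale) skips the part of the media before time zero.
    int64_t firstSampleStartOffsetUs = 20000;
    int64_t mediaTimeTicks = usToTicks(firstSampleStartOffsetUs, trackTimeScale);  // 1800
    printf("elst: (%lld, media_time=%lld)\n",
           (long long)(durationTicks - usToTicks(firstSampleStartOffsetUs, mvhdTimeScale)),
           (long long)mediaTimeTicks);
    return 0;
}
```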
@@ -4253,19 +4491,6 @@
 void MPEG4Writer::Track::writeSttsBox() {
     mOwner->beginBox("stts");
     mOwner->writeInt32(0);  // version=0, flags=0
-    if (mMinCttsOffsetTicks == mMaxCttsOffsetTicks) {
-        // For non-vdeio tracks or video tracks without ctts table,
-        // adjust duration of first sample for tracks to account for
-        // first sample not starting at the media start time.
-        // TODO: consider signaling this using some offset
-        // as this is not quite correct.
-        uint32_t duration;
-        CHECK(mSttsTableEntries->get(duration, 1));
-        duration = htonl(duration);  // Back to host byte order
-        int32_t startTimeOffsetScaled = (((getStartTimeOffsetTimeUs() +
-            mOwner->getStartTimeOffsetBFramesUs()) * mTimeScale) + 500000LL) / 1000000LL;
-        mSttsTableEntries->set(htonl((int32_t)duration + startTimeOffsetScaled), 1);
-    }
     mSttsTableEntries->write(mOwner);
     mOwner->endBox();  // stts
 }
@@ -4286,7 +4511,9 @@
 
     mOwner->beginBox("ctts");
     mOwner->writeInt32(0);  // version=0, flags=0
-    int64_t deltaTimeUs = kMaxCttsOffsetTimeUs - getStartTimeOffsetTimeUs();
+    // Adjust ctts entries to have only offset needed for reordering frames.
+    int64_t deltaTimeUs = mMinCttsOffsetTimeUs;
+    ALOGV("ctts deltaTimeUs:%" PRId64, deltaTimeUs);
     int64_t delta = (deltaTimeUs * mTimeScale + 500000LL) / 1000000LL;
     mCttsTableEntries->adjustEntries([delta](size_t /* ix */, uint32_t (&value)[2]) {
         // entries are <count, ctts> pairs; adjust only ctts
@@ -4327,14 +4554,10 @@
     mOwner->endBox();  // stsc
 }
 
-void MPEG4Writer::Track::writeStcoBox(bool use32BitOffset) {
-    mOwner->beginBox(use32BitOffset? "stco": "co64");
+void MPEG4Writer::Track::writeCo64Box() {
+    mOwner->beginBox("co64");
     mOwner->writeInt32(0);  // version=0, flags=0
-    if (use32BitOffset) {
-        mStcoTableEntries->write(mOwner);
-    } else {
-        mCo64TableEntries->write(mOwner);
-    }
+    mCo64TableEntries->write(mOwner);
     mOwner->endBox();  // stco or co64
 }
 
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 7e337de..3808d7e 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -48,7 +48,8 @@
 
 MediaMuxer::MediaMuxer(int fd, OutputFormat format)
     : mFormat(format),
-      mState(UNINITIALIZED) {
+      mState(UNINITIALIZED),
+      mError(OK) {
     if (isMp4Format(format)) {
         mWriter = new MPEG4Writer(fd);
     } else if (format == OUTPUT_FORMAT_WEBM) {
@@ -58,6 +59,7 @@
     }
 
     if (mWriter != NULL) {
+        mWriter->setMuxerListener(this);
         mFileMeta = new MetaData;
         if (format == OUTPUT_FORMAT_HEIF) {
             // Note that the key uses recorder file types.
@@ -156,14 +158,22 @@
 status_t MediaMuxer::stop() {
     Mutex::Autolock autoLock(mMuxerLock);
 
-    if (mState == STARTED) {
+    if (mState == STARTED || mState == ERROR) {
         mState = STOPPED;
         for (size_t i = 0; i < mTrackList.size(); i++) {
             if (mTrackList[i]->stop() != OK) {
                 return INVALID_OPERATION;
             }
         }
-        return mWriter->stop();
+        status_t err = mWriter->stop();
+        if (err != OK || mError != OK) {
+            ALOGE("stop err: %d, mError:%d", err, mError);
+        }
+        // Prioritize mError over err.
+        if (mError != OK) {
+            err = mError;
+        }
+        return err;
     } else {
         ALOGE("stop() is called in invalid state %d", mState);
         return INVALID_OPERATION;
@@ -212,4 +222,29 @@
     return currentTrack->pushBuffer(mediaBuffer);
 }
 
+void MediaMuxer::notify(int msg, int ext1, int ext2) {
+    switch (msg) {
+        case MEDIA_RECORDER_EVENT_ERROR:
+        case MEDIA_RECORDER_TRACK_EVENT_ERROR: {
+            Mutex::Autolock autoLock(mMuxerLock);
+            mState = ERROR;
+            mError = ext2;
+            ALOGW("message received msg=%d, ext1=%d, ext2=%d", msg, ext1, ext2);
+            break;
+        }
+        case MEDIA_RECORDER_EVENT_INFO: {
+            if (ext1 == MEDIA_RECORDER_INFO_UNKNOWN) {
+                Mutex::Autolock autoLock(mMuxerLock);
+                mState = ERROR;
+                mError = ext2;
+                ALOGW("message received msg=%d, ext1=%d, ext2=%d", msg, ext1, ext2);
+            }
+            break;
+        }
+        default:
+            // Ignore INFO and other notifications for now.
+            break;
+    }
+}
+
 }  // namespace android
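Editor's note: the notification path added in MediaMuxer.cpp above (the writer holds a weak reference to the muxer, promotes it before notifying, and the muxer latches the first error so stop() can return it) can be summarized with plain standard-library smart pointers. This sketch deliberately uses std::shared_ptr/std::weak_ptr instead of android::sp/wp, and the message and error values are made up for illustration.

```
#include <memory>
#include <mutex>
#include <cstdio>

struct Muxer {
    std::mutex mLock;
    int state = 0;   // 0 = started, 1 = error
    int error = 0;   // first error reported by the writer
    void notify(int msg, int ext1, int ext2) {
        std::lock_guard<std::mutex> l(mLock);
        state = 1;
        error = ext2;
        std::printf("muxer notified: msg=%d ext1=%d ext2=%d\n", msg, ext1, ext2);
    }
};

struct Writer {
    std::weak_ptr<Muxer> muxer;              // weak, so the muxer can own the writer
    void notify(int msg, int ext1, int ext2) {
        if (auto m = muxer.lock()) {         // promote before use, like mMuxer.promote()
            m->notify(msg, ext1, ext2);
        }
    }
};

int main() {
    auto muxer = std::make_shared<Muxer>();
    Writer writer;
    writer.muxer = muxer;
    writer.notify(100 /* made-up error msg */, 0, -7 /* made-up error code */);
    return muxer->error == 0 ? 0 : 2;        // stop() would return the recorded error
}
```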
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 6f19023..0564835 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -25,6 +25,7 @@
 #include <utils/threads.h>
 #include <media/stagefright/foundation/AHandlerReflector.h>
 #include <media/stagefright/foundation/ALooper.h>
+#include <mutex>
 
 namespace android {
 
@@ -58,6 +59,10 @@
     void writeFourcc(const char *fourcc);
     void write(const void *data, size_t size);
     inline size_t write(const void *ptr, size_t size, size_t nmemb);
+    // Write to the file by calling ::write(), or post an error message to the looper on failure.
+    void writeOrPostError(int fd, const void *buf, size_t count);
+    // Seek in the file by calling ::lseek64(), or post an error message to the looper on failure.
+    void seekOrPostError(int fd, off64_t offset, int whence);
     void endBox();
     uint32_t interleaveDuration() const { return mInterleaveDurationUs; }
     status_t setInterleaveDuration(uint32_t duration);
@@ -80,6 +85,8 @@
 
     enum {
         kWhatSwitch                          = 'swch',
+        kWhatHandleIOError                   = 'ioer',
+        kWhatHandleFallocateError            = 'faer'
     };
 
     int  mFd;
@@ -88,14 +95,15 @@
     status_t mInitCheck;
     bool mIsRealTimeRecording;
     bool mUse4ByteNalLength;
-    bool mUse32BitOffset;
     bool mIsFileSizeLimitExplicitlyRequested;
     bool mPaused;
     bool mStarted;  // Writer thread + track threads started successfully
     bool mWriterThreadStarted;  // Only writer thread started successfully
     bool mSendNotify;
     off64_t mOffset;
-    off_t mMdatOffset;
+    off64_t mPreAllocateFileEndOffset;  // End-of-file offset during preallocation.
+    off64_t mMdatOffset;
+    off64_t mMdatEndOffset;  // End offset of mdat atom.
     uint8_t *mInMemoryCache;
     off64_t mInMemoryCacheOffset;
     off64_t mInMemoryCacheSize;
@@ -106,17 +114,24 @@
     uint32_t mInterleaveDurationUs;
     int32_t mTimeScale;
     int64_t mStartTimestampUs;
-    int32_t mStartTimeOffsetBFramesUs; // Start time offset when B Frames are present
+    int32_t mStartTimeOffsetBFramesUs;  // Longest offset needed for reordering tracks with B Frames
     int mLatitudex10000;
     int mLongitudex10000;
     bool mAreGeoTagsAvailable;
     int32_t mStartTimeOffsetMs;
     bool mSwitchPending;
+    bool mWriteSeekErr;
+    bool mFallocateErr;
+    bool mPreAllocationEnabled;
 
     sp<ALooper> mLooper;
     sp<AHandlerReflector<MPEG4Writer> > mReflector;
 
     Mutex mLock;
+    std::mutex mResetMutex;
+    std::mutex mFallocMutex;
+    bool mPreAllocFirstTime; // Pre-allocate space for file and track headers only once per file.
+    uint64_t mPrevAllTracksTotalMetaDataSizeEstimate;
 
     List<Track *> mTracks;
 
@@ -200,6 +215,7 @@
     } ItemProperty;
 
     bool mHasFileLevelMeta;
+    uint64_t mFileLevelMetaDataSize;
     bool mHasMoovBox;
     uint32_t mPrimaryItemId;
     uint32_t mAssociationEntryCount;
@@ -210,9 +226,11 @@
 
     // Writer thread handling
     status_t startWriterThread();
-    void stopWriterThread();
+    status_t stopWriterThread();
     static void *ThreadWrapper(void *me);
     void threadFunc();
+    void setupAndStartLooper();
+    void stopAndReleaseLooper();
 
     // Buffer a single chunk to be written out later.
     void bufferChunk(const Chunk& chunk);
@@ -263,7 +281,6 @@
     void addRefs_l(uint16_t itemId, const ItemRefs &);
 
     bool exceedsFileSizeLimit();
-    bool use32BitFileOffset() const;
     bool exceedsFileDurationLimit();
     bool approachingFileSizeLimit();
     bool isFileStreamable() const;
@@ -284,6 +301,16 @@
     void writeIlst();
     void writeMoovLevelMetaBox();
 
+    /*
+     * Allocate the space needed for the moov atom in advance, keeping just enough reserved
+     * before any data is written. On error, stop writing but still save the moov atom.
+     */
+    bool preAllocate(uint64_t wantSize);
+    /*
+     * Truncate the file to the size actually used for metadata and media data in this session.
+     */
+    bool truncatePreAllocation();
+
     // HEIF writing
     void writeIlocBox();
     void writeInfeBox(uint16_t itemId, const char *type, uint32_t flags);
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index 69d6cde..98df9b5 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -117,21 +117,24 @@
     status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
                              int64_t timeUs, uint32_t flags) ;
 
+    void notify(int msg, int ext1, int ext2);
+
 private:
     const OutputFormat mFormat;
     sp<MediaWriter> mWriter;
     Vector< sp<MediaAdapter> > mTrackList;  // Each track has its MediaAdapter.
     sp<MetaData> mFileMeta;  // Metadata for the whole file.
-
     Mutex mMuxerLock;
 
     enum State {
         UNINITIALIZED,
         INITIALIZED,
         STARTED,
-        STOPPED
+        STOPPED,
+        ERROR
     };
     State mState;
+    status_t mError;
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaMuxer);
 };
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index cacb901..08e54b3 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -21,6 +21,7 @@
 #include <utils/RefBase.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/IMediaRecorderClient.h>
+#include <media/stagefright/MediaMuxer.h>
 
 namespace android {
 
@@ -36,7 +37,7 @@
     virtual status_t stop() = 0;
     virtual status_t pause() = 0;
     virtual status_t setCaptureRate(float /* captureFps */) {
-        ALOGW("setCaptureRate unsupported");
+        ALOG(LOG_WARN, "MediaWriter", "setCaptureRate unsupported");
         return ERROR_UNSUPPORTED;
     }
 
@@ -45,6 +46,7 @@
     virtual void setListener(const sp<IMediaRecorderClient>& listener) {
         mListener = listener;
     }
+    virtual void setMuxerListener(const wp<MediaMuxer>& muxer) { mMuxer = muxer; }
 
     virtual status_t dump(int /*fd*/, const Vector<String16>& /*args*/) {
         return OK;
@@ -59,11 +61,17 @@
     int64_t mMaxFileSizeLimitBytes;
     int64_t mMaxFileDurationLimitUs;
     sp<IMediaRecorderClient> mListener;
+    wp<MediaMuxer> mMuxer;
 
     void notify(int msg, int ext1, int ext2) {
-        if (mListener != NULL) {
+        ALOG(LOG_VERBOSE, "MediaWriter", "notify msg:%d, ext1:%d, ext2:%d", msg, ext1, ext2);
+        if (mListener != nullptr) {
             mListener->notify(msg, ext1, ext2);
         }
+        sp<MediaMuxer> muxer = mMuxer.promote();
+        if (muxer != nullptr) {
+            muxer->notify(msg, ext1, ext2);
+        }
     }
 private:
     MediaWriter(const MediaWriter &);
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index e17093a..b9cd18a 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -113,8 +113,6 @@
     kKeyVideoProfile      = 'vprf',  // int32_t
     kKeyVideoLevel        = 'vlev',  // int32_t
 
-    // Set this key to enable authoring files in 64-bit offset
-    kKey64BitFileOffset   = 'fobt',  // int32_t (bool)
     kKey2ByteNalLength    = '2NAL',  // int32_t (bool)
 
     // Identify the file output format for authoring
diff --git a/media/tests/benchmark/README.md b/media/tests/benchmark/README.md
index 6627462..05fbe6f 100644
--- a/media/tests/benchmark/README.md
+++ b/media/tests/benchmark/README.md
@@ -145,3 +145,12 @@
 ```
 adb shell /data/local/tmp/C2DecoderTest -P /data/local/tmp/MediaBenchmark/res/
 ```
+## C2 Encoder
+
+The test encodes an input stream and benchmarks the codec2 encoders available on the device.
+
+Setup steps are the same as for the [extractor](#extractor).
+
+```
+adb shell /data/local/tmp/C2EncoderTest -P /data/local/tmp/MediaBenchmark/res/
+```
diff --git a/media/tests/benchmark/src/native/common/BenchmarkCommon.h b/media/tests/benchmark/src/native/common/BenchmarkCommon.h
index 8153a86..e13158a 100644
--- a/media/tests/benchmark/src/native/common/BenchmarkCommon.h
+++ b/media/tests/benchmark/src/native/common/BenchmarkCommon.h
@@ -21,6 +21,7 @@
 #include <mutex>
 #include <queue>
 #include <thread>
+#include <iostream>
 
 #include <media/NdkMediaCodec.h>
 #include <media/NdkMediaError.h>
diff --git a/media/tests/benchmark/src/native/encoder/Android.bp b/media/tests/benchmark/src/native/encoder/Android.bp
index 239f378..57865ef 100644
--- a/media/tests/benchmark/src/native/encoder/Android.bp
+++ b/media/tests/benchmark/src/native/encoder/Android.bp
@@ -31,3 +31,23 @@
 
     ldflags: ["-Wl,-Bsymbolic"]
 }
+
+cc_library_static {
+    name: "libmediabenchmark_codec2_encoder",
+    defaults: [
+        "libmediabenchmark_common-defaults",
+        "libmediabenchmark_codec2_common-defaults",
+    ],
+
+    srcs: ["C2Encoder.cpp"],
+
+    static_libs: [
+        "libmediabenchmark_codec2_common",
+        "libmediabenchmark_extractor",
+        "libmediabenchmark_decoder",
+    ],
+
+    export_include_dirs: ["."],
+
+    ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/encoder/C2Encoder.cpp b/media/tests/benchmark/src/native/encoder/C2Encoder.cpp
new file mode 100644
index 0000000..33429ef
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/C2Encoder.cpp
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2Encoder"
+
+#include "C2Encoder.h"
+
+int32_t C2Encoder::createCodec2Component(string compName, AMediaFormat *format) {
+    ALOGV("In %s", __func__);
+    mListener.reset(new CodecListener(
+            [this](std::list<std::unique_ptr<C2Work>> &workItems) { handleWorkDone(workItems); }));
+    if (!mListener) return -1;
+
+    const char *mime = nullptr;
+    AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime);
+    if (!mime) {
+        ALOGE("Error in AMediaFormat_getString");
+        return -1;
+    }
+    // Configure the plugin with Input properties
+    std::vector<C2Param *> configParam;
+    if (!strncmp(mime, "audio/", 6)) {
+        mIsAudioEncoder = true;
+        int32_t numChannels;
+        if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &mSampleRate)) {
+            ALOGE("AMEDIAFORMAT_KEY_SAMPLE_RATE not set");
+            return -1;
+        }
+        if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &numChannels)) {
+            ALOGE("AMEDIAFORMAT_KEY_CHANNEL_COUNT not set");
+            return -1;
+        }
+        C2StreamSampleRateInfo::input sampleRateInfo(0u, mSampleRate);
+        C2StreamChannelCountInfo::input channelCountInfo(0u, numChannels);
+        configParam.push_back(&sampleRateInfo);
+        configParam.push_back(&channelCountInfo);
+    } else {
+        mIsAudioEncoder = false;
+        if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_WIDTH, &mWidth)) {
+            ALOGE("AMEDIAFORMAT_KEY_WIDTH not set");
+            return -1;
+        }
+        if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_HEIGHT, &mHeight)) {
+            ALOGE("AMEDIAFORMAT_KEY_HEIGHT not set");
+            return -1;
+        }
+        C2StreamPictureSizeInfo::input inputSize(0u, mWidth, mHeight);
+        configParam.push_back(&inputSize);
+
+        if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_FRAME_RATE, &mFrameRate) ||
+            (mFrameRate <= 0)) {
+            mFrameRate = KDefaultFrameRate;
+        }
+    }
+
+    int64_t sTime = mStats->getCurTime();
+    mComponent = mClient->CreateComponentByName(compName.c_str(), mListener, &mClient);
+    if (mComponent == nullptr) {
+        ALOGE("Create component failed for %s", compName.c_str());
+        return -1;
+    }
+    std::vector<std::unique_ptr<C2SettingResult>> failures;
+    int32_t status = mComponent->config(configParam, C2_DONT_BLOCK, &failures);
+    if (failures.size() != 0) {
+        ALOGE("Invalid Configuration");
+        return -1;
+    }
+
+    status |= mComponent->start();
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setInitTime(timeTaken);
+    return status;
+}
+
+// In encoder components, fetch the size of the allocated input buffer.
+int32_t C2Encoder::getInputMaxBufSize() {
+    int32_t bitStreamInfo[1] = {0};
+    std::vector<std::unique_ptr<C2Param>> inParams;
+    c2_status_t status = mComponent->query({}, {C2StreamMaxBufferSizeInfo::input::PARAM_TYPE},
+                                           C2_DONT_BLOCK, &inParams);
+    if (status != C2_OK && inParams.size() == 0) {
+        ALOGE("Query MaxBufferSizeInfo failed => %d", status);
+        return status;
+    } else {
+        size_t offset = sizeof(C2Param);
+        for (size_t i = 0; i < inParams.size(); ++i) {
+            C2Param *param = inParams[i].get();
+            bitStreamInfo[i] = *(int32_t *)((uint8_t *)param + offset);
+        }
+    }
+    mInputMaxBufSize = bitStreamInfo[0];
+    if (mInputMaxBufSize < 0) {
+        ALOGE("Invalid mInputMaxBufSize %d\n", mInputMaxBufSize);
+        return -1;
+    }
+    return status;
+}
+
+int32_t C2Encoder::encodeFrames(ifstream &eleStream, size_t inputBufferSize) {
+    ALOGV("In %s", __func__);
+    int32_t frameSize = 0;
+    if (!mIsAudioEncoder) {
+        frameSize = mWidth * mHeight * 3 / 2;
+    } else {
+        frameSize = DEFAULT_AUDIO_FRAME_SIZE;
+        if (getInputMaxBufSize() != 0) return -1;
+        if (frameSize > mInputMaxBufSize) {
+            frameSize = mInputMaxBufSize;
+        }
+    }
+    int32_t numFrames = (inputBufferSize + frameSize - 1) / frameSize;
+    // Temporary buffer to read data from the input file
+    char *data = (char *)malloc(frameSize);
+    if (!data) {
+        ALOGE("Insufficient memory to read from input file");
+        return -1;
+    }
+
+    typedef std::unique_lock<std::mutex> ULock;
+    uint64_t presentationTimeUs = 0;
+    size_t offset = 0;
+    c2_status_t status = C2_OK;
+
+    mStats->setStartTime();
+    while (numFrames > 0) {
+        std::unique_ptr<C2Work> work;
+        // Prepare C2Work
+        {
+            ULock l(mQueueLock);
+            if (mWorkQueue.empty()) mQueueCondition.wait_for(l, MAX_RETRY * TIME_OUT);
+            if (!mWorkQueue.empty()) {
+                mStats->addInputTime();
+                work.swap(mWorkQueue.front());
+                mWorkQueue.pop_front();
+            } else {
+                cout << "Wait for generating C2Work exceeded timeout" << endl;
+                return -1;
+            }
+        }
+
+        if (mIsAudioEncoder) {
+            presentationTimeUs = mNumInputFrame * frameSize * (1000000 / mSampleRate);
+        } else {
+            presentationTimeUs = mNumInputFrame * (1000000 / mFrameRate);
+        }
+        uint32_t flags = 0;
+        if (numFrames == 1) flags |= C2FrameData::FLAG_END_OF_STREAM;
+
+        work->input.flags = (C2FrameData::flags_t)flags;
+        work->input.ordinal.timestamp = presentationTimeUs;
+        work->input.ordinal.frameIndex = mNumInputFrame;
+        work->input.buffers.clear();
+
+        if (inputBufferSize - offset < frameSize) {
+            frameSize = inputBufferSize - offset;
+        }
+        eleStream.read(data, frameSize);
+        if (eleStream.gcount() != frameSize) {
+            ALOGE("read() from file failed. Incorrect bytes read");
+            return -1;
+        }
+        offset += frameSize;
+
+        if (frameSize) {
+            if (mIsAudioEncoder) {
+                std::shared_ptr<C2LinearBlock> block;
+                status = mLinearPool->fetchLinearBlock(
+                        frameSize, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+                if (status != C2_OK || !block) {
+                    cout << "fetchLinearBlock failed : " << status << endl;
+                    return status;
+                }
+                C2WriteView view = block->map().get();
+                if (view.error() != C2_OK) {
+                    cout << "C2LinearBlock::map() failed : " << view.error() << endl;
+                    return view.error();
+                }
+
+                memcpy(view.base(), data, frameSize);
+                work->input.buffers.emplace_back(new LinearBuffer(block));
+            } else {
+                std::shared_ptr<C2GraphicBlock> block;
+                status = mGraphicPool->fetchGraphicBlock(
+                        mWidth, mHeight, HAL_PIXEL_FORMAT_YV12,
+                        {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+                if (status != C2_OK || !block) {
+                    cout << "fetchGraphicBlock failed : " << status << endl;
+                    return status;
+                }
+                C2GraphicView view = block->map().get();
+                if (view.error() != C2_OK) {
+                    cout << "C2GraphicBlock::map() failed : " << view.error() << endl;
+                    return view.error();
+                }
+
+                uint8_t *pY = view.data()[C2PlanarLayout::PLANE_Y];
+                uint8_t *pU = view.data()[C2PlanarLayout::PLANE_U];
+                uint8_t *pV = view.data()[C2PlanarLayout::PLANE_V];
+                memcpy(pY, data, mWidth * mHeight);
+                memcpy(pU, data + mWidth * mHeight, (mWidth * mHeight >> 2));
+                memcpy(pV, data + (mWidth * mHeight * 5 >> 2), mWidth * mHeight >> 2);
+                work->input.buffers.emplace_back(new GraphicBuffer(block));
+            }
+            mStats->addFrameSize(frameSize);
+        }
+
+        work->worklets.clear();
+        work->worklets.emplace_back(new C2Worklet);
+
+        std::list<std::unique_ptr<C2Work>> items;
+        items.push_back(std::move(work));
+        // queue() invokes process() function of C2 Plugin.
+        status = mComponent->queue(&items);
+        if (status != C2_OK) {
+            ALOGE("queue failed");
+            return status;
+        }
+        ALOGV("Frame #%d size = %d queued", mNumInputFrame, frameSize);
+        numFrames--;
+        mNumInputFrame++;
+    }
+    free(data);
+    return status;
+}
+
+void C2Encoder::deInitCodec() {
+    ALOGV("In %s", __func__);
+    if (!mComponent) return;
+
+    int64_t sTime = mStats->getCurTime();
+    mComponent->stop();
+    mComponent->release();
+    mComponent = nullptr;
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setDeInitTime(timeTaken);
+}
+
+void C2Encoder::dumpStatistics(string inputReference, int64_t durationUs) {
+    string operation = "c2encode";
+    mStats->dumpStatistics(operation, inputReference, durationUs);
+}
+
+void C2Encoder::resetEncoder() {
+    mIsAudioEncoder = false;
+    mNumInputFrame = 0;
+    mEos = false;
+    if (mStats) mStats->reset();
+}
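
The graphic-block path in encodeFrames() above copies a planar 4:2:0 input frame plane by plane: luma first, then the two quarter-size chroma planes read at width*height and width*height*5/4 into the source buffer. A minimal standalone sketch of that offset arithmetic, with illustrative names that are not part of the patch:

    #include <cstddef>
    #include <cstdint>

    // Plane sizes and offsets for a planar YUV 4:2:0 buffer of width x height,
    // matching the memcpy offsets used above.
    struct Yuv420Planes {
        size_t ySize;    // width * height bytes of luma
        size_t uvSize;   // width * height / 4 bytes per chroma plane
        size_t uOffset;  // first chroma plane starts right after luma
        size_t vOffset;  // second chroma plane starts at width * height * 5 / 4
    };

    static Yuv420Planes yuv420Planes(int32_t width, int32_t height) {
        Yuv420Planes p;
        p.ySize = static_cast<size_t>(width) * static_cast<size_t>(height);
        p.uvSize = p.ySize / 4;
        p.uOffset = p.ySize;
        p.vOffset = p.ySize + p.uvSize;
        return p;
    }
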
diff --git a/media/tests/benchmark/src/native/encoder/C2Encoder.h b/media/tests/benchmark/src/native/encoder/C2Encoder.h
new file mode 100644
index 0000000..a4ca097
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/C2Encoder.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __C2_ENCODER_H__
+#define __C2_ENCODER_H__
+
+#include <stdio.h>
+#include <algorithm>
+#include <fstream>
+
+#include "BenchmarkC2Common.h"
+
+#define DEFAULT_AUDIO_FRAME_SIZE 4096
+
+constexpr int32_t KDefaultFrameRate = 25;
+
+class C2Encoder : public BenchmarkC2Common {
+  public:
+    C2Encoder()
+        : mIsAudioEncoder(false),
+          mWidth(0),
+          mHeight(0),
+          mNumInputFrame(0),
+          mComponent(nullptr) {}
+
+    int32_t createCodec2Component(string codecName, AMediaFormat *format);
+
+    int32_t encodeFrames(ifstream &eleStream, size_t inputBufferSize);
+
+    int32_t getInputMaxBufSize();
+
+    void deInitCodec();
+
+    void dumpStatistics(string inputReference, int64_t durationUs);
+
+    void resetEncoder();
+
+  private:
+    bool mIsAudioEncoder;
+
+    int32_t mWidth;
+    int32_t mHeight;
+    int32_t mFrameRate;
+    int32_t mSampleRate;
+
+    int32_t mNumInputFrame;
+    int32_t mInputMaxBufSize;
+
+    std::shared_ptr<android::Codec2Client::Listener> mListener;
+    std::shared_ptr<android::Codec2Client::Component> mComponent;
+};
+
+#endif  // __C2_ENCODER_H__
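
encodeFrames() in C2Encoder.cpp stamps each queued work item using the rates declared in this header: frameIndex * (1000000 / mFrameRate) for video and frameIndex * frameSize * (1000000 / mSampleRate) for audio, where 1000000 / mSampleRate is integer division (22 us for 44100 Hz rather than ~22.68 us). A hedged, standalone sketch of the same arithmetic with the division performed last; illustrative only, not part of the patch:

    #include <cstdint>

    // Video: at the default 25 fps each frame advances the timestamp by 40000 us.
    static int64_t videoPtsUs(int64_t frameIndex, int32_t frameRate) {
        return frameIndex * (1000000LL / frameRate);
    }

    // Audio: dividing last avoids truncating 1000000 / sampleRate.
    static int64_t audioPtsUs(int64_t frameIndex, int64_t frameSize, int32_t sampleRate) {
        return frameIndex * frameSize * 1000000LL / sampleRate;
    }
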
diff --git a/media/tests/benchmark/tests/Android.bp b/media/tests/benchmark/tests/Android.bp
index 128d055..de6a8bf 100644
--- a/media/tests/benchmark/tests/Android.bp
+++ b/media/tests/benchmark/tests/Android.bp
@@ -92,3 +92,20 @@
         "libmediabenchmark_codec2_decoder",
     ],
 }
+
+cc_test {
+    name: "C2EncoderTest",
+    gtest: true,
+    defaults: [
+        "libmediabenchmark_codec2_common-defaults",
+    ],
+
+    srcs: ["C2EncoderTest.cpp"],
+
+    static_libs: [
+        "libmediabenchmark_extractor",
+        "libmediabenchmark_decoder",
+        "libmediabenchmark_codec2_common",
+        "libmediabenchmark_codec2_encoder",
+    ],
+}
diff --git a/media/tests/benchmark/tests/C2EncoderTest.cpp b/media/tests/benchmark/tests/C2EncoderTest.cpp
new file mode 100644
index 0000000..7eb5ff2
--- /dev/null
+++ b/media/tests/benchmark/tests/C2EncoderTest.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2EncoderTest"
+
+#include <sys/stat.h>
+#include <fstream>
+#include <iostream>
+#include <limits>
+
+#include "BenchmarkTestEnvironment.h"
+#include "C2Encoder.h"
+#include "Decoder.h"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class C2EncoderTest : public ::testing::TestWithParam<pair<string, string>> {
+  public:
+    C2EncoderTest() : mEncoder(nullptr) { setupC2EncoderTest(); }
+
+    void setupC2EncoderTest();
+
+    vector<string> mCodecList;
+    C2Encoder *mEncoder;
+};
+
+void C2EncoderTest::setupC2EncoderTest() {
+    mEncoder = new C2Encoder();
+    ASSERT_NE(mEncoder, nullptr) << "C2Encoder creation failed";
+
+    int32_t status = mEncoder->setupCodec2();
+    ASSERT_EQ(status, 0) << "Codec2 setup failed";
+
+    mCodecList = mEncoder->getSupportedComponentList(true /* isEncoder */);
+    ASSERT_GT(mCodecList.size(), 0) << "Codec2 client didn't recognise any component";
+}
+
+TEST_P(C2EncoderTest, Codec2Encode) {
+    ALOGV("Encodes the input using codec2 framework");
+    string inputFile = gEnv->getRes() + GetParam().first;
+    FILE *inputFp = fopen(inputFile.c_str(), "rb");
+    ASSERT_NE(inputFp, nullptr) << "Unable to open input file for reading";
+
+    Decoder *decoder = new Decoder();
+    ASSERT_NE(decoder, nullptr) << "Decoder creation failed";
+
+    Extractor *extractor = decoder->getExtractor();
+    ASSERT_NE(extractor, nullptr) << "Extractor creation failed";
+
+    // Read file properties
+    struct stat buf;
+    stat(inputFile.c_str(), &buf);
+    size_t fileSize = buf.st_size;
+    int32_t fd = fileno(inputFp);
+
+    ASSERT_LE(fileSize, kMaxBufferSize)
+            << "Input file size is greater than the threshold memory dedicated to the test";
+
+    int32_t trackCount = extractor->initExtractor(fd, fileSize);
+    ASSERT_GT(trackCount, 0) << "initExtractor failed";
+
+    for (int curTrack = 0; curTrack < trackCount; curTrack++) {
+        int32_t status = extractor->setupTrackFormat(curTrack);
+        ASSERT_EQ(status, 0) << "Track Format invalid";
+
+        uint8_t *inputBuffer = (uint8_t *)malloc(fileSize);
+        ASSERT_NE(inputBuffer, nullptr) << "Insufficient memory";
+
+        vector<AMediaCodecBufferInfo> frameInfo;
+        AMediaCodecBufferInfo info;
+        uint32_t inputBufferOffset = 0;
+
+        // Get frame data
+        while (1) {
+            status = extractor->getFrameSample(info);
+            if (status || !info.size) break;
+            // copy the meta data and buffer to be passed to decoder
+            ASSERT_LE(inputBufferOffset + info.size, fileSize) << "Memory allocated not sufficient";
+
+            memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+            frameInfo.push_back(info);
+            inputBufferOffset += info.size;
+        }
+
+        string decName = "";
+        string outputFileName = "decode.out";
+        FILE *outFp = fopen(outputFileName.c_str(), "wb");
+        ASSERT_NE(outFp, nullptr) << "Unable to open output file " << outputFileName
+                                  << " for dumping decoder's output";
+
+        decoder->setupDecoder();
+        status = decoder->decode(inputBuffer, frameInfo, decName, false /*asyncMode */, outFp);
+        ASSERT_EQ(status, AMEDIA_OK) << "Decode returned error : " << status;
+
+        // Encode the given input stream for all C2 codecs supported by device
+        AMediaFormat *format = extractor->getFormat();
+        ifstream eleStream;
+        eleStream.open(outputFileName.c_str(), ifstream::binary | ifstream::ate);
+        ASSERT_EQ(eleStream.is_open(), true) << outputFileName.c_str() << " - file not found";
+        size_t eleSize = eleStream.tellg();
+
+        for (string codecName : mCodecList) {
+            if (codecName.find(GetParam().second) != string::npos) {
+                status = mEncoder->createCodec2Component(codecName, format);
+                ASSERT_EQ(status, 0) << "Create component failed for " << codecName;
+
+                // Send the inputs to C2 Encoder and wait till all buffers are returned.
+                eleStream.seekg(0, ifstream::beg);
+                status = mEncoder->encodeFrames(eleStream, eleSize);
+                ASSERT_EQ(status, 0) << "Encoder failed for " << codecName;
+
+                mEncoder->waitOnInputConsumption();
+                ASSERT_TRUE(mEncoder->mEos) << "Test Failed. Didn't receive EOS \n";
+
+                mEncoder->deInitCodec();
+                int64_t durationUs = extractor->getClipDuration();
+                cout << "codec: " << codecName << endl;
+                mEncoder->dumpStatistics(GetParam().first, durationUs);
+                mEncoder->resetEncoder();
+            }
+        }
+
+        // Destroy the decoder for the given input
+        decoder->deInitCodec();
+        decoder->resetDecoder();
+        free(inputBuffer);
+    }
+    fclose(inputFp);
+    extractor->deInitExtractor();
+    delete decoder;
+    delete mEncoder;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        AudioEncoderTest, C2EncoderTest,
+        ::testing::Values(
+                make_pair("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "aac"),
+                make_pair("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "amrnb"),
+                make_pair("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "amrwb"),
+                make_pair("bbb_44100hz_2ch_600kbps_flac_30sec.mp4", "flac"),
+                make_pair("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "opus")));
+
+INSTANTIATE_TEST_SUITE_P(
+        VideoEncoderTest, C2EncoderTest,
+        ::testing::Values(
+                make_pair("crowd_1920x1080_25fps_4000kbps_vp9.webm", "vp9"),
+                make_pair("crowd_1920x1080_25fps_4000kbps_vp8.webm", "vp8"),
+                make_pair("crowd_176x144_25fps_6000kbps_mpeg4.mp4", "mpeg4"),
+                make_pair("crowd_176x144_25fps_6000kbps_h263.3gp", "h263"),
+                make_pair("crowd_1920x1080_25fps_6700kbps_h264.ts", "avc"),
+                make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", "hevc")));
+
+int main(int argc, char **argv) {
+    gEnv = new BenchmarkTestEnvironment();
+    ::testing::AddGlobalTestEnvironment(gEnv);
+    ::testing::InitGoogleTest(&argc, argv);
+    int status = gEnv->initFromOptions(argc, argv);
+    if (status == 0) {
+        status = RUN_ALL_TESTS();
+        ALOGV("C2 Encoder Test result = %d\n", status);
+    }
+    return status;
+}
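
For every (input clip, codec keyword) parameter above, the test encodes with each C2 component whose name contains the keyword. A small standalone sketch of that selection step, with illustrative names only:

    #include <string>
    #include <vector>

    // Mirrors the codecName.find(GetParam().second) check in the test above:
    // keep only the component names containing the codec keyword (e.g. "aac", "avc").
    static std::vector<std::string> matchingComponents(
            const std::vector<std::string>& componentList, const std::string& keyword) {
        std::vector<std::string> matches;
        for (const std::string& name : componentList) {
            if (name.find(keyword) != std::string::npos) matches.push_back(name);
        }
        return matches;
    }
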
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index e953c8e..d415377 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -3529,7 +3529,7 @@
 
     if (lStatus == NO_ERROR || lStatus == ALREADY_EXISTS) {
         // Check CPU and memory usage
-        sp<EffectModule> effect = handle->effect().promote();
+        sp<EffectBase> effect = handle->effect().promote();
         if (effect != nullptr) {
             status_t rStatus = effect->updatePolicyState();
             if (rStatus != NO_ERROR) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index eb851ec..5f55ddb 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -533,6 +533,7 @@
     class AsyncCallbackThread;
     class Track;
     class RecordTrack;
+    class EffectBase;
     class EffectModule;
     class EffectHandle;
     class EffectChain;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 2a83fd3..4d37c94 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -58,71 +58,115 @@
 namespace android {
 
 // ----------------------------------------------------------------------------
-//  EffectModule implementation
+//  EffectBase implementation
 // ----------------------------------------------------------------------------
 
 #undef LOG_TAG
-#define LOG_TAG "AudioFlinger::EffectModule"
+#define LOG_TAG "AudioFlinger::EffectBase"
 
-AudioFlinger::EffectModule::EffectModule(const sp<AudioFlinger::EffectCallbackInterface>& callback,
+AudioFlinger::EffectBase::EffectBase(const sp<AudioFlinger::EffectCallbackInterface>& callback,
                                         effect_descriptor_t *desc,
                                         int id,
                                         audio_session_t sessionId,
                                         bool pinned)
     : mPinned(pinned),
       mCallback(callback), mId(id), mSessionId(sessionId),
-      mDescriptor(*desc),
-      // clear mConfig to ensure consistent initial value of buffer framecount
-      // in case buffers are associated by setInBuffer() or setOutBuffer()
-      // prior to configure().
-      mConfig{{}, {}},
-      mStatus(NO_INIT), mState(IDLE),
-      mMaxDisableWaitCnt(1), // set by configure(), should be >= 1
-      mDisableWaitCnt(0),    // set by process() and updateState()
-      mSuspended(false),
-      mOffloaded(false)
-#ifdef FLOAT_EFFECT_CHAIN
-      , mSupportsFloat(false)
-#endif
+      mDescriptor(*desc)
 {
-    ALOGV("Constructor %p pinned %d", this, pinned);
-    int lStatus;
-
-    // create effect engine from effect factory
-    mStatus = callback->createEffectHal(
-            &desc->uuid, sessionId, AUDIO_PORT_HANDLE_NONE, &mEffectInterface);
-    if (mStatus != NO_ERROR) {
-        return;
-    }
-    lStatus = init();
-    if (lStatus < 0) {
-        mStatus = lStatus;
-        goto Error;
-    }
-
-    setOffloaded(callback->isOffload(), callback->io());
-    ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
-
-    return;
-Error:
-    mEffectInterface.clear();
-    ALOGV("Constructor Error %d", mStatus);
 }
 
-AudioFlinger::EffectModule::~EffectModule()
+// must be called with EffectBase::mLock held
+status_t AudioFlinger::EffectBase::setEnabled_l(bool enabled)
 {
-    ALOGV("Destructor %p", this);
-    if (mEffectInterface != 0) {
-        char uuidStr[64];
-        AudioEffect::guidToString(&mDescriptor.uuid, uuidStr, sizeof(uuidStr));
-        ALOGW("EffectModule %p destructor called with unreleased interface, effect %s",
-                this, uuidStr);
-        release_l();
-    }
 
+    ALOGV("setEnabled %p enabled %d", this, enabled);
+
+    if (enabled != isEnabled()) {
+        switch (mState) {
+        // going from disabled to enabled
+        case IDLE:
+            mState = STARTING;
+            break;
+        case STOPPED:
+            mState = RESTART;
+            break;
+        case STOPPING:
+            mState = ACTIVE;
+            break;
+
+        // going from enabled to disabled
+        case RESTART:
+            mState = STOPPED;
+            break;
+        case STARTING:
+            mState = IDLE;
+            break;
+        case ACTIVE:
+            mState = STOPPING;
+            break;
+        case DESTROYED:
+            return NO_ERROR; // simply ignore as we are being destroyed
+        }
+        for (size_t i = 1; i < mHandles.size(); i++) {
+            EffectHandle *h = mHandles[i];
+            if (h != NULL && !h->disconnected()) {
+                h->setEnabled(enabled);
+            }
+        }
+    }
+    return NO_ERROR;
 }
 
-status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle)
+status_t AudioFlinger::EffectBase::setEnabled(bool enabled, bool fromHandle)
+{
+    status_t status;
+    {
+        Mutex::Autolock _l(mLock);
+        status = setEnabled_l(enabled);
+    }
+    if (fromHandle) {
+        if (enabled) {
+            if (status != NO_ERROR) {
+                mCallback->checkSuspendOnEffectEnabled(this, false, false /*threadLocked*/);
+            } else {
+                mCallback->onEffectEnable(this);
+            }
+        } else {
+            mCallback->onEffectDisable(this);
+        }
+    }
+    return status;
+}
+
+bool AudioFlinger::EffectBase::isEnabled() const
+{
+    switch (mState) {
+    case RESTART:
+    case STARTING:
+    case ACTIVE:
+        return true;
+    case IDLE:
+    case STOPPING:
+    case STOPPED:
+    case DESTROYED:
+    default:
+        return false;
+    }
+}
+
+void AudioFlinger::EffectBase::setSuspended(bool suspended)
+{
+    Mutex::Autolock _l(mLock);
+    mSuspended = suspended;
+}
+
+bool AudioFlinger::EffectBase::suspended() const
+{
+    Mutex::Autolock _l(mLock);
+    return mSuspended;
+}
+
+status_t AudioFlinger::EffectBase::addHandle(EffectHandle *handle)
 {
     status_t status;
 
@@ -161,7 +205,7 @@
     return status;
 }
 
-status_t AudioFlinger::EffectModule::updatePolicyState()
+status_t AudioFlinger::EffectBase::updatePolicyState()
 {
     status_t status = NO_ERROR;
     bool doRegister = false;
@@ -217,13 +261,13 @@
 }
 
 
-ssize_t AudioFlinger::EffectModule::removeHandle(EffectHandle *handle)
+ssize_t AudioFlinger::EffectBase::removeHandle(EffectHandle *handle)
 {
     Mutex::Autolock _l(mLock);
     return removeHandle_l(handle);
 }
 
-ssize_t AudioFlinger::EffectModule::removeHandle_l(EffectHandle *handle)
+ssize_t AudioFlinger::EffectBase::removeHandle_l(EffectHandle *handle)
 {
     size_t size = mHandles.size();
     size_t i;
@@ -247,19 +291,15 @@
         }
     }
 
-    // Prevent calls to process() and other functions on effect interface from now on.
-    // The effect engine will be released by the destructor when the last strong reference on
-    // this object is released which can happen after next process is called.
     if (mHandles.size() == 0 && !mPinned) {
         mState = DESTROYED;
-        mEffectInterface->close();
     }
 
     return mHandles.size();
 }
 
 // must be called with EffectModule::mLock held
-AudioFlinger::EffectHandle *AudioFlinger::EffectModule::controlHandle_l()
+AudioFlinger::EffectHandle *AudioFlinger::EffectBase::controlHandle_l()
 {
     // the first valid handle in the list has control over the module
     for (size_t i = 0; i < mHandles.size(); i++) {
@@ -273,7 +313,7 @@
 }
 
 // unsafe method called when the effect parent thread has been destroyed
-ssize_t AudioFlinger::EffectModule::disconnectHandle(EffectHandle *handle, bool unpinIfLast)
+ssize_t AudioFlinger::EffectBase::disconnectHandle(EffectHandle *handle, bool unpinIfLast)
 {
     ALOGV("disconnect() %p handle %p", this, handle);
     if (mCallback->disconnectEffectHandle(handle, unpinIfLast)) {
@@ -290,6 +330,253 @@
     return numHandles;
 }
 
+bool AudioFlinger::EffectBase::purgeHandles()
+{
+    bool enabled = false;
+    Mutex::Autolock _l(mLock);
+    EffectHandle *handle = controlHandle_l();
+    if (handle != NULL) {
+        enabled = handle->enabled();
+    }
+    mHandles.clear();
+    return enabled;
+}
+
+void AudioFlinger::EffectBase::checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) {
+    mCallback->checkSuspendOnEffectEnabled(this, enabled, threadLocked);
+}
+
+static String8 effectFlagsToString(uint32_t flags) {
+    String8 s;
+
+    s.append("conn. mode: ");
+    switch (flags & EFFECT_FLAG_TYPE_MASK) {
+    case EFFECT_FLAG_TYPE_INSERT: s.append("insert"); break;
+    case EFFECT_FLAG_TYPE_AUXILIARY: s.append("auxiliary"); break;
+    case EFFECT_FLAG_TYPE_REPLACE: s.append("replace"); break;
+    case EFFECT_FLAG_TYPE_PRE_PROC: s.append("preproc"); break;
+    case EFFECT_FLAG_TYPE_POST_PROC: s.append("postproc"); break;
+    default: s.append("unknown/reserved"); break;
+    }
+    s.append(", ");
+
+    s.append("insert pref: ");
+    switch (flags & EFFECT_FLAG_INSERT_MASK) {
+    case EFFECT_FLAG_INSERT_ANY: s.append("any"); break;
+    case EFFECT_FLAG_INSERT_FIRST: s.append("first"); break;
+    case EFFECT_FLAG_INSERT_LAST: s.append("last"); break;
+    case EFFECT_FLAG_INSERT_EXCLUSIVE: s.append("exclusive"); break;
+    default: s.append("unknown/reserved"); break;
+    }
+    s.append(", ");
+
+    s.append("volume mgmt: ");
+    switch (flags & EFFECT_FLAG_VOLUME_MASK) {
+    case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break;
+    case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break;
+    case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break;
+    case EFFECT_FLAG_VOLUME_MONITOR: s.append("monitors volume"); break;
+    default: s.append("unknown/reserved"); break;
+    }
+    s.append(", ");
+
+    uint32_t devind = flags & EFFECT_FLAG_DEVICE_MASK;
+    if (devind) {
+        s.append("device indication: ");
+        switch (devind) {
+        case EFFECT_FLAG_DEVICE_IND: s.append("requires updates"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    s.append("input mode: ");
+    switch (flags & EFFECT_FLAG_INPUT_MASK) {
+    case EFFECT_FLAG_INPUT_DIRECT: s.append("direct"); break;
+    case EFFECT_FLAG_INPUT_PROVIDER: s.append("provider"); break;
+    case EFFECT_FLAG_INPUT_BOTH: s.append("direct+provider"); break;
+    default: s.append("not set"); break;
+    }
+    s.append(", ");
+
+    s.append("output mode: ");
+    switch (flags & EFFECT_FLAG_OUTPUT_MASK) {
+    case EFFECT_FLAG_OUTPUT_DIRECT: s.append("direct"); break;
+    case EFFECT_FLAG_OUTPUT_PROVIDER: s.append("provider"); break;
+    case EFFECT_FLAG_OUTPUT_BOTH: s.append("direct+provider"); break;
+    default: s.append("not set"); break;
+    }
+    s.append(", ");
+
+    uint32_t accel = flags & EFFECT_FLAG_HW_ACC_MASK;
+    if (accel) {
+        s.append("hardware acceleration: ");
+        switch (accel) {
+        case EFFECT_FLAG_HW_ACC_SIMPLE: s.append("non-tunneled"); break;
+        case EFFECT_FLAG_HW_ACC_TUNNEL: s.append("tunneled"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    uint32_t modeind = flags & EFFECT_FLAG_AUDIO_MODE_MASK;
+    if (modeind) {
+        s.append("mode indication: ");
+        switch (modeind) {
+        case EFFECT_FLAG_AUDIO_MODE_IND: s.append("required"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    uint32_t srcind = flags & EFFECT_FLAG_AUDIO_SOURCE_MASK;
+    if (srcind) {
+        s.append("source indication: ");
+        switch (srcind) {
+        case EFFECT_FLAG_AUDIO_SOURCE_IND: s.append("required"); break;
+        default: s.append("unknown/reserved"); break;
+        }
+        s.append(", ");
+    }
+
+    if (flags & EFFECT_FLAG_OFFLOAD_MASK) {
+        s.append("offloadable, ");
+    }
+
+    int len = s.length();
+    if (s.length() > 2) {
+        (void) s.lockBuffer(len);
+        s.unlockBuffer(len - 2);
+    }
+    return s;
+}
+
+void AudioFlinger::EffectBase::dump(int fd, const Vector<String16>& args __unused)
+{
+    String8 result;
+
+    result.appendFormat("\tEffect ID %d:\n", mId);
+
+    bool locked = AudioFlinger::dumpTryLock(mLock);
+    // failed to lock - AudioFlinger is probably deadlocked
+    if (!locked) {
+        result.append("\t\tCould not lock Fx mutex:\n");
+    }
+
+    result.append("\t\tSession State Registered Enabled Suspended:\n");
+    result.appendFormat("\t\t%05d   %03d   %s          %s       %s\n",
+            mSessionId, mState, mPolicyRegistered ? "y" : "n",
+            mPolicyEnabled ? "y" : "n", mSuspended ? "y" : "n");
+
+    result.append("\t\tDescriptor:\n");
+    char uuidStr[64];
+    AudioEffect::guidToString(&mDescriptor.uuid, uuidStr, sizeof(uuidStr));
+    result.appendFormat("\t\t- UUID: %s\n", uuidStr);
+    AudioEffect::guidToString(&mDescriptor.type, uuidStr, sizeof(uuidStr));
+    result.appendFormat("\t\t- TYPE: %s\n", uuidStr);
+    result.appendFormat("\t\t- apiVersion: %08X\n\t\t- flags: %08X (%s)\n",
+            mDescriptor.apiVersion,
+            mDescriptor.flags,
+            effectFlagsToString(mDescriptor.flags).string());
+    result.appendFormat("\t\t- name: %s\n",
+            mDescriptor.name);
+
+    result.appendFormat("\t\t- implementor: %s\n",
+            mDescriptor.implementor);
+
+    result.appendFormat("\t\t%zu Clients:\n", mHandles.size());
+    result.append("\t\t\t  Pid Priority Ctrl Locked client server\n");
+    char buffer[256];
+    for (size_t i = 0; i < mHandles.size(); ++i) {
+        EffectHandle *handle = mHandles[i];
+        if (handle != NULL && !handle->disconnected()) {
+            handle->dumpToBuffer(buffer, sizeof(buffer));
+            result.append(buffer);
+        }
+    }
+    if (locked) {
+        mLock.unlock();
+    }
+
+    write(fd, result.string(), result.length());
+}
+
+// ----------------------------------------------------------------------------
+//  EffectModule implementation
+// ----------------------------------------------------------------------------
+
+#undef LOG_TAG
+#define LOG_TAG "AudioFlinger::EffectModule"
+
+AudioFlinger::EffectModule::EffectModule(const sp<AudioFlinger::EffectCallbackInterface>& callback,
+                                         effect_descriptor_t *desc,
+                                         int id,
+                                         audio_session_t sessionId,
+                                         bool pinned)
+    : EffectBase(callback, desc, id, sessionId, pinned),
+      // clear mConfig to ensure consistent initial value of buffer framecount
+      // in case buffers are associated by setInBuffer() or setOutBuffer()
+      // prior to configure().
+      mConfig{{}, {}},
+      mStatus(NO_INIT),
+      mMaxDisableWaitCnt(1), // set by configure(), should be >= 1
+      mDisableWaitCnt(0),    // set by process() and updateState()
+      mOffloaded(false)
+#ifdef FLOAT_EFFECT_CHAIN
+      , mSupportsFloat(false)
+#endif
+{
+    ALOGV("Constructor %p pinned %d", this, pinned);
+    int lStatus;
+
+    // create effect engine from effect factory
+    mStatus = callback->createEffectHal(
+            &desc->uuid, sessionId, AUDIO_PORT_HANDLE_NONE, &mEffectInterface);
+    if (mStatus != NO_ERROR) {
+        return;
+    }
+    lStatus = init();
+    if (lStatus < 0) {
+        mStatus = lStatus;
+        goto Error;
+    }
+
+    setOffloaded(callback->isOffload(), callback->io());
+    ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
+
+    return;
+Error:
+    mEffectInterface.clear();
+    ALOGV("Constructor Error %d", mStatus);
+}
+
+AudioFlinger::EffectModule::~EffectModule()
+{
+    ALOGV("Destructor %p", this);
+    if (mEffectInterface != 0) {
+        char uuidStr[64];
+        AudioEffect::guidToString(&mDescriptor.uuid, uuidStr, sizeof(uuidStr));
+        ALOGW("EffectModule %p destructor called with unreleased interface, effect %s",
+                this, uuidStr);
+        release_l();
+    }
+
+}
+
+ssize_t AudioFlinger::EffectModule::removeHandle_l(EffectHandle *handle)
+{
+    ssize_t status = EffectBase::removeHandle_l(handle);
+
+    // Prevent calls to process() and other functions on effect interface from now on.
+    // The effect engine will be released by the destructor when the last strong reference on
+    // this object is released which can happen after next process is called.
+    if (status == 0 && !mPinned) {
+        mEffectInterface->close();
+    }
+
+    return status;
+}
+
 bool AudioFlinger::EffectModule::updateState() {
     Mutex::Autolock _l(mLock);
 
@@ -950,89 +1237,6 @@
     return status;
 }
 
-void AudioFlinger::EffectModule::checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) {
-    mCallback->checkSuspendOnEffectEnabled(this, enabled, threadLocked);
-}
-
-status_t AudioFlinger::EffectModule::setEnabled(bool enabled, bool fromHandle)
-{
-    status_t status;
-    {
-        Mutex::Autolock _l(mLock);
-        status = setEnabled_l(enabled);
-    }
-    if (fromHandle) {
-        if (enabled) {
-            if (status != NO_ERROR) {
-                mCallback->checkSuspendOnEffectEnabled(this, false, false /*threadLocked*/);
-            } else {
-                mCallback->onEffectEnable(this);
-            }
-        } else {
-            mCallback->onEffectDisable(this);
-        }
-    }
-    return status;
-}
-
-// must be called with EffectModule::mLock held
-status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled)
-{
-
-    ALOGV("setEnabled %p enabled %d", this, enabled);
-
-    if (enabled != isEnabled()) {
-        switch (mState) {
-        // going from disabled to enabled
-        case IDLE:
-            mState = STARTING;
-            break;
-        case STOPPED:
-            mState = RESTART;
-            break;
-        case STOPPING:
-            mState = ACTIVE;
-            break;
-
-        // going from enabled to disabled
-        case RESTART:
-            mState = STOPPED;
-            break;
-        case STARTING:
-            mState = IDLE;
-            break;
-        case ACTIVE:
-            mState = STOPPING;
-            break;
-        case DESTROYED:
-            return NO_ERROR; // simply ignore as we are being destroyed
-        }
-        for (size_t i = 1; i < mHandles.size(); i++) {
-            EffectHandle *h = mHandles[i];
-            if (h != NULL && !h->disconnected()) {
-                h->setEnabled(enabled);
-            }
-        }
-    }
-    return NO_ERROR;
-}
-
-bool AudioFlinger::EffectModule::isEnabled() const
-{
-    switch (mState) {
-    case RESTART:
-    case STARTING:
-    case ACTIVE:
-        return true;
-    case IDLE:
-    case STOPPING:
-    case STOPPED:
-    case DESTROYED:
-    default:
-        return false;
-    }
-}
-
 bool AudioFlinger::EffectModule::isProcessEnabled() const
 {
     if (mStatus != NO_ERROR) {
@@ -1274,30 +1478,6 @@
     return status;
 }
 
-void AudioFlinger::EffectModule::setSuspended(bool suspended)
-{
-    Mutex::Autolock _l(mLock);
-    mSuspended = suspended;
-}
-
-bool AudioFlinger::EffectModule::suspended() const
-{
-    Mutex::Autolock _l(mLock);
-    return mSuspended;
-}
-
-bool AudioFlinger::EffectModule::purgeHandles()
-{
-    bool enabled = false;
-    Mutex::Autolock _l(mLock);
-    EffectHandle *handle = controlHandle_l();
-    if (handle != NULL) {
-        enabled = handle->enabled();
-    }
-    mHandles.clear();
-    return enabled;
-}
-
 status_t AudioFlinger::EffectModule::setOffloaded(bool offloaded, audio_io_handle_t io)
 {
     Mutex::Autolock _l(mLock);
@@ -1337,111 +1517,6 @@
     return mOffloaded;
 }
 
-String8 effectFlagsToString(uint32_t flags) {
-    String8 s;
-
-    s.append("conn. mode: ");
-    switch (flags & EFFECT_FLAG_TYPE_MASK) {
-    case EFFECT_FLAG_TYPE_INSERT: s.append("insert"); break;
-    case EFFECT_FLAG_TYPE_AUXILIARY: s.append("auxiliary"); break;
-    case EFFECT_FLAG_TYPE_REPLACE: s.append("replace"); break;
-    case EFFECT_FLAG_TYPE_PRE_PROC: s.append("preproc"); break;
-    case EFFECT_FLAG_TYPE_POST_PROC: s.append("postproc"); break;
-    default: s.append("unknown/reserved"); break;
-    }
-    s.append(", ");
-
-    s.append("insert pref: ");
-    switch (flags & EFFECT_FLAG_INSERT_MASK) {
-    case EFFECT_FLAG_INSERT_ANY: s.append("any"); break;
-    case EFFECT_FLAG_INSERT_FIRST: s.append("first"); break;
-    case EFFECT_FLAG_INSERT_LAST: s.append("last"); break;
-    case EFFECT_FLAG_INSERT_EXCLUSIVE: s.append("exclusive"); break;
-    default: s.append("unknown/reserved"); break;
-    }
-    s.append(", ");
-
-    s.append("volume mgmt: ");
-    switch (flags & EFFECT_FLAG_VOLUME_MASK) {
-    case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break;
-    case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break;
-    case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break;
-    case EFFECT_FLAG_VOLUME_MONITOR: s.append("monitors volume"); break;
-    default: s.append("unknown/reserved"); break;
-    }
-    s.append(", ");
-
-    uint32_t devind = flags & EFFECT_FLAG_DEVICE_MASK;
-    if (devind) {
-        s.append("device indication: ");
-        switch (devind) {
-        case EFFECT_FLAG_DEVICE_IND: s.append("requires updates"); break;
-        default: s.append("unknown/reserved"); break;
-        }
-        s.append(", ");
-    }
-
-    s.append("input mode: ");
-    switch (flags & EFFECT_FLAG_INPUT_MASK) {
-    case EFFECT_FLAG_INPUT_DIRECT: s.append("direct"); break;
-    case EFFECT_FLAG_INPUT_PROVIDER: s.append("provider"); break;
-    case EFFECT_FLAG_INPUT_BOTH: s.append("direct+provider"); break;
-    default: s.append("not set"); break;
-    }
-    s.append(", ");
-
-    s.append("output mode: ");
-    switch (flags & EFFECT_FLAG_OUTPUT_MASK) {
-    case EFFECT_FLAG_OUTPUT_DIRECT: s.append("direct"); break;
-    case EFFECT_FLAG_OUTPUT_PROVIDER: s.append("provider"); break;
-    case EFFECT_FLAG_OUTPUT_BOTH: s.append("direct+provider"); break;
-    default: s.append("not set"); break;
-    }
-    s.append(", ");
-
-    uint32_t accel = flags & EFFECT_FLAG_HW_ACC_MASK;
-    if (accel) {
-        s.append("hardware acceleration: ");
-        switch (accel) {
-        case EFFECT_FLAG_HW_ACC_SIMPLE: s.append("non-tunneled"); break;
-        case EFFECT_FLAG_HW_ACC_TUNNEL: s.append("tunneled"); break;
-        default: s.append("unknown/reserved"); break;
-        }
-        s.append(", ");
-    }
-
-    uint32_t modeind = flags & EFFECT_FLAG_AUDIO_MODE_MASK;
-    if (modeind) {
-        s.append("mode indication: ");
-        switch (modeind) {
-        case EFFECT_FLAG_AUDIO_MODE_IND: s.append("required"); break;
-        default: s.append("unknown/reserved"); break;
-        }
-        s.append(", ");
-    }
-
-    uint32_t srcind = flags & EFFECT_FLAG_AUDIO_SOURCE_MASK;
-    if (srcind) {
-        s.append("source indication: ");
-        switch (srcind) {
-        case EFFECT_FLAG_AUDIO_SOURCE_IND: s.append("required"); break;
-        default: s.append("unknown/reserved"); break;
-        }
-        s.append(", ");
-    }
-
-    if (flags & EFFECT_FLAG_OFFLOAD_MASK) {
-        s.append("offloadable, ");
-    }
-
-    int len = s.length();
-    if (s.length() > 2) {
-        (void) s.lockBuffer(len);
-        s.unlockBuffer(len - 2);
-    }
-    return s;
-}
-
 static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
     std::stringstream ss;
 
@@ -1457,38 +1532,16 @@
     return ss.str();
 }
 
-void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args)
 {
+    EffectBase::dump(fd, args);
+
     String8 result;
-
-    result.appendFormat("\tEffect ID %d:\n", mId);
-
     bool locked = AudioFlinger::dumpTryLock(mLock);
-    // failed to lock - AudioFlinger is probably deadlocked
-    if (!locked) {
-        result.append("\t\tCould not lock Fx mutex:\n");
-    }
 
-    result.append("\t\tSession Status State Registered Enabled Suspended Engine:\n");
-    result.appendFormat("\t\t%05d   %03d    %03d   %s          %s       %s         %p\n",
-            mSessionId, mStatus, mState, mPolicyRegistered ? "y" : "n", mPolicyEnabled ? "y" : "n",
-            mSuspended ? "y" : "n", mEffectInterface.get());
-
-    result.append("\t\tDescriptor:\n");
-    char uuidStr[64];
-    AudioEffect::guidToString(&mDescriptor.uuid, uuidStr, sizeof(uuidStr));
-    result.appendFormat("\t\t- UUID: %s\n", uuidStr);
-    AudioEffect::guidToString(&mDescriptor.type, uuidStr, sizeof(uuidStr));
-    result.appendFormat("\t\t- TYPE: %s\n", uuidStr);
-    result.appendFormat("\t\t- apiVersion: %08X\n\t\t- flags: %08X (%s)\n",
-            mDescriptor.apiVersion,
-            mDescriptor.flags,
-            effectFlagsToString(mDescriptor.flags).string());
-    result.appendFormat("\t\t- name: %s\n",
-            mDescriptor.name);
-
-    result.appendFormat("\t\t- implementor: %s\n",
-            mDescriptor.implementor);
+    result.append("\t\tStatus Engine:\n");
+    result.appendFormat("\t\t%03d    %p\n",
+            mStatus, mEffectInterface.get());
 
     result.appendFormat("\t\t- data: %s\n", mSupportsFloat ? "float" : "int16");
 
@@ -1522,17 +1575,6 @@
             dumpInOutBuffer(false /* isInput */, mOutConversionBuffer).c_str());
 #endif
 
-    result.appendFormat("\t\t%zu Clients:\n", mHandles.size());
-    result.append("\t\t\t  Pid Priority Ctrl Locked client server\n");
-    char buffer[256];
-    for (size_t i = 0; i < mHandles.size(); ++i) {
-        EffectHandle *handle = mHandles[i];
-        if (handle != NULL && !handle->disconnected()) {
-            handle->dumpToBuffer(buffer, sizeof(buffer));
-            result.append(buffer);
-        }
-    }
-
     write(fd, result.string(), result.length());
 
     if (mEffectInterface != 0) {
@@ -1552,7 +1594,7 @@
 #undef LOG_TAG
 #define LOG_TAG "AudioFlinger::EffectHandle"
 
-AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect,
+AudioFlinger::EffectHandle::EffectHandle(const sp<EffectBase>& effect,
                                         const sp<AudioFlinger::Client>& client,
                                         const sp<IEffectClient>& effectClient,
                                         int32_t priority)
@@ -1593,7 +1635,7 @@
 {
     AutoMutex _l(mLock);
     ALOGV("enable %p", this);
-    sp<EffectModule> effect = mEffect.promote();
+    sp<EffectBase> effect = mEffect.promote();
     if (effect == 0 || mDisconnected) {
         return DEAD_OBJECT;
     }
@@ -1631,7 +1673,7 @@
 {
     ALOGV("disable %p", this);
     AutoMutex _l(mLock);
-    sp<EffectModule> effect = mEffect.promote();
+    sp<EffectBase> effect = mEffect.promote();
     if (effect == 0 || mDisconnected) {
         return DEAD_OBJECT;
     }
@@ -1672,7 +1714,7 @@
     }
     mDisconnected = true;
     {
-        sp<EffectModule> effect = mEffect.promote();
+        sp<EffectBase> effect = mEffect.promote();
         if (effect != 0) {
             if (effect->disconnectHandle(this, unpinIfLast) > 0) {
                 ALOGW("%s Effect handle %p disconnected after thread destruction",
@@ -1740,7 +1782,7 @@
     }
 
     AutoMutex _l(mLock);
-    sp<EffectModule> effect = mEffect.promote();
+    sp<EffectBase> effect = mEffect.promote();
     if (effect == 0 || mDisconnected) {
         return DEAD_OBJECT;
     }
@@ -2670,12 +2712,13 @@
 }
 
 bool AudioFlinger::EffectChain::EffectCallback::updateOrphanEffectChains(
-        const sp<AudioFlinger::EffectModule>& effect) {
+        const sp<AudioFlinger::EffectBase>& effect) {
     sp<AudioFlinger> af = mAudioFlinger.promote();
     if (af == nullptr) {
         return false;
     }
-    return af->updateOrphanEffectChains(effect);
+    // in EffectChain context, an EffectBase is always from an EffectModule so static cast is safe
+    return af->updateOrphanEffectChains(effect->asEffectModule());
 }
 
 status_t AudioFlinger::EffectChain::EffectCallback::allocateHalBuffer(
@@ -2806,7 +2849,7 @@
 }
 
 void AudioFlinger::EffectChain::EffectCallback::checkSuspendOnEffectEnabled(
-        const sp<EffectModule>& effect, bool enabled, bool threadLocked) {
+        const sp<EffectBase>& effect, bool enabled, bool threadLocked) {
     sp<ThreadBase> t = mThread.promote();
     if (t == nullptr) {
         return;
@@ -2817,18 +2860,20 @@
     if (c == nullptr) {
         return;
     }
-    c->checkSuspendOnEffectEnabled(effect, enabled);
+    // in EffectChain context, an EffectBase is always from an EffectModule so static cast is safe
+    c->checkSuspendOnEffectEnabled(effect->asEffectModule(), enabled);
 }
 
-void AudioFlinger::EffectChain::EffectCallback::onEffectEnable(const sp<EffectModule>& effect) {
+void AudioFlinger::EffectChain::EffectCallback::onEffectEnable(const sp<EffectBase>& effect) {
     sp<ThreadBase> t = mThread.promote();
     if (t == nullptr) {
         return;
     }
-    t->onEffectEnable(effect);
+    // in EffectChain context, an EffectBase is always from an EffectModule so static cast is safe
+    t->onEffectEnable(effect->asEffectModule());
 }
 
-void AudioFlinger::EffectChain::EffectCallback::onEffectDisable(const sp<EffectModule>& effect) {
+void AudioFlinger::EffectChain::EffectCallback::onEffectDisable(const sp<EffectBase>& effect) {
     checkSuspendOnEffectEnabled(effect, false, false /*threadLocked*/);
 
     sp<ThreadBase> t = mThread.promote();
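
The enable/disable state machine now lives in EffectBase (setEnabled_l() and isEnabled() above). As a compact recap, here is the same transition logic as a standalone sketch; the enum and helper are illustrative, not part of the patch:

    // Names mirror AudioFlinger's effect_state values; declaration order here is illustrative.
    enum State { IDLE, STARTING, ACTIVE, STOPPING, STOPPED, RESTART, DESTROYED };

    // Next state when a handle toggles the effect, as in EffectBase::setEnabled_l():
    // a transition only happens when the request differs from the current logical
    // state, and a DESTROYED effect ignores the request. isEnabled() reports true
    // for RESTART, STARTING and ACTIVE, false for every other state.
    static State nextState(State s, bool enable) {
        if (enable) {
            switch (s) {
                case IDLE:     return STARTING;   // disabled -> enabled
                case STOPPED:  return RESTART;
                case STOPPING: return ACTIVE;
                default:       return s;          // already enabled, or DESTROYED
            }
        }
        switch (s) {
            case RESTART:  return STOPPED;        // enabled -> disabled
            case STARTING: return IDLE;
            case ACTIVE:   return STOPPING;
            default:       return s;              // already disabled, or DESTROYED
        }
    }
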
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 0b0f9f5..ea51c2c 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -45,17 +45,17 @@
     virtual status_t removeEffectFromHal(sp<EffectHalInterface> effect) = 0;
     virtual void setVolumeForOutput(float left, float right) const = 0;
     virtual bool disconnectEffectHandle(EffectHandle *handle, bool unpinIfLast) = 0;
-    virtual void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
+    virtual void checkSuspendOnEffectEnabled(const sp<EffectBase>& effect,
                                              bool enabled,
                                              bool threadLocked) = 0;
-    virtual void onEffectEnable(const sp<EffectModule>& effect) = 0;
-    virtual void onEffectDisable(const sp<EffectModule>& effect) = 0;
+    virtual void onEffectEnable(const sp<EffectBase>& effect) = 0;
+    virtual void onEffectDisable(const sp<EffectBase>& effect) = 0;
 
     // Methods usually implemented with help from AudioFlinger: pay attention to mutex locking order
     virtual status_t createEffectHal(const effect_uuid_t *pEffectUuid,
                     int32_t sessionId, int32_t deviceId, sp<EffectHalInterface> *effect) = 0;
     virtual status_t allocateHalBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) = 0;
-    virtual bool updateOrphanEffectChains(const sp<EffectModule>& effect) = 0;
+    virtual bool updateOrphanEffectChains(const sp<EffectBase>& effect) = 0;
 
     // Methods usually implemented with help from EffectChain: pay attention to mutex locking order
     virtual uint32_t strategy() const = 0;
@@ -65,11 +65,11 @@
     virtual wp<EffectChain> chain() const = 0;
 };
 
-// EffectModule and EffectChain classes both have their own mutex to protect
+// EffectBase(EffectModule) and EffectChain classes both have their own mutex to protect
 // state changes or resource modifications. Always respect the following order
 // if multiple mutexes must be acquired to avoid cross deadlock:
-// AudioFlinger -> ThreadBase -> EffectChain -> EffectModule
-// AudioHandle -> ThreadBase -> EffectChain -> EffectModule
+// AudioFlinger -> ThreadBase -> EffectChain -> EffectBase(EffectModule)
+// AudioHandle -> ThreadBase -> EffectChain -> EffectBase(EffectModule)
 
 // NOTE: When implementing the EffectCallbackInterface, in an EffectChain or other, it is important
 // to pay attention to this locking order as some callback methods can be called from a state where
@@ -80,23 +80,19 @@
 // Threadbase mutex locked to avoid cross deadlock with other clients calling AudioPolicyService
 // methods that in turn call AudioFlinger thus locking the same mutexes in the reverse order.
 
-// The EffectModule class is a wrapper object controlling the effect engine implementation
-// in the effect library. It prevents concurrent calls to process() and command() functions
-// from different client threads. It keeps a list of EffectHandle objects corresponding
-// to all client applications using this effect and notifies applications of effect state,
-// control or parameter changes. It manages the activation state machine to send appropriate
-// reset, enable, disable commands to effect engine and provide volume
-// ramping when effects are activated/deactivated.
-// When controlling an auxiliary effect, the EffectModule also provides an input buffer used by
-// the attached track(s) to accumulate their auxiliary channel.
-class EffectModule : public RefBase {
+
+// The EffectBase class contains common properties, state and behavior for an EffectModule or
+// other derived classes managing an audio effect instance within the effect framework.
+// It also contains the class mutex (see comment on locking order above).
+class EffectBase : public RefBase {
 public:
-    EffectModule(const sp<EffectCallbackInterface>& chain,
-                    effect_descriptor_t *desc,
-                    int id,
-                    audio_session_t sessionId,
-                    bool pinned);
-    virtual ~EffectModule();
+    EffectBase(const sp<EffectCallbackInterface>& callback,
+               effect_descriptor_t *desc,
+               int id,
+               audio_session_t sessionId,
+               bool pinned);
+
+    ~EffectBase() override = default;
 
     enum effect_state {
         IDLE,
@@ -108,69 +104,14 @@
         DESTROYED
     };
 
-    int         id() const { return mId; }
-    void process();
-    bool updateState();
-    status_t command(uint32_t cmdCode,
-                     uint32_t cmdSize,
-                     void *pCmdData,
-                     uint32_t *replySize,
-                     void *pReplyData);
-
-    void reset_l();
-    status_t configure();
-    status_t init();
+    int id() const { return mId; }
     effect_state state() const {
         return mState;
     }
-    uint32_t status() {
-        return mStatus;
-    }
     audio_session_t sessionId() const {
         return mSessionId;
     }
-    status_t    setEnabled(bool enabled, bool fromHandle);
-    status_t    setEnabled_l(bool enabled);
-    bool isEnabled() const;
-    bool isProcessEnabled() const;
-    bool isOffloadedOrDirect() const;
-    bool isVolumeControlEnabled() const;
-
-    void        setInBuffer(const sp<EffectBufferHalInterface>& buffer);
-    int16_t     *inBuffer() const {
-        return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
-    }
-    void        setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
-    int16_t     *outBuffer() const {
-        return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
-    }
-    void        setCallback(const sp<EffectCallbackInterface>& callback) { mCallback = callback; }
-
-    status_t addHandle(EffectHandle *handle);
-    ssize_t  disconnectHandle(EffectHandle *handle, bool unpinIfLast);
-    ssize_t removeHandle(EffectHandle *handle);
-    ssize_t removeHandle_l(EffectHandle *handle);
-
     const effect_descriptor_t& desc() const { return mDescriptor; }
-    sp<EffectCallbackInterface>&     callback() { return mCallback; }
-
-    status_t         setDevices(const AudioDeviceTypeAddrVector &devices);
-    status_t         setInputDevice(const AudioDeviceTypeAddr &device);
-    status_t         setVolume(uint32_t *left, uint32_t *right, bool controller);
-    status_t         setMode(audio_mode_t mode);
-    status_t         setAudioSource(audio_source_t source);
-    status_t         start();
-    status_t         stop();
-    void             setSuspended(bool suspended);
-    bool             suspended() const;
-
-    EffectHandle*    controlHandle_l();
-
-    bool             isPinned() const { return mPinned; }
-    void             unPin() { mPinned = false; }
-    bool             purgeHandles();
-    void             lock() { mLock.lock(); }
-    void             unlock() { mLock.unlock(); }
     bool             isOffloadable() const
                         { return (mDescriptor.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) != 0; }
     bool             isImplementationSoftware() const
@@ -183,19 +124,141 @@
     bool             isVolumeMonitor() const
                         { return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
                             == EFFECT_FLAG_VOLUME_MONITOR; }
-    status_t         setOffloaded(bool offloaded, audio_io_handle_t io);
-    bool             isOffloaded() const;
-    void             addEffectToHal_l();
-    void             release_l();
+
+    virtual status_t setEnabled(bool enabled, bool fromHandle);
+    status_t    setEnabled_l(bool enabled);
+    bool isEnabled() const;
+
+    void             setSuspended(bool suspended);
+    bool             suspended() const;
+
+    virtual status_t command(uint32_t cmdCode __unused,
+                             uint32_t cmdSize __unused,
+                             void *pCmdData __unused,
+                             uint32_t *replySize __unused,
+                             void *pReplyData __unused) { return NO_ERROR; }
+
+    void setCallback(const sp<EffectCallbackInterface>& callback) { mCallback = callback; }
+    sp<EffectCallbackInterface>&     callback() { return mCallback; }
+
+    status_t addHandle(EffectHandle *handle);
+    ssize_t disconnectHandle(EffectHandle *handle, bool unpinIfLast);
+    ssize_t removeHandle(EffectHandle *handle);
+    virtual ssize_t removeHandle_l(EffectHandle *handle);
+    EffectHandle* controlHandle_l();
+    bool purgeHandles();
+
+    void             checkSuspendOnEffectEnabled(bool enabled, bool threadLocked);
+
+    bool             isPinned() const { return mPinned; }
+    void             unPin() { mPinned = false; }
+
+    void             lock() { mLock.lock(); }
+    void             unlock() { mLock.unlock(); }
 
     status_t         updatePolicyState();
-    void             checkSuspendOnEffectEnabled(bool enabled, bool threadLocked);
+
+    virtual          sp<EffectModule> asEffectModule() { return nullptr; }
 
     void             dump(int fd, const Vector<String16>& args);
 
 private:
     friend class AudioFlinger;      // for mHandles
-    bool                mPinned;
+    bool             mPinned = false;
+
+    DISALLOW_COPY_AND_ASSIGN(EffectBase);
+
+    mutable Mutex             mLock;      // mutex for process, commands and handles list protection
+    sp<EffectCallbackInterface> mCallback; // parent effect chain
+    const int                 mId;        // this instance unique ID
+    const audio_session_t     mSessionId; // audio session ID
+    const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
+    effect_state              mState = IDLE; // current activation state
+    // effect is suspended: temporarily disabled by framework
+    bool                      mSuspended = false;
+
+    Vector<EffectHandle *>    mHandles;   // list of client handles
+                // First handle in mHandles has highest priority and controls the effect module
+
+    // Audio policy effect state management
+    // Mutex protecting transactions with audio policy manager as mLock cannot
+    // be held to avoid cross deadlocks with audio policy mutex
+    Mutex                     mPolicyLock;
+    // Effect is registered in APM or not
+    bool                      mPolicyRegistered = false;
+    // Effect enabled state communicated to APM. Enabled state corresponds to
+    // state requested by the EffectHandle with control
+    bool                      mPolicyEnabled = false;
+};
+
+// The EffectModule class is a wrapper object controlling the effect engine implementation
+// in the effect library. It prevents concurrent calls to process() and command() functions
+// from different client threads. It keeps a list of EffectHandle objects corresponding
+// to all client applications using this effect and notifies applications of effect state,
+// control or parameter changes. It manages the activation state machine to send appropriate
+// reset, enable, disable commands to effect engine and provide volume
+// ramping when effects are activated/deactivated.
+// When controlling an auxiliary effect, the EffectModule also provides an input buffer used by
+// the attached track(s) to accumulate their auxiliary channel.
+class EffectModule : public EffectBase {
+public:
+    EffectModule(const sp<EffectCallbackInterface>& callback,
+                    effect_descriptor_t *desc,
+                    int id,
+                    audio_session_t sessionId,
+                    bool pinned);
+    virtual ~EffectModule();
+
+    void process();
+    bool updateState();
+    status_t command(uint32_t cmdCode,
+                     uint32_t cmdSize,
+                     void *pCmdData,
+                     uint32_t *replySize,
+                     void *pReplyData) override;
+
+    void reset_l();
+    status_t configure();
+    status_t init();
+
+    uint32_t status() {
+        return mStatus;
+    }
+
+    bool isProcessEnabled() const;
+    bool isOffloadedOrDirect() const;
+    bool isVolumeControlEnabled() const;
+
+    void        setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+    int16_t     *inBuffer() const {
+        return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
+    }
+    void        setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+    int16_t     *outBuffer() const {
+        return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
+    }
+
+    ssize_t removeHandle_l(EffectHandle *handle) override;
+
+    status_t         setDevices(const AudioDeviceTypeAddrVector &devices);
+    status_t         setInputDevice(const AudioDeviceTypeAddr &device);
+    status_t         setVolume(uint32_t *left, uint32_t *right, bool controller);
+    status_t         setMode(audio_mode_t mode);
+    status_t         setAudioSource(audio_source_t source);
+    status_t         start();
+    status_t         stop();
+
+    status_t         setOffloaded(bool offloaded, audio_io_handle_t io);
+    bool             isOffloaded() const;
+    void             addEffectToHal_l();
+    void             release_l();
+
+    sp<EffectModule> asEffectModule() override { return this; }
+
+    void             dump(int fd, const Vector<String16>& args);
+
+private:
+    friend class AudioFlinger;      // for mHandles
 
     // Maximum time allocated to effect engines to complete the turn off sequence
     static const uint32_t MAX_DISABLE_TIME_MS = 10000;
@@ -207,23 +270,15 @@
     status_t removeEffectFromHal_l();
     status_t sendSetAudioDevicesCommand(const AudioDeviceTypeAddrVector &devices, uint32_t cmdCode);
 
-mutable Mutex               mLock;      // mutex for process, commands and handles list protection
-    sp<EffectCallbackInterface>     mCallback;     // parent effect chain
-    const int           mId;        // this instance unique ID
-    const audio_session_t mSessionId; // audio session ID
-    const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
     effect_config_t     mConfig;    // input and output audio configuration
     sp<EffectHalInterface> mEffectInterface; // Effect module HAL
     sp<EffectBufferHalInterface> mInBuffer;  // Buffers for interacting with HAL
     sp<EffectBufferHalInterface> mOutBuffer;
     status_t            mStatus;    // initialization status
-    effect_state        mState;     // current activation state
-    Vector<EffectHandle *> mHandles;    // list of client handles
                 // First handle in mHandles has highest priority and controls the effect module
     uint32_t mMaxDisableWaitCnt;    // maximum grace period before forcing an effect off after
                                     // sending disable command.
     uint32_t mDisableWaitCnt;       // current process() calls count during disable period.
-    bool     mSuspended;            // effect is suspended: temporarily disabled by framework
     bool     mOffloaded;            // effect is currently offloaded to the audio DSP
 
 #ifdef FLOAT_EFFECT_CHAIN
@@ -251,16 +306,6 @@
     static constexpr pid_t INVALID_PID = (pid_t)-1;
     // this tid is allowed to call setVolume() without acquiring the mutex.
     pid_t mSetVolumeReentrantTid = INVALID_PID;
-
-    // Audio policy effect state management
-    // Mutex protecting transactions with audio policy manager as mLock cannot
-    // be held to avoid cross deadlocks with audio policy mutex
-    Mutex   mPolicyLock;
-    // Effect is registered in APM or not
-    bool    mPolicyRegistered = false;
-    // Effect enabled state communicated to APM. Enabled state corresponds to
-    // state requested by the EffectHandle with control
-    bool    mPolicyEnabled = false;
 };
 
 // The EffectHandle class implements the IEffect interface. It provides resources
@@ -272,7 +317,7 @@
 class EffectHandle: public android::BnEffect {
 public:
 
-    EffectHandle(const sp<EffectModule>& effect,
+    EffectHandle(const sp<EffectBase>& effect,
             const sp<AudioFlinger::Client>& client,
             const sp<IEffectClient>& effectClient,
             int32_t priority);
@@ -310,9 +355,9 @@
     bool enabled() const { return mEnabled; }
 
     // Getters
-    wp<EffectModule> effect() const { return mEffect; }
+    wp<EffectBase> effect() const { return mEffect; }
     int id() const {
-        sp<EffectModule> effect = mEffect.promote();
+        sp<EffectBase> effect = mEffect.promote();
         if (effect == 0) {
             return 0;
         }
@@ -329,7 +374,7 @@
     DISALLOW_COPY_AND_ASSIGN(EffectHandle);
 
     Mutex mLock;                        // protects IEffect method calls
-    wp<EffectModule> mEffect;           // pointer to controlled EffectModule
+    wp<EffectBase> mEffect;           // pointer to controlled EffectModule
     sp<IEffectClient> mEffectClient;    // callback interface for client notifications
     /*const*/ sp<Client> mClient;       // client for shared memory allocation, see disconnect()
     sp<IMemory>         mCblkMemory;    // shared memory for control block
@@ -472,7 +517,7 @@
         status_t createEffectHal(const effect_uuid_t *pEffectUuid,
                int32_t sessionId, int32_t deviceId, sp<EffectHalInterface> *effect) override;
         status_t allocateHalBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
-        bool updateOrphanEffectChains(const sp<EffectModule>& effect) override;
+        bool updateOrphanEffectChains(const sp<EffectBase>& effect) override;
 
         audio_io_handle_t io() const override;
         bool isOutput() const override;
@@ -492,13 +537,13 @@
         void setVolumeForOutput(float left, float right) const override;
 
         // check if effects should be suspended/restored when a given effect is enable/disabled
-        void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
+        void checkSuspendOnEffectEnabled(const sp<EffectBase>& effect,
                               bool enabled, bool threadLocked) override;
         void resetVolume() override;
         uint32_t strategy() const override;
         int32_t activeTrackCnt() const override;
-        void onEffectEnable(const sp<EffectModule>& effect) override;
-        void onEffectDisable(const sp<EffectModule>& effect) override;
+        void onEffectEnable(const sp<EffectBase>& effect) override;
+        void onEffectDisable(const sp<EffectBase>& effect) override;
 
         wp<EffectChain> chain() const override { return mChain; }
 
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 53e2dd5..dbd761a 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -448,18 +448,6 @@
         *mPlayback.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
     }
 
-    // use a pseudo LCM between input and output framecount
-    size_t playbackFrameCount = mPlayback.thread()->frameCount();
-    int playbackShift = __builtin_ctz(playbackFrameCount);
-    size_t recordFrameCount = mRecord.thread()->frameCount();
-    int shift = __builtin_ctz(recordFrameCount);
-    if (playbackShift < shift) {
-        shift = playbackShift;
-    }
-    size_t frameCount = (playbackFrameCount * recordFrameCount) >> shift;
-    ALOGV("%s() playframeCount %zu recordFrameCount %zu frameCount %zu",
-            __func__, playbackFrameCount, recordFrameCount, frameCount);
-
     // create a special record track to capture from record thread
     uint32_t channelCount = mPlayback.thread()->channelCount();
     audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
@@ -503,7 +491,15 @@
     }
 
     sp<RecordThread::PatchRecord> tempRecordTrack;
+    const size_t playbackFrameCount = mPlayback.thread()->frameCount();
+    const size_t recordFrameCount = mRecord.thread()->frameCount();
+    size_t frameCount = 0;
     if ((inputFlags & AUDIO_INPUT_FLAG_DIRECT) && (outputFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
+        // PassthruPatchRecord produces buffers on demand (producesBufferOnDemand),
+        // so use the maximum of the playback and record thread frame counts
+        frameCount = std::max(playbackFrameCount, recordFrameCount);
+        ALOGV("%s() playframeCount %zu recordFrameCount %zu frameCount %zu",
+            __func__, playbackFrameCount, recordFrameCount, frameCount);
         tempRecordTrack = new RecordThread::PassthruPatchRecord(
                                                  mRecord.thread().get(),
                                                  sampleRate,
@@ -512,6 +508,16 @@
                                                  frameCount,
                                                  inputFlags);
     } else {
+        // use a pseudo LCM between input and output framecount
+        int playbackShift = __builtin_ctz(playbackFrameCount);
+        int shift = __builtin_ctz(recordFrameCount);
+        if (playbackShift < shift) {
+            shift = playbackShift;
+        }
+        frameCount = (playbackFrameCount * recordFrameCount) >> shift;
+        ALOGV("%s() playframeCount %zu recordFrameCount %zu frameCount %zu",
+            __func__, playbackFrameCount, recordFrameCount, frameCount);
+
         tempRecordTrack = new RecordThread::PatchRecord(
                                                  mRecord.thread().get(),
                                                  sampleRate,
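
Purely as an illustration of the relocated logic above (not part of the patch): the direct/passthru path now takes the maximum of the two thread frame counts, while the mixing path keeps the original "pseudo LCM". A self-contained sketch of the same arithmetic, with hypothetical helper names:

    // Illustrative only; function names are made up, not AudioFlinger APIs.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Mixing path: divide the product of the two counts by the largest power of
    // two dividing both, e.g. 1024 and 768 -> (1024 * 768) >> 8 == 3072, which is
    // a multiple of both buffer sizes.
    static size_t pseudoLcmFrameCount(size_t playback, size_t record) {
        int shift = std::min(__builtin_ctzl(playback), __builtin_ctzl(record));
        return (playback * record) >> shift;
    }

    // Direct/passthru path: buffers are produced on demand, so the larger of the
    // two thread frame counts is sufficient.
    static size_t passthruFrameCount(size_t playback, size_t record) {
        return std::max(playback, record);
    }

    int main() {
        std::printf("mix: %zu passthru: %zu\n",
                pseudoLcmFrameCount(1024, 768), passthruFrameCount(1024, 768));
        return 0;
    }
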
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a6d6914..b9afba8 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1419,11 +1419,11 @@
     sp<EffectModule> effect;
     {
         Mutex::Autolock _l(mLock);
-
-        effect = handle->effect().promote();
-        if (effect == nullptr) {
+        sp<EffectBase> effectBase = handle->effect().promote();
+        if (effectBase == nullptr) {
             return;
         }
+        effect = static_cast<EffectModule *>(effectBase.get());
         // restore suspended effects if the disconnected handle was enabled and the last one.
         remove = (effect->removeHandle(handle) == 0) && (!effect->isPinned() || unpinIfLast);
         if (remove) {
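
A side note on the promote-then-downcast pattern above: the handle now holds a wp<EffectBase>, so thread code that needs the concrete module promotes the weak reference first and then downcasts. A small stand-alone sketch of that pattern, with std::shared_ptr/std::weak_ptr standing in for sp<>/wp<> and hypothetical stand-in types:

    #include <cstdio>
    #include <memory>

    struct EffectBase { virtual ~EffectBase() = default; };
    struct EffectModule : EffectBase { void process() { std::puts("process"); } };

    int main() {
        std::shared_ptr<EffectBase> owned = std::make_shared<EffectModule>();
        std::weak_ptr<EffectBase> handleRef = owned;   // what a handle would keep
        if (std::shared_ptr<EffectBase> base = handleRef.lock()) {  // ~ wp<>::promote()
            // static_cast performs no runtime check; it is only valid because this
            // code path is known to deal with module-backed handles.
            auto* module = static_cast<EffectModule*>(base.get());
            module->process();
        }
        return 0;
    }
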
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
index 27bd3ff..1dec6f4 100644
--- a/services/audiopolicy/config/audio_policy_volumes.xml
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -181,6 +181,16 @@
                                                 ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
                                                 ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ASSISTANT" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ASSISTANT" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                                ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ASSISTANT" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ASSISTANT" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ASSISTANT" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                                ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEADSET"
                                             ref="FULL_SCALE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_SPEAKER"
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 01efc7a..46b950c 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -106,48 +106,33 @@
 
 engineConfig::ParsingResult EngineBase::loadAudioPolicyEngineConfig()
 {
-    auto loadProductStrategies =
-            [](auto& strategyConfigs, auto& productStrategies, auto& volumeGroups) {
-        for (auto& strategyConfig : strategyConfigs) {
-            sp<ProductStrategy> strategy = new ProductStrategy(strategyConfig.name);
-            for (const auto &group : strategyConfig.attributesGroups) {
-                const auto &iter = std::find_if(begin(volumeGroups), end(volumeGroups),
-                                         [&group](const auto &volumeGroup) {
-                        return group.volumeGroup == volumeGroup.second->getName(); });
-                ALOG_ASSERT(iter != end(volumeGroups), "Invalid Volume Group Name %s",
-                            group.volumeGroup.c_str());
-                if (group.stream != AUDIO_STREAM_DEFAULT) {
-                    iter->second->addSupportedStream(group.stream);
-                }
-                for (const auto &attr : group.attributesVect) {
-                    strategy->addAttributes({group.stream, iter->second->getId(), attr});
-                    iter->second->addSupportedAttributes(attr);
-                }
-            }
-            product_strategy_t strategyId = strategy->getId();
-            productStrategies[strategyId] = strategy;
-        }
-    };
-    auto loadVolumeGroups = [](auto &volumeConfigs, auto &volumeGroups) {
-        for (auto &volumeConfig : volumeConfigs) {
-            sp<VolumeGroup> volumeGroup = new VolumeGroup(volumeConfig.name, volumeConfig.indexMin,
-                                                          volumeConfig.indexMax);
-            volumeGroups[volumeGroup->getId()] = volumeGroup;
+    auto loadVolumeConfig = [](auto &volumeGroups, auto &volumeConfig) {
+        sp<VolumeGroup> volumeGroup = new VolumeGroup(volumeConfig.name, volumeConfig.indexMin,
+                                                      volumeConfig.indexMax);
+        volumeGroups[volumeGroup->getId()] = volumeGroup;
 
-            for (auto &configCurve : volumeConfig.volumeCurves) {
-                device_category deviceCat = DEVICE_CATEGORY_SPEAKER;
-                if (!DeviceCategoryConverter::fromString(configCurve.deviceCategory, deviceCat)) {
-                    ALOGE("%s: Invalid %s", __FUNCTION__, configCurve.deviceCategory.c_str());
-                    continue;
-                }
-                sp<VolumeCurve> curve = new VolumeCurve(deviceCat);
-                for (auto &point : configCurve.curvePoints) {
-                    curve->add({point.index, point.attenuationInMb});
-                }
-                volumeGroup->add(curve);
+        for (auto &configCurve : volumeConfig.volumeCurves) {
+            device_category deviceCat = DEVICE_CATEGORY_SPEAKER;
+            if (!DeviceCategoryConverter::fromString(configCurve.deviceCategory, deviceCat)) {
+                ALOGE("%s: Invalid %s", __FUNCTION__, configCurve.deviceCategory.c_str());
+                continue;
             }
+            sp<VolumeCurve> curve = new VolumeCurve(deviceCat);
+            for (auto &point : configCurve.curvePoints) {
+                curve->add({point.index, point.attenuationInMb});
+            }
+            volumeGroup->add(curve);
+        }
+        return volumeGroup;
+    };
+    auto addSupportedStreamAttributes = [](auto &group, auto &volumeGroup, auto &strategy) {
+        volumeGroup->addSupportedStream(group.stream);
+        for (const auto &attr : group.attributesVect) {
+            strategy->addAttributes({group.stream, volumeGroup->getId(), attr});
+            volumeGroup->addSupportedAttributes(attr);
         }
     };
+
     auto result = engineConfig::parse();
     if (result.parsedConfig == nullptr) {
         ALOGW("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
@@ -157,9 +142,37 @@
                   static_cast<size_t>(ret == NO_ERROR ? 0 : 1)};
     }
     ALOGE_IF(result.nbSkippedElement != 0, "skipped %zu elements", result.nbSkippedElement);
-    loadVolumeGroups(result.parsedConfig->volumeGroups, mVolumeGroups);
-    loadProductStrategies(result.parsedConfig->productStrategies, mProductStrategies,
-                          mVolumeGroups);
+
+    engineConfig::VolumeGroup defaultVolumeConfig;
+    for (auto &volumeConfig : result.parsedConfig->volumeGroups) {
+        // save default volume config for streams not defined in configuration
+        if (volumeConfig.name.compare("AUDIO_STREAM_MUSIC") == 0) {
+            defaultVolumeConfig = volumeConfig;
+        }
+        loadVolumeConfig(mVolumeGroups, volumeConfig);
+    }
+    for (auto& strategyConfig : result.parsedConfig->productStrategies) {
+        sp<ProductStrategy> strategy = new ProductStrategy(strategyConfig.name);
+        for (const auto &group : strategyConfig.attributesGroups) {
+            const auto &iter = std::find_if(begin(mVolumeGroups), end(mVolumeGroups),
+                                         [&group](const auto &volumeGroup) {
+                    return group.volumeGroup == volumeGroup.second->getName(); });
+            if (group.stream != AUDIO_STREAM_DEFAULT) {
+                if (iter == end(mVolumeGroups)) {
+                    ALOGW("%s: No configuration of %s found, using default volume configuration"
+                            , __FUNCTION__, group.volumeGroup.c_str());
+                    defaultVolumeConfig.name = group.volumeGroup;
+                    sp<VolumeGroup> volumeGroup =
+                            loadVolumeConfig(mVolumeGroups, defaultVolumeConfig);
+                    addSupportedStreamAttributes(group, volumeGroup, strategy);
+                } else {
+                    addSupportedStreamAttributes(group, iter->second, strategy);
+                }
+            }
+        }
+        product_strategy_t strategyId = strategy->getId();
+        mProductStrategies[strategyId] = strategy;
+    }
     mProductStrategies.initialize();
     return result;
 }
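
To make the new fallback concrete: a product strategy that references a volume group missing from the parsed configuration now gets a copy of the AUDIO_STREAM_MUSIC volume configuration registered under the missing name. A toy sketch of that fallback, using hypothetical types and index values rather than the engine's:

    #include <cstdio>
    #include <map>
    #include <string>

    struct VolumeConfig { std::string name; int indexMin; int indexMax; };

    int main() {
        std::map<std::string, VolumeConfig> volumeGroups = {
            {"AUDIO_STREAM_MUSIC", {"AUDIO_STREAM_MUSIC", 0, 40}},
        };
        // Saved while loading: the music config doubles as the default.
        VolumeConfig defaultVolumeConfig = volumeGroups.at("AUDIO_STREAM_MUSIC");

        const std::string wanted = "assistant";        // referenced by a strategy
        if (volumeGroups.find(wanted) == volumeGroups.end()) {
            defaultVolumeConfig.name = wanted;          // clone music curves/range
            volumeGroups[wanted] = defaultVolumeConfig;
        }
        std::printf("%s: index range [%d, %d]\n", volumeGroups[wanted].name.c_str(),
                volumeGroups[wanted].indexMin, volumeGroups[wanted].indexMax);
        return 0;
    }
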
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index 20c57ee..fbce801 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -81,6 +81,10 @@
     },
     {"STRATEGY_MEDIA",
      {
+         {"assistant", AUDIO_STREAM_ASSISTANT, "AUDIO_STREAM_ASSISTANT",
+          {{AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
+            AUDIO_SOURCE_DEFAULT, 0, ""}}
+         },
          {"music", AUDIO_STREAM_MUSIC, "AUDIO_STREAM_MUSIC",
           {
               {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, 0, ""},
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
index 9398743..b1c0dcf 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -72,6 +72,12 @@
             <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
             <Attributes></Attributes>
         </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_ASSISTANT" volumeGroup="assistant">
+            <Attributes>
+                <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+                <Usage value="AUDIO_USAGE_ASSISTANT"/>
+            </Attributes>
+        </AttributesGroup>
         <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
             <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
         </AttributesGroup>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
index 707a184..0f9614e 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -205,7 +205,16 @@
         <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
         <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
     </volumeGroup>
-
+    <volumeGroup>
+        <name>assistant</name>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID"  ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
     <volumeGroup>
         <name>rerouting</name>
         <indexMin>0</indexMin>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
index 9398743..b1c0dcf 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -72,6 +72,12 @@
             <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
             <Attributes></Attributes>
         </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_ASSISTANT" volumeGroup="assistant">
+            <Attributes>
+                <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+                <Usage value="AUDIO_USAGE_ASSISTANT"/>
+            </Attributes>
+        </AttributesGroup>
         <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
             <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
         </AttributesGroup>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
index 707a184..a259950 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -207,6 +207,17 @@
     </volumeGroup>
 
     <volumeGroup>
+        <name>assistant</name>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID"  ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
         <name>rerouting</name>
         <indexMin>0</indexMin>
         <indexMax>1</indexMax>
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 2124646..d4d59d6 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2787,16 +2787,6 @@
     return mEffects.unregisterEffect(id);
 }
 
-void AudioPolicyManager::cleanUpEffectsForIo(audio_io_handle_t io)
-{
-    EffectDescriptorCollection effects = mEffects.getEffectsForIo(io);
-    for (size_t i = 0; i < effects.size(); i++) {
-        ALOGW("%s removing stale effect %s, id %d on closed IO %d",
-              __func__, effects.valueAt(i)->mDesc.name, effects.keyAt(i), io);
-        unregisterEffect(effects.keyAt(i));
-    }
-}
-
 status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled)
 {
     sp<EffectDescriptor> effect = mEffects.getEffect(id);
@@ -5043,8 +5033,6 @@
             setMsdPatch();
         }
     }
-
-    cleanUpEffectsForIo(output);
 }
 
 void AudioPolicyManager::closeInput(audio_io_handle_t input)
@@ -5076,8 +5064,6 @@
             mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
         mpClientInterface->setSoundTriggerCaptureState(false);
     }
-
-    cleanUpEffectsForIo(input);
 }
 
 SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevices(
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index f6e4fc1..d6c1016 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -886,8 +886,6 @@
                 int delayMs,
                 uid_t uid,
                 sp<AudioPatch> *patchDescPtr);
-
-        void cleanUpEffectsForIo(audio_io_handle_t io);
 };
 
 };
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 4947714..60caa31 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -562,7 +562,8 @@
     AUDIO_STREAM_BLUETOOTH_SCO_TAG,
     AUDIO_STREAM_ENFORCED_AUDIBLE_TAG,
     AUDIO_STREAM_DTMF_TAG,
-    AUDIO_STREAM_TTS_TAG
+    AUDIO_STREAM_TTS_TAG,
+    AUDIO_STREAM_ASSISTANT_TAG
 };
 
 // returns the audio_stream_t enum corresponding to the output stream name or
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index e4a19ea..2a8349c 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -800,6 +800,8 @@
                 (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VIRTUAL_SOURCE,
                                      AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
                 (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
                                      AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"}
                 )
         );
@@ -841,6 +843,8 @@
                 (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
                                      AUDIO_SOURCE_DEFAULT, 0, ""},
                 (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
                                      AUDIO_SOURCE_DEFAULT, 0, ""}
                 )
         );
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 9cc15cd..6052a06 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -56,7 +56,9 @@
         "device3/StatusTracker.cpp",
         "device3/Camera3BufferManager.cpp",
         "device3/Camera3StreamSplitter.cpp",
+        "device3/CoordinateMapper.cpp",
         "device3/DistortionMapper.cpp",
+        "device3/ZoomRatioMapper.cpp",
         "gui/RingBufferConsumer.cpp",
         "utils/CameraThreadState.cpp",
         "hidl/AidlCameraDeviceCallbacks.cpp",
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 7ed4c3d..23f7884 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -40,6 +40,7 @@
 #include <utils/Trace.h>
 
 #include "api2/HeicCompositeStream.h"
+#include "device3/ZoomRatioMapper.h"
 
 namespace android {
 
@@ -232,6 +233,15 @@
     return deviceInfo->hasFlashUnit();
 }
 
+bool CameraProviderManager::supportNativeZoomRatio(const std::string &id) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return false;
+
+    return deviceInfo->supportNativeZoomRatio();
+}
+
 status_t CameraProviderManager::getResourceCost(const std::string &id,
         CameraResourceCost* cost) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -2035,6 +2045,13 @@
                 __FUNCTION__, strerror(-res), res);
     }
 
+    res = camera3::ZoomRatioMapper::overrideZoomRatioTags(
+            &mCameraCharacteristics, &mSupportNativeZoomRatio);
+    if (OK != res) {
+        ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+    }
+
     camera_metadata_entry flashAvailable =
             mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
     if (flashAvailable.count == 1 &&
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 2ef1f6f..651b8a1 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -187,6 +187,11 @@
     bool hasFlashUnit(const std::string &id) const;
 
     /**
+     * Return true if the camera device has native zoom ratio support.
+     */
+    bool supportNativeZoomRatio(const std::string &id) const;
+
+    /**
      * Return the resource cost of this camera device
      */
     status_t getResourceCost(const std::string &id,
@@ -416,6 +421,7 @@
             sp<ProviderInfo> mParentProvider;
 
             bool hasFlashUnit() const { return mHasFlashUnit; }
+            bool supportNativeZoomRatio() const { return mSupportNativeZoomRatio; }
             virtual status_t setTorchMode(bool enabled) = 0;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
             virtual bool isAPI1Compatible() const = 0;
@@ -449,10 +455,11 @@
                     mIsLogicalCamera(false), mResourceCost(resourceCost),
                     mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
                     mParentProvider(parentProvider), mHasFlashUnit(false),
-                    mPublicCameraIds(publicCameraIds) {}
+                    mSupportNativeZoomRatio(false), mPublicCameraIds(publicCameraIds) {}
             virtual ~DeviceInfo();
         protected:
-            bool mHasFlashUnit;
+            bool mHasFlashUnit; // const after constructor
+            bool mSupportNativeZoomRatio; // const after constructor
             const std::vector<std::string>& mPublicCameraIds;
 
             template<class InterfaceT>
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 4e5c8d6..1c5281d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -133,6 +133,7 @@
         session->close();
         return res;
     }
+    mSupportNativeZoomRatio = manager->supportNativeZoomRatio(mId.string());
 
     std::vector<std::string> physicalCameraIds;
     bool isLogical = manager->isLogicalCamera(mId.string(), &physicalCameraIds);
@@ -147,8 +148,11 @@
                 return res;
             }
 
-            if (DistortionMapper::isDistortionSupported(mPhysicalDeviceInfoMap[physicalId])) {
-                mDistortionMappers[physicalId].setupStaticInfo(mPhysicalDeviceInfoMap[physicalId]);
+            bool usePrecorrectArray =
+                    DistortionMapper::isDistortionSupported(mPhysicalDeviceInfoMap[physicalId]);
+            if (usePrecorrectArray) {
+                res = mDistortionMappers[physicalId].setupStaticInfo(
+                        mPhysicalDeviceInfoMap[physicalId]);
                 if (res != OK) {
                     SET_ERR_L("Unable to read camera %s's calibration fields for distortion "
                             "correction", physicalId.c_str());
@@ -156,6 +160,15 @@
                     return res;
                 }
             }
+
+            res = mZoomRatioMappers[physicalId].initZoomRatioTags(
+                    &mPhysicalDeviceInfoMap[physicalId],
+                    mSupportNativeZoomRatio, usePrecorrectArray);
+            if (res != OK) {
+                SET_ERR_L("Failed to initialize camera %s's zoomRatio tags: %s (%d)",
+                        physicalId.c_str(), strerror(-res), res);
+                return res;
+            }
         }
     }
 
@@ -331,13 +344,23 @@
         }
     }
 
-    if (DistortionMapper::isDistortionSupported(mDeviceInfo)) {
+    bool usePrecorrectArray = DistortionMapper::isDistortionSupported(mDeviceInfo);
+    if (usePrecorrectArray) {
         res = mDistortionMappers[mId.c_str()].setupStaticInfo(mDeviceInfo);
         if (res != OK) {
             SET_ERR_L("Unable to read necessary calibration fields for distortion correction");
             return res;
         }
     }
+
+    res = mZoomRatioMappers[mId.c_str()].initZoomRatioTags(&mDeviceInfo,
+            mSupportNativeZoomRatio, usePrecorrectArray);
+    if (res != OK) {
+        SET_ERR_L("Failed to initialize zoomRatio tags: %s (%d)",
+                strerror(-res), res);
+        return res;
+    }
+
     return OK;
 }
 
@@ -2124,6 +2147,15 @@
         set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
         mRequestTemplateCache[templateId].acquire(rawRequest);
 
+        // Override the template request with zoomRatioMapper
+        res = mZoomRatioMappers[mId.c_str()].initZoomRatioInTemplate(
+                &mRequestTemplateCache[templateId]);
+        if (res != OK) {
+            CLOGE("Failed to update zoom ratio for template %d: %s (%d)",
+                    templateId, strerror(-res), res);
+            return res;
+        }
+
         *request = mRequestTemplateCache[templateId];
         mLastTemplateId = templateId;
     }
@@ -3146,14 +3178,15 @@
         int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
         bool hasAppCallback, nsecs_t maxExpectedDuration,
         std::set<String8>& physicalCameraIds, bool isStillCapture,
-        bool isZslCapture, const SurfaceMap& outputSurfaces) {
+        bool isZslCapture, const std::set<std::string>& cameraIdsWithZoom,
+        const SurfaceMap& outputSurfaces) {
     ATRACE_CALL();
     Mutex::Autolock l(mInFlightLock);
 
     ssize_t res;
     res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
             hasAppCallback, maxExpectedDuration, physicalCameraIds, isStillCapture, isZslCapture,
-            outputSurfaces));
+            cameraIdsWithZoom, outputSurfaces));
     if (res < 0) return res;
 
     if (mInFlightMap.size() == 1) {
@@ -3504,7 +3537,7 @@
         CaptureResultExtras &resultExtras,
         CameraMetadata &collectedPartialResult,
         uint32_t frameNumber,
-        bool reprocess, bool zslStillCapture,
+        bool reprocess, bool zslStillCapture, const std::set<std::string>& cameraIdsWithZoom,
         const std::vector<PhysicalCaptureResultInfo>& physicalMetadatas) {
     ATRACE_CALL();
     if (pendingMetadata.isEmpty())
@@ -3573,19 +3606,39 @@
             mDistortionMappers[mId.c_str()].correctCaptureResult(&captureResult.mMetadata);
     if (res != OK) {
         SET_ERR("Unable to correct capture result metadata for frame %d: %s (%d)",
-                frameNumber, strerror(res), res);
+                frameNumber, strerror(-res), res);
         return;
     }
+
+    // Fix up result metadata to account for zoom ratio availabilities between
+    // HAL and app.
+    bool zoomRatioIs1 = cameraIdsWithZoom.find(mId.c_str()) == cameraIdsWithZoom.end();
+    res = mZoomRatioMappers[mId.c_str()].updateCaptureResult(
+            &captureResult.mMetadata, zoomRatioIs1);
+    if (res != OK) {
+        SET_ERR("Failed to update capture result zoom ratio metadata for frame %d: %s (%d)",
+                frameNumber, strerror(-res), res);
+        return;
+    }
+
     for (auto& physicalMetadata : captureResult.mPhysicalMetadatas) {
         String8 cameraId8(physicalMetadata.mPhysicalCameraId);
-        if (mDistortionMappers.find(cameraId8.c_str()) == mDistortionMappers.end()) {
-            continue;
+        if (mDistortionMappers.find(cameraId8.c_str()) != mDistortionMappers.end()) {
+            res = mDistortionMappers[cameraId8.c_str()].correctCaptureResult(
+                    &physicalMetadata.mPhysicalCameraMetadata);
+            if (res != OK) {
+                SET_ERR("Unable to correct physical capture result metadata for frame %d: %s (%d)",
+                        frameNumber, strerror(-res), res);
+                return;
+            }
         }
-        res = mDistortionMappers[cameraId8.c_str()].correctCaptureResult(
-                &physicalMetadata.mPhysicalCameraMetadata);
+
+        zoomRatioIs1 = cameraIdsWithZoom.find(cameraId8.c_str()) == cameraIdsWithZoom.end();
+        res = mZoomRatioMappers[cameraId8.c_str()].updateCaptureResult(
+                &physicalMetadata.mPhysicalCameraMetadata, zoomRatioIs1);
         if (res != OK) {
-            SET_ERR("Unable to correct physical capture result metadata for frame %d: %s (%d)",
-                    frameNumber, strerror(res), res);
+            SET_ERR("Failed to update camera %s's physical zoom ratio metadata for "
+                    "frame %d: %s(%d)", cameraId8.c_str(), frameNumber, strerror(-res), res);
             return;
         }
     }
@@ -3790,7 +3843,7 @@
                 sendCaptureResult(metadata, request.resultExtras,
                     collectedPartialResult, frameNumber,
                     hasInputBufferInRequest, request.zslCapture && request.stillCapture,
-                    request.physicalMetadatas);
+                    request.cameraIdsWithZoom, request.physicalMetadatas);
             }
         }
 
@@ -4008,7 +4061,7 @@
                 sendCaptureResult(r.pendingMetadata, r.resultExtras,
                     r.collectedPartialResult, msg.frame_number,
                     r.hasInputBuffer, r.zslCapture && r.stillCapture,
-                    r.physicalMetadatas);
+                    r.cameraIdsWithZoom, r.physicalMetadatas);
             }
             bool timestampIncreasing = !(r.zslCapture || r.hasInputBuffer);
             returnOutputBuffers(r.pendingOutputBuffers.array(),
@@ -5610,6 +5663,7 @@
                 // request in a batch as new
                 !(batchedRequest && i > 0);
         if (newRequest) {
+            std::set<std::string> cameraIdsWithZoom;
             /**
              * HAL workaround:
              * Insert a dummy trigger ID if a trigger is set but no trigger ID is
@@ -5642,6 +5696,28 @@
                             return INVALID_OPERATION;
                         }
                     }
+
+                    for (it = captureRequest->mSettingsList.begin();
+                            it != captureRequest->mSettingsList.end(); it++) {
+                        if (parent->mZoomRatioMappers.find(it->cameraId) ==
+                                parent->mZoomRatioMappers.end()) {
+                            continue;
+                        }
+
+                        camera_metadata_entry_t e = it->metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+                        if (e.count > 0 && e.data.f[0] != 1.0f) {
+                            cameraIdsWithZoom.insert(it->cameraId);
+                        }
+
+                        res = parent->mZoomRatioMappers[it->cameraId].updateCaptureRequest(
+                            &(it->metadata));
+                        if (res != OK) {
+                            SET_ERR("RequestThread: Unable to correct capture requests "
+                                    "for zoom ratio for request %d: %s (%d)",
+                                    halRequest->frame_number, strerror(-res), res);
+                            return INVALID_OPERATION;
+                        }
+                    }
                 }
             }
 
@@ -5652,6 +5728,7 @@
             captureRequest->mSettingsList.begin()->metadata.sort();
             halRequest->settings = captureRequest->mSettingsList.begin()->metadata.getAndLock();
             mPrevRequest = captureRequest;
+            mPrevCameraIdsWithZoom = cameraIdsWithZoom;
             ALOGVV("%s: Request settings are NEW", __FUNCTION__);
 
             IF_ALOGV() {
@@ -5839,7 +5916,7 @@
                 /*hasInput*/halRequest->input_buffer != NULL,
                 hasCallback,
                 calculateMaxExpectedDuration(halRequest->settings),
-                requestedPhysicalCameras, isStillCapture, isZslCapture,
+                requestedPhysicalCameras, isStillCapture, isZslCapture, mPrevCameraIdsWithZoom,
                 (mUseHalBufManager) ? uniqueSurfaceIdMap :
                                       SurfaceMap{});
         ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index eabc44d..5faabd1 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -46,6 +46,7 @@
 #include "device3/StatusTracker.h"
 #include "device3/Camera3BufferManager.h"
 #include "device3/DistortionMapper.h"
+#include "device3/ZoomRatioMapper.h"
 #include "utils/TagMonitor.h"
 #include "utils/LatencyHistogram.h"
 #include <camera_metadata_hidden.h>
@@ -442,6 +443,7 @@
     sp<HalInterface> mInterface;
 
     CameraMetadata             mDeviceInfo;
+    bool                       mSupportNativeZoomRatio;
     std::unordered_map<std::string, CameraMetadata> mPhysicalDeviceInfoMap;
 
     CameraMetadata             mRequestTemplateCache[CAMERA3_TEMPLATE_COUNT];
@@ -980,6 +982,7 @@
 
         sp<CaptureRequest> mPrevRequest;
         int32_t            mPrevTriggers;
+        std::set<std::string> mPrevCameraIdsWithZoom;
 
         uint32_t           mFrameNumber;
 
@@ -1078,6 +1081,9 @@
         // Indicates a ZSL capture request
         bool zslCapture;
 
+        // Requested camera ids (both logical and physical) with zoomRatio != 1.0f
+        std::set<std::string> cameraIdsWithZoom;
+
         // What shared surfaces an output should go to
         SurfaceMap outputSurfaces;
 
@@ -1099,7 +1105,7 @@
         InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
                 bool hasAppCallback, nsecs_t maxDuration,
                 const std::set<String8>& physicalCameraIdSet, bool isStillCapture,
-                bool isZslCapture,
+                bool isZslCapture, const std::set<std::string>& idsWithZoom,
                 const SurfaceMap& outSurfaces = SurfaceMap{}) :
                 shutterTimestamp(0),
                 sensorTimestamp(0),
@@ -1114,6 +1120,7 @@
                 physicalCameraIds(physicalCameraIdSet),
                 stillCapture(isStillCapture),
                 zslCapture(isZslCapture),
+                cameraIdsWithZoom(idsWithZoom),
                 outputSurfaces(outSurfaces) {
         }
     };
@@ -1131,7 +1138,7 @@
     status_t registerInFlight(uint32_t frameNumber,
             int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
             bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds,
-            bool isStillCapture, bool isZslCapture,
+            bool isStillCapture, bool isZslCapture, const std::set<std::string>& cameraIdsWithZoom,
             const SurfaceMap& outputSurfaces);
 
     /**
@@ -1256,6 +1263,7 @@
             CaptureResultExtras &resultExtras,
             CameraMetadata &collectedPartialResult, uint32_t frameNumber,
             bool reprocess, bool zslStillCapture,
+            const std::set<std::string>& cameraIdsWithZoom,
             const std::vector<PhysicalCaptureResultInfo>& physicalMetadatas);
 
     bool isLastFullResult(const InFlightRequest& inFlightRequest);
@@ -1288,6 +1296,11 @@
     // logical camera and its physical subcameras.
     std::unordered_map<std::string, camera3::DistortionMapper> mDistortionMappers;
 
+    /**
+     * Zoom ratio mapper support
+     */
+    std::unordered_map<std::string, camera3::ZoomRatioMapper> mZoomRatioMappers;
+
     // Debug tracker for metadata tag value changes
     // - Enabled with the -m <taglist> option to dumpsys, such as
     //   dumpsys -m android.control.aeState,android.control.aeMode
diff --git a/services/camera/libcameraservice/device3/CoordinateMapper.cpp b/services/camera/libcameraservice/device3/CoordinateMapper.cpp
new file mode 100644
index 0000000..d62f397
--- /dev/null
+++ b/services/camera/libcameraservice/device3/CoordinateMapper.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <system/camera_metadata_tags.h>
+
+#include "device3/CoordinateMapper.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Metadata keys to correct when adjusting coordinates for distortion correction
+ */
+
+// Both capture request and result
+constexpr std::array<uint32_t, 3> CoordinateMapper::kMeteringRegionsToCorrect = {
+    ANDROID_CONTROL_AF_REGIONS,
+    ANDROID_CONTROL_AE_REGIONS,
+    ANDROID_CONTROL_AWB_REGIONS
+};
+
+// Both capture request and result
+constexpr std::array<uint32_t, 1> CoordinateMapper::kRectsToCorrect = {
+    ANDROID_SCALER_CROP_REGION,
+};
+
+// Only for capture result
+constexpr std::array<uint32_t, 2> CoordinateMapper::kResultPointsToCorrectNoClamp = {
+    ANDROID_STATISTICS_FACE_RECTANGLES, // Says rectangles, is really points
+    ANDROID_STATISTICS_FACE_LANDMARKS,
+};
+
+} // namespace camera3
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/CoordinateMapper.h b/services/camera/libcameraservice/device3/CoordinateMapper.h
new file mode 100644
index 0000000..5164856
--- /dev/null
+++ b/services/camera/libcameraservice/device3/CoordinateMapper.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_COORDINATEMAPPER_H
+#define ANDROID_SERVERS_COORDINATEMAPPER_H
+
+#include <array>
+
+namespace android {
+
+namespace camera3 {
+
+class CoordinateMapper {
+    // Right now only stores metadata tags containing 2D coordinates
+    // to be corrected.
+protected:
+    // Metadata key lists to correct
+
+    // Both capture request and result
+    static const std::array<uint32_t, 3> kMeteringRegionsToCorrect;
+
+    // Both capture request and result
+    static const std::array<uint32_t, 1> kRectsToCorrect;
+
+    // Only for capture results; don't clamp
+    static const std::array<uint32_t, 2> kResultPointsToCorrectNoClamp;
+}; // class CoordinateMapper
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
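
The point of the two new files above is to hold the metadata-tag lists once, so DistortionMapper (whose diff follows) and the new ZoomRatioMapper can reuse them via inheritance instead of duplicating them. A minimal sketch of the pattern, with hypothetical class names and placeholder tag values:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    class CoordinateMapperSketch {
    protected:
        // Declared once here instead of being duplicated in every mapper.
        static constexpr std::array<uint32_t, 3> kMeteringRegions = {1, 2, 3};
    };

    class DistortionMapperSketch : private CoordinateMapperSketch {
    public:
        void dumpTags() const {
            for (uint32_t tag : kMeteringRegions) std::printf("tag %u\n", tag);
        }
    };

    int main() {
        DistortionMapperSketch{}.dumpTags();
        return 0;
    }
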
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index ae7af8e..8132225 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -27,41 +27,14 @@
 
 namespace camera3 {
 
-/**
- * Metadata keys to correct when adjusting coordinates for distortion correction
- */
-
-// Both capture request and result
-constexpr std::array<uint32_t, 3> DistortionMapper::kMeteringRegionsToCorrect = {
-    ANDROID_CONTROL_AF_REGIONS,
-    ANDROID_CONTROL_AE_REGIONS,
-    ANDROID_CONTROL_AWB_REGIONS
-};
-
-// Only capture request
-constexpr std::array<uint32_t, 1> DistortionMapper::kRequestRectsToCorrect = {
-    ANDROID_SCALER_CROP_REGION,
-};
-
-// Only for capture result
-constexpr std::array<uint32_t, 1> DistortionMapper::kResultRectsToCorrect = {
-    ANDROID_SCALER_CROP_REGION,
-};
-
-// Only for capture result
-constexpr std::array<uint32_t, 2> DistortionMapper::kResultPointsToCorrectNoClamp = {
-    ANDROID_STATISTICS_FACE_RECTANGLES, // Says rectangles, is really points
-    ANDROID_STATISTICS_FACE_LANDMARKS,
-};
-
 
 DistortionMapper::DistortionMapper() : mValidMapping(false), mValidGrids(false) {
 }
 
-bool DistortionMapper::isDistortionSupported(const CameraMetadata &result) {
+bool DistortionMapper::isDistortionSupported(const CameraMetadata &deviceInfo) {
     bool isDistortionCorrectionSupported = false;
     camera_metadata_ro_entry_t distortionCorrectionModes =
-            result.find(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES);
+            deviceInfo.find(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES);
     for (size_t i = 0; i < distortionCorrectionModes.count; i++) {
         if (distortionCorrectionModes.data.u8[i] !=
                 ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
@@ -124,7 +97,7 @@
                 if (res != OK) return res;
             }
         }
-        for (auto rect : kRequestRectsToCorrect) {
+        for (auto rect : kRectsToCorrect) {
             e = request->find(rect);
             res = mapCorrectedRectToRaw(e.data.i32, e.count / 4, /*clamp*/true);
             if (res != OK) return res;
@@ -160,7 +133,7 @@
                 if (res != OK) return res;
             }
         }
-        for (auto rect : kResultRectsToCorrect) {
+        for (auto rect : kRectsToCorrect) {
             e = result->find(rect);
             res = mapRawRectToCorrected(e.data.i32, e.count / 4, /*clamp*/true);
             if (res != OK) return res;
@@ -390,7 +363,6 @@
     return OK;
 }
 
-
 status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount, bool clamp,
         bool simple) const {
     if (!mValidMapping) return INVALID_OPERATION;
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.h b/services/camera/libcameraservice/device3/DistortionMapper.h
index 4c0a1a6..a255003 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.h
+++ b/services/camera/libcameraservice/device3/DistortionMapper.h
@@ -22,6 +22,7 @@
 #include <mutex>
 
 #include "camera/CameraMetadata.h"
+#include "device3/CoordinateMapper.h"
 
 namespace android {
 
@@ -31,7 +32,7 @@
  * Utilities to transform between raw (distorted) and warped (corrected) coordinate systems
  * for cameras that support geometric distortion
  */
-class DistortionMapper {
+class DistortionMapper : private CoordinateMapper {
   public:
     DistortionMapper();
 
@@ -150,20 +151,6 @@
     // Fuzziness for float inequality tests
     constexpr static float kFloatFuzz = 1e-4;
 
-    // Metadata key lists to correct
-
-    // Both capture request and result
-    static const std::array<uint32_t, 3> kMeteringRegionsToCorrect;
-
-    // Only capture request
-    static const std::array<uint32_t, 1> kRequestRectsToCorrect;
-
-    // Only capture result
-    static const std::array<uint32_t, 1> kResultRectsToCorrect;
-
-    // Only for capture results; don't clamp
-    static const std::array<uint32_t, 2> kResultPointsToCorrectNoClamp;
-
     // Single implementation for various mapCorrectedToRaw methods
     template<typename T>
     status_t mapCorrectedToRawImpl(T* coordPairs, int coordCount, bool clamp, bool simple) const;
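
The ZoomRatioMapper added in the next file derives an effective zoom ratio from ANDROID_SCALER_CROP_REGION by re-centering the crop on the (pre-correction) active array and taking the smallest of the four per-edge scale factors, which copes with letterboxed or pillarboxed crops. A worked numeric sketch of that formula, using made-up array and crop values:

    #include <algorithm>
    #include <cstdio>

    // Same math as deriveZoomRatio below, on hypothetical values: a 4000x3000
    // array with a centered 2000x1500 crop should yield a zoom ratio of 2.0.
    int main() {
        const float arrayW = 4000.0f, arrayH = 3000.0f;
        const int crop[4] = {1000, 750, 2000, 1500};   // left, top, width, height

        const float centerX = arrayW / 2.0f, centerY = arrayH / 2.0f;
        // Crop edges re-mapped to a coordinate system centered on the array center.
        const float left   = centerX - crop[0];
        const float top    = centerY - crop[1];
        const float right  = crop[0] + crop[2] - centerX;
        const float bottom = crop[1] + crop[3] - centerY;

        // Per-edge scale factors, each at least 1.0; the minimum wins.
        const float zoom = std::min(
                std::min(std::max(arrayW / (2 * left), 1.0f),
                         std::max(arrayW / (2 * right), 1.0f)),
                std::min(std::max(arrayH / (2 * top), 1.0f),
                         std::max(arrayH / (2 * bottom), 1.0f)));
        std::printf("derived zoom ratio: %.2f\n", zoom);   // prints 2.00
        return 0;
    }
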
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
new file mode 100644
index 0000000..7718819
--- /dev/null
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-ZoomRatioMapper"
+//#define LOG_NDEBUG 0
+
+#include <algorithm>
+
+#include "device3/ZoomRatioMapper.h"
+
+namespace android {
+
+namespace camera3 {
+
+ZoomRatioMapper::ZoomRatioMapper() : mHalSupportsZoomRatio(false) {
+}
+
+status_t ZoomRatioMapper::initZoomRatioInTemplate(CameraMetadata *request) {
+    camera_metadata_entry_t entry;
+    entry = request->find(ANDROID_CONTROL_ZOOM_RATIO);
+    float defaultZoomRatio = 1.0f;
+    if (entry.count == 0) {
+        return request->update(ANDROID_CONTROL_ZOOM_RATIO, &defaultZoomRatio, 1);
+    }
+    return OK;
+}
+
+status_t ZoomRatioMapper::overrideZoomRatioTags(
+        CameraMetadata* deviceInfo, bool* supportNativeZoomRatio) {
+    if (deviceInfo == nullptr || supportNativeZoomRatio == nullptr) {
+        return BAD_VALUE;
+    }
+
+    camera_metadata_entry_t entry;
+    entry = deviceInfo->find(ANDROID_CONTROL_ZOOM_RATIO_RANGE);
+    if (entry.count != 2 && entry.count != 0) return BAD_VALUE;
+
+    // Hal has zoom ratio support
+    if (entry.count == 2) {
+        *supportNativeZoomRatio = true;
+        return OK;
+    }
+
+    // Hal has no zoom ratio support
+    *supportNativeZoomRatio = false;
+
+    entry = deviceInfo->find(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM);
+    if (entry.count != 1) {
+        ALOGI("%s: Camera device doesn't support SCALER_AVAILABLE_MAX_DIGITAL_ZOOM key!",
+                __FUNCTION__);
+        return OK;
+    }
+
+    float zoomRange[] = {1.0f, entry.data.f[0]};
+    status_t res = deviceInfo->update(ANDROID_CONTROL_ZOOM_RATIO_RANGE, zoomRange, 2);
+    if (res != OK) {
+        ALOGE("%s: Failed to update CONTROL_ZOOM_RATIO_RANGE key: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    std::vector<int32_t> requestKeys;
+    entry = deviceInfo->find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
+    if (entry.count > 0) {
+        requestKeys.insert(requestKeys.end(), entry.data.i32, entry.data.i32 + entry.count);
+    }
+    requestKeys.push_back(ANDROID_CONTROL_ZOOM_RATIO);
+    res = deviceInfo->update(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
+            requestKeys.data(), requestKeys.size());
+    if (res != OK) {
+        ALOGE("%s: Failed to update REQUEST_AVAILABLE_REQUEST_KEYS: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    std::vector<int32_t> resultKeys;
+    entry = deviceInfo->find(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS);
+    if (entry.count > 0) {
+        resultKeys.insert(resultKeys.end(), entry.data.i32, entry.data.i32 + entry.count);
+    }
+    resultKeys.push_back(ANDROID_CONTROL_ZOOM_RATIO);
+    res = deviceInfo->update(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
+            resultKeys.data(), resultKeys.size());
+    if (res != OK) {
+        ALOGE("%s: Failed to update REQUEST_AVAILABLE_RESULT_KEYS: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    std::vector<int32_t> charKeys;
+    entry = deviceInfo->find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+    if (entry.count > 0) {
+        charKeys.insert(charKeys.end(), entry.data.i32, entry.data.i32 + entry.count);
+    }
+    charKeys.push_back(ANDROID_CONTROL_ZOOM_RATIO_RANGE);
+    res = deviceInfo->update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
+            charKeys.data(), charKeys.size());
+    if (res != OK) {
+        ALOGE("%s: Failed to update REQUEST_AVAILABLE_CHARACTERISTICS_KEYS: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t ZoomRatioMapper::initZoomRatioTags(const CameraMetadata* deviceInfo,
+        bool supportNativeZoomRatio, bool usePrecorrectArray) {
+    std::lock_guard<std::mutex> lock(mMutex);
+
+    camera_metadata_ro_entry_t entry;
+
+    entry = deviceInfo->find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
+    if (entry.count != 4) return BAD_VALUE;
+    int32_t arrayW = entry.data.i32[2];
+    int32_t arrayH = entry.data.i32[3];
+
+    entry = deviceInfo->find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+    if (entry.count != 4) return BAD_VALUE;
+    int32_t activeW = entry.data.i32[2];
+    int32_t activeH = entry.data.i32[3];
+
+    if (usePrecorrectArray) {
+        mArrayWidth = arrayW;
+        mArrayHeight = arrayH;
+    } else {
+        mArrayWidth = activeW;
+        mArrayHeight = activeH;
+    }
+    mHalSupportsZoomRatio = supportNativeZoomRatio;
+
+    ALOGV("%s: array size: %d x %d, mHalSupportsZoomRatio %d",
+            __FUNCTION__, mArrayWidth, mArrayHeight, mHalSupportsZoomRatio);
+    return OK;
+}
+
+status_t ZoomRatioMapper::updateCaptureRequest(CameraMetadata* request) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    status_t res = OK;
+    bool zoomRatioIs1 = true;
+    camera_metadata_entry_t entry;
+
+    entry = request->find(ANDROID_CONTROL_ZOOM_RATIO);
+    if (entry.count == 1 && entry.data.f[0] != 1.0f) {
+        zoomRatioIs1 = false;
+    }
+
+    if (mHalSupportsZoomRatio && zoomRatioIs1) {
+        res = separateZoomFromCropLocked(request, false/*isResult*/);
+    } else if (!mHalSupportsZoomRatio && !zoomRatioIs1) {
+        res = combineZoomAndCropLocked(request, false/*isResult*/);
+    }
+
+    // If CONTROL_ZOOM_RATIO is in request, but HAL doesn't support
+    // CONTROL_ZOOM_RATIO, remove it from the request.
+    if (!mHalSupportsZoomRatio && entry.count == 1) {
+        request->erase(ANDROID_CONTROL_ZOOM_RATIO);
+    }
+
+    return res;
+}
+
+status_t ZoomRatioMapper::updateCaptureResult(CameraMetadata* result, bool requestedZoomRatioIs1) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    status_t res = OK;
+
+    if (mHalSupportsZoomRatio && requestedZoomRatioIs1) {
+        res = combineZoomAndCropLocked(result, true/*isResult*/);
+    } else if (!mHalSupportsZoomRatio && !requestedZoomRatioIs1) {
+        res = separateZoomFromCropLocked(result, true/*isResult*/);
+    } else {
+        camera_metadata_entry_t entry = result->find(ANDROID_CONTROL_ZOOM_RATIO);
+        if (entry.count == 0) {
+            float zoomRatio1x = 1.0f;
+            result->update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio1x, 1);
+        }
+    }
+
+    return res;
+}
+
+float ZoomRatioMapper::deriveZoomRatio(const CameraMetadata* metadata) {
+    float zoomRatio = 1.0;
+
+    camera_metadata_ro_entry_t entry;
+    entry = metadata->find(ANDROID_SCALER_CROP_REGION);
+    if (entry.count != 4) return zoomRatio;
+
+    // Center of the preCorrection/active size
+    float arrayCenterX = mArrayWidth / 2.0;
+    float arrayCenterY = mArrayHeight / 2.0;
+
+    // Re-map crop region to coordinate system centered to (arrayCenterX,
+    // arrayCenterY).
+    float cropRegionLeft = arrayCenterX - entry.data.i32[0];
+    float cropRegionTop = arrayCenterY - entry.data.i32[1];
+    float cropRegionRight = entry.data.i32[0] + entry.data.i32[2] - arrayCenterX;
+    float cropRegionBottom = entry.data.i32[1] + entry.data.i32[3] - arrayCenterY;
+
+    // Calculate the scaling factor for left, top, bottom, right
+    float zoomRatioLeft = std::max(mArrayWidth / (2 * cropRegionLeft), 1.0f);
+    float zoomRatioTop = std::max(mArrayHeight / (2 * cropRegionTop), 1.0f);
+    float zoomRatioRight = std::max(mArrayWidth / (2 * cropRegionRight), 1.0f);
+    float zoomRatioBottom = std::max(mArrayHeight / (2 * cropRegionBottom), 1.0f);
+
+    // Use minimum scaling factor to handle letterboxing or pillarboxing
+    zoomRatio = std::min(std::min(zoomRatioLeft, zoomRatioRight),
+            std::min(zoomRatioTop, zoomRatioBottom));
+
+    ALOGV("%s: derived zoomRatio is %f", __FUNCTION__, zoomRatio);
+    return zoomRatio;
+}
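
As a concrete check of the math above (an illustrative standalone sketch, not part of the patch; the values are taken from test2xCropRegion in the unit test below):

    #include <algorithm>

    // 1024x768 array with a centered 2x crop region (256, 192, 512, 384).
    float exampleDeriveZoomRatio() {
        const float arrayW = 1024.0f, arrayH = 768.0f;
        const float left = arrayW / 2 - 256.0f;             // 256
        const float top = arrayH / 2 - 192.0f;              // 192
        const float right = 256.0f + 512.0f - arrayW / 2;   // 256
        const float bottom = 192.0f + 384.0f - arrayH / 2;  // 192

        const float zl = std::max(arrayW / (2 * left), 1.0f);   // 2.0
        const float zt = std::max(arrayH / (2 * top), 1.0f);    // 2.0
        const float zr = std::max(arrayW / (2 * right), 1.0f);  // 2.0
        const float zb = std::max(arrayH / (2 * bottom), 1.0f); // 2.0
        return std::min(std::min(zl, zr), std::min(zt, zb));    // 2.0
    }

Each crop edge sits a quarter of the array dimension away from the center, so every per-edge factor is 2.0 and the derived ratio is 2.0. A letterboxed crop is instead limited by its horizontal factors of 1.0, which is why the minimum is taken.
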
+
+status_t ZoomRatioMapper::separateZoomFromCropLocked(CameraMetadata* metadata, bool isResult) {
+    status_t res;
+    float zoomRatio = deriveZoomRatio(metadata);
+
+    // Update zoomRatio metadata tag
+    res = metadata->update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
+    if (res != OK) {
+        ALOGE("%s: Failed to update ANDROID_CONTROL_ZOOM_RATIO: %s(%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    // Scale regions using zoomRatio
+    camera_metadata_entry_t entry;
+    for (auto region : kMeteringRegionsToCorrect) {
+        entry = metadata->find(region);
+        for (size_t j = 0; j < entry.count; j += 5) {
+            int32_t weight = entry.data.i32[j + 4];
+            if (weight == 0) {
+                continue;
+            }
+            // Top-left is inclusively clamped
+            scaleCoordinates(entry.data.i32 + j, 1, zoomRatio, ClampInclusive);
+            // Bottom-right is exclusively clamped
+            scaleCoordinates(entry.data.i32 + j + 2, 1, zoomRatio, ClampExclusive);
+        }
+    }
+
+    for (auto rect : kRectsToCorrect) {
+        entry = metadata->find(rect);
+        scaleRects(entry.data.i32, entry.count / 4, zoomRatio);
+    }
+
+    if (isResult) {
+        for (auto pts : kResultPointsToCorrectNoClamp) {
+            entry = metadata->find(pts);
+            scaleCoordinates(entry.data.i32, entry.count / 2, zoomRatio, ClampOff);
+        }
+    }
+
+    return OK;
+}
+
+status_t ZoomRatioMapper::combineZoomAndCropLocked(CameraMetadata* metadata, bool isResult) {
+    float zoomRatio = 1.0f;
+    camera_metadata_entry_t entry;
+    entry = metadata->find(ANDROID_CONTROL_ZOOM_RATIO);
+    if (entry.count == 1) {
+        zoomRatio = entry.data.f[0];
+    }
+
+    // Unscale regions with zoomRatio
+    status_t res;
+    for (auto region : kMeteringRegionsToCorrect) {
+        entry = metadata->find(region);
+        for (size_t j = 0; j < entry.count; j += 5) {
+            int32_t weight = entry.data.i32[j + 4];
+            if (weight == 0) {
+                continue;
+            }
+            // Top-left is inclusively clamped
+            scaleCoordinates(entry.data.i32 + j, 1, 1.0 / zoomRatio, ClampInclusive);
+            // Bottom-right is exclusively clamped
+            scaleCoordinates(entry.data.i32 + j + 2, 1, 1.0 / zoomRatio, ClampExclusive);
+        }
+    }
+    for (auto rect : kRectsToCorrect) {
+        entry = metadata->find(rect);
+        scaleRects(entry.data.i32, entry.count / 4, 1.0 / zoomRatio);
+    }
+    if (isResult) {
+        for (auto pts : kResultPointsToCorrectNoClamp) {
+            entry = metadata->find(pts);
+            scaleCoordinates(entry.data.i32, entry.count / 2, 1.0 / zoomRatio, ClampOff);
+        }
+    }
+
+    zoomRatio = 1.0;
+    res = metadata->update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
+    if (res != OK) {
+        return res;
+    }
+
+    return OK;
+}
+
+void ZoomRatioMapper::scaleCoordinates(int32_t* coordPairs, int coordCount,
+        float scaleRatio, ClampMode clamp) {
+    for (int i = 0; i < coordCount * 2; i += 2) {
+        float x = coordPairs[i];
+        float y = coordPairs[i + 1];
+        float xCentered = x - mArrayWidth / 2.0f;
+        float yCentered = y - mArrayHeight / 2.0f;
+        float scaledX = xCentered * scaleRatio;
+        float scaledY = yCentered * scaleRatio;
+        scaledX += mArrayWidth / 2.0f;
+        scaledY += mArrayHeight / 2.0f;
+        coordPairs[i] = static_cast<int32_t>(scaledX);
+        coordPairs[i+1] = static_cast<int32_t>(scaledY);
+        // Clamp to within activeArray/preCorrectionActiveArray if requested
+        if (clamp != ClampOff) {
+            int32_t right, bottom;
+            if (clamp == ClampInclusive) {
+                right = mArrayWidth - 1;
+                bottom = mArrayHeight - 1;
+            } else {
+                right = mArrayWidth;
+                bottom = mArrayHeight;
+            }
+            coordPairs[i] =
+                    std::min(right, std::max(0, coordPairs[i]));
+            coordPairs[i+1] =
+                    std::min(bottom, std::max(0, coordPairs[i+1]));
+        }
+        ALOGV("%s: coordinates: %d, %d", __FUNCTION__, coordPairs[i], coordPairs[i+1]);
+    }
+}
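
Scaling is anchored at the array center, so a 2x ratio moves every coordinate twice as far from the center; the clamp mode only matters for points that land outside the array. A small numeric illustration (assuming a mapper initialized with the 1024x768 active array used by the tests below):

    int32_t pts[4] = {256, 192, 1024, 768};  // (x0, y0), (x1, y1)
    mapper.scaleCoordinates(pts, 2, 2.0f, ZoomRatioMapper::ClampInclusive);
    // pts is now {0, 0, 1023, 767}: (256, 192) maps exactly onto the array
    // origin, while the bottom-right corner would map to (1536, 1152) and is
    // clamped to the last valid pixel; ClampExclusive would give (1024, 768).
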
+
+void ZoomRatioMapper::scaleRects(int32_t* rects, int rectCount,
+        float scaleRatio) {
+    for (int i = 0; i < rectCount * 4; i += 4) {
+        // Map from (l, t, width, height) to (l, t, r, b).
+        // [l, t] is inclusive, and [r, b] is exclusive.
+        int32_t coords[4] = {
+            rects[i],
+            rects[i + 1],
+            rects[i] + rects[i + 2],
+            rects[i + 1] + rects[i + 3]
+        };
+
+        // top-left
+        scaleCoordinates(coords, 1, scaleRatio, ClampInclusive);
+        // bottom-right
+        scaleCoordinates(coords+2, 1, scaleRatio, ClampExclusive);
+
+        // Map back to (l, t, width, height)
+        rects[i] = coords[0];
+        rects[i + 1] = coords[1];
+        rects[i + 2] = coords[2] - coords[0];
+        rects[i + 3] = coords[3] - coords[1];
+    }
+}
+
+} // namespace camera3
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.h b/services/camera/libcameraservice/device3/ZoomRatioMapper.h
new file mode 100644
index 0000000..38efbfd
--- /dev/null
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_ZOOMRATIOMAPPER_H
+#define ANDROID_SERVERS_ZOOMRATIOMAPPER_H
+
+#include <utils/Errors.h>
+#include <array>
+#include <mutex>
+
+#include "camera/CameraMetadata.h"
+#include "device3/CoordinateMapper.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Utilities to convert between the new zoomRatio and the existing cropRegion
+ * metadata tags. Note that this class performs conversions in two scenarios:
+ * - the HAL supports zoomRatio but the application uses cropRegion, or
+ * - the HAL doesn't support zoomRatio but the application uses zoomRatio
+ */
+class ZoomRatioMapper : private CoordinateMapper {
+  public:
+    ZoomRatioMapper();
+    ZoomRatioMapper(const ZoomRatioMapper& other) :
+            mHalSupportsZoomRatio(other.mHalSupportsZoomRatio),
+            mArrayWidth(other.mArrayWidth), mArrayHeight(other.mArrayHeight) {}
+
+    /**
+     * Initialize request template with valid zoomRatio if necessary.
+     */
+    static status_t initZoomRatioInTemplate(CameraMetadata *request);
+
+    /**
+     * Override zoomRatio related tags in the static metadata.
+     */
+    static status_t overrideZoomRatioTags(
+            CameraMetadata* deviceInfo, bool* supportNativeZoomRatio);
+
+    /**
+     * Initialize zoom ratio mapper with static metadata.
+     *
+     * Note:
+     * This function only reads zoomRatio related tags from the static
+     * metadata; overrideZoomRatioTags() is what modifies them.
+     */
+    status_t initZoomRatioTags(const CameraMetadata *deviceInfo,
+            bool supportNativeZoomRatio, bool usePrecorrectArray);
+
+    /**
+     * Update capture request to handle both cropRegion and zoomRatio.
+     */
+    status_t updateCaptureRequest(CameraMetadata *request);
+
+    /**
+     * Update capture result to handle both cropRegion and zoomRatio.
+     */
+    status_t updateCaptureResult(CameraMetadata *result, bool requestedZoomRatioIs1);
+
+  public: // Visible for testing. Do not use concurrently.
+    enum ClampMode {
+        ClampOff,
+        ClampInclusive,
+        ClampExclusive,
+    };
+
+    void scaleCoordinates(int32_t* coordPairs, int coordCount,
+            float scaleRatio, ClampMode clamp);
+
+  private:
+    bool mHalSupportsZoomRatio;
+
+    // active array / pre-correction array dimension
+    int32_t mArrayWidth, mArrayHeight;
+
+    mutable std::mutex mMutex;
+
+    float deriveZoomRatio(const CameraMetadata* metadata);
+
+    void scaleRects(int32_t* rects, int rectCount, float scaleRatio);
+
+    status_t separateZoomFromCropLocked(CameraMetadata* metadata, bool isResult);
+    status_t combineZoomAndCropLocked(CameraMetadata* metadata, bool isResult);
+};
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
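
Putting the header together, the expected call order is: override the static tags once, initialize the mapper from the (possibly overridden) static metadata, then run the per-capture conversions. A sketch of that order (it mirrors setupTestMapper() in the unit test below; the real Camera3Device wiring is not shown in this change):

    bool supportNativeZoomRatio;
    ZoomRatioMapper::overrideZoomRatioTags(&deviceInfo, &supportNativeZoomRatio);

    ZoomRatioMapper mapper;
    mapper.initZoomRatioTags(&deviceInfo, supportNativeZoomRatio,
            usePrecorrectArray);  // depends on distortion correction mode

    ZoomRatioMapper::initZoomRatioInTemplate(&requestTemplate);  // per template
    mapper.updateCaptureRequest(&request);                       // per capture
    mapper.updateCaptureResult(&result, requestedZoomRatioIs1);  // per result
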
diff --git a/services/camera/libcameraservice/tests/ZoomRatioTest.cpp b/services/camera/libcameraservice/tests/ZoomRatioTest.cpp
new file mode 100644
index 0000000..1bfa03f
--- /dev/null
+++ b/services/camera/libcameraservice/tests/ZoomRatioTest.cpp
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "ZoomRatioMapperTest"
+
+#include <gtest/gtest.h>
+#include <utils/Errors.h>
+
+#include "../device3/ZoomRatioMapper.h"
+
+using namespace std;
+using namespace android;
+using namespace android::camera3;
+
+constexpr int kMaxAllowedPixelError = 1;
+constexpr float kMaxAllowedRatioError = 0.1;
+
+constexpr int32_t testActiveArraySize[] = {100, 100, 1024, 768};
+constexpr int32_t testPreCorrActiveArraySize[] = {90, 90, 1044, 788};
+
+constexpr int32_t testDefaultCropSize[][4] = {
+      {0, 0, 1024, 768},   // active array default crop
+      {0, 0, 1044, 788},   // preCorrection active array default crop
+};
+
+constexpr int32_t test2xCropRegion[][4] = {
+      {256, 192, 512, 384}, // active array 2x zoom crop
+      {261, 197, 522, 394}, // preCorrection active array 2x zoom crop
+};
+
+constexpr int32_t testLetterBoxSize[][4] = {
+      {0, 96, 1024, 576}, // active array letterbox crop
+      {0, 106, 1024, 576}, // preCorrection active array letterbox crop
+};
+
+status_t setupTestMapper(ZoomRatioMapper *m, float maxDigitalZoom,
+        const int32_t activeArray[4], const int32_t preCorrectArray[4],
+        bool hasZoomRatioRange, float zoomRatioRange[2],
+        bool usePreCorrectArray) {
+    CameraMetadata deviceInfo;
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, activeArray, 4);
+    deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, preCorrectArray, 4);
+    deviceInfo.update(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, &maxDigitalZoom, 1);
+    if (hasZoomRatioRange) {
+        deviceInfo.update(ANDROID_CONTROL_ZOOM_RATIO_RANGE, zoomRatioRange, 2);
+    }
+
+    bool supportNativeZoomRatio;
+    status_t res = ZoomRatioMapper::overrideZoomRatioTags(&deviceInfo, &supportNativeZoomRatio);
+    if (res != OK) {
+        return res;
+    }
+
+    return m->initZoomRatioTags(&deviceInfo, hasZoomRatioRange, usePreCorrectArray);
+}
+
+TEST(ZoomRatioTest, Initialization) {
+    CameraMetadata deviceInfo;
+    status_t res;
+    camera_metadata_entry_t entry;
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+            testPreCorrActiveArraySize, 4);
+    deviceInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, testActiveArraySize, 4);
+
+    // Test initialization from devices not supporting zoomRange
+    float maxDigitalZoom = 4.0f;
+    ZoomRatioMapper mapperNoZoomRange;
+    deviceInfo.update(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, &maxDigitalZoom, 1);
+    bool supportNativeZoomRatio;
+    res = ZoomRatioMapper::overrideZoomRatioTags(&deviceInfo, &supportNativeZoomRatio);
+    ASSERT_EQ(res, OK);
+    ASSERT_EQ(supportNativeZoomRatio, false);
+    res = mapperNoZoomRange.initZoomRatioTags(&deviceInfo,
+            supportNativeZoomRatio, true/*usePreCorrectArray*/);
+    ASSERT_EQ(res, OK);
+    res = mapperNoZoomRange.initZoomRatioTags(&deviceInfo,
+            supportNativeZoomRatio, false/*usePreCorrectArray*/);
+    ASSERT_EQ(res, OK);
+
+    entry = deviceInfo.find(ANDROID_CONTROL_ZOOM_RATIO_RANGE);
+    ASSERT_EQ(entry.count, 2U);
+    ASSERT_EQ(entry.data.f[0], 1.0);
+    ASSERT_EQ(entry.data.f[1], maxDigitalZoom);
+
+    entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+    ASSERT_GT(entry.count, 0U);
+    ASSERT_NE(std::find(entry.data.i32, entry.data.i32 + entry.count,
+            ANDROID_CONTROL_ZOOM_RATIO_RANGE), entry.data.i32 + entry.count);
+
+    entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
+    ASSERT_GT(entry.count, 0U);
+    ASSERT_NE(std::find(entry.data.i32, entry.data.i32 + entry.count,
+            ANDROID_CONTROL_ZOOM_RATIO), entry.data.i32 + entry.count);
+
+    entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS);
+    ASSERT_GT(entry.count, 0U);
+    ASSERT_NE(std::find(entry.data.i32, entry.data.i32 + entry.count,
+            ANDROID_CONTROL_ZOOM_RATIO), entry.data.i32 + entry.count);
+
+    // Test initialization from devices supporting zoomRange
+    float ratioRange[2] = {0.2f, maxDigitalZoom};
+    deviceInfo.update(ANDROID_CONTROL_ZOOM_RATIO_RANGE, ratioRange, 2);
+    res = ZoomRatioMapper::overrideZoomRatioTags(&deviceInfo, &supportNativeZoomRatio);
+    ASSERT_EQ(res, OK);
+    ASSERT_EQ(supportNativeZoomRatio, true);
+    ZoomRatioMapper mapperWithZoomRange;
+    res = mapperWithZoomRange.initZoomRatioTags(&deviceInfo,
+            supportNativeZoomRatio, true/*usePreCorrectArray*/);
+    ASSERT_EQ(res, OK);
+    res = mapperWithZoomRange.initZoomRatioTags(&deviceInfo,
+            supportNativeZoomRatio, false/*usePreCorrectArray*/);
+    ASSERT_EQ(res, OK);
+
+    entry = deviceInfo.find(ANDROID_CONTROL_ZOOM_RATIO_RANGE);
+    ASSERT_EQ(entry.count, 2U);
+    ASSERT_EQ(entry.data.f[0], ratioRange[0]);
+    ASSERT_EQ(entry.data.f[1], ratioRange[1]);
+
+    // Test default zoom ratio in template
+    CameraMetadata requestTemplate;
+    res = ZoomRatioMapper::initZoomRatioInTemplate(&requestTemplate);
+    ASSERT_EQ(res, OK);
+    entry = requestTemplate.find(ANDROID_CONTROL_ZOOM_RATIO);
+    ASSERT_EQ(entry.count, 1U);
+    ASSERT_EQ(entry.data.f[0], 1.0f);
+
+    float customRatio = 0.5f;
+    res = requestTemplate.update(ANDROID_CONTROL_ZOOM_RATIO, &customRatio, 1);
+    ASSERT_EQ(res, OK);
+    res = ZoomRatioMapper::initZoomRatioInTemplate(&requestTemplate);
+    ASSERT_EQ(res, OK);
+    entry = requestTemplate.find(ANDROID_CONTROL_ZOOM_RATIO);
+    ASSERT_EQ(entry.count, 1U);
+    ASSERT_EQ(entry.data.f[0], customRatio);
+}
+
+void subScaleCoordinatesTest(bool usePreCorrectArray) {
+    ZoomRatioMapper mapper;
+    float maxDigitalZoom = 4.0f;
+    float zoomRatioRange[2];
+    ASSERT_EQ(OK, setupTestMapper(&mapper, maxDigitalZoom,
+            testActiveArraySize, testPreCorrActiveArraySize,
+            false/*hasZoomRatioRange*/, zoomRatioRange,
+            usePreCorrectArray));
+
+    size_t index = 0;
+    int32_t width = testActiveArraySize[2];
+    int32_t height = testActiveArraySize[3];
+    if (usePreCorrectArray) {
+        index = 1;
+        width = testPreCorrActiveArraySize[2];
+        height = testPreCorrActiveArraySize[3];
+    }
+
+    std::array<int32_t, 16> originalCoords = {
+            0, 0, // top-left
+            width, 0, // top-right
+            0, height, // bottom-left
+            width, height, // bottom-right
+            width / 2, height / 2, // center
+            width / 4, height / 4, // top-left after 2x
+            width / 3, height * 2 / 3, // bottom-left after 3x zoom
+            width * 7 / 8, height / 2, // middle-right after 1.33x zoom
+    };
+
+    // Verify 1.0x zoom doesn't change the coordinates
+    auto coords = originalCoords;
+    mapper.scaleCoordinates(coords.data(), coords.size()/2, 1.0f, ZoomRatioMapper::ClampOff);
+    for (size_t i = 0; i < coords.size(); i++) {
+        EXPECT_EQ(coords[i], originalCoords[i]);
+    }
+
+    // Verify 2.0x zoom works as expected (no clamping)
+    std::array<float, 16> expected2xCoords = {
+            - width / 2.0f, - height / 2.0f, // top-left
+            width * 3 / 2.0f, - height / 2.0f, // top-right
+            - width / 2.0f, height * 3 / 2.0f, // bottom-left
+            width * 3 / 2.0f, height * 3 / 2.0f, // bottom-right
+            width / 2.0f, height / 2.0f, // center
+            0, 0, // top-left after 2x
+            width / 6.0f, height - height / 6.0f, // bottom-left after 3x zoom
+            width + width / 4.0f, height / 2.0f, // middle-right after 1.33x zoom
+    };
+    coords = originalCoords;
+    mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, ZoomRatioMapper::ClampOff);
+    for (size_t i = 0; i < coords.size(); i++) {
+        EXPECT_LE(std::abs(coords[i] - expected2xCoords[i]), kMaxAllowedPixelError);
+    }
+
+    // Verify 2.0x zoom works as expected (with inclusive clamping)
+    std::array<float, 16> expected2xCoordsClampedInc = {
+            0, 0, // top-left
+            static_cast<float>(width) - 1, 0, // top-right
+            0, static_cast<float>(height) - 1, // bottom-left
+            static_cast<float>(width) - 1, static_cast<float>(height) - 1, // bottom-right
+            width / 2.0f, height / 2.0f, // center
+            0, 0, // top-left after 2x
+            width / 6.0f, height - height / 6.0f, // bottom-left after 3x zoom
+            static_cast<float>(width) - 1, height / 2.0f, // middle-right after 1.33x zoom
+    };
+    coords = originalCoords;
+    mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, ZoomRatioMapper::ClampInclusive);
+    for (size_t i = 0; i < coords.size(); i++) {
+        EXPECT_LE(std::abs(coords[i] - expected2xCoordsClampedInc[i]), kMaxAllowedPixelError);
+    }
+
+    // Verify 2.0x zoom works as expected (with exclusive clamping)
+    std::array<float, 16> expected2xCoordsClampedExc = {
+            0, 0, // top-left
+            static_cast<float>(width), 0, // top-right
+            0, static_cast<float>(height), // bottom-left
+            static_cast<float>(width), static_cast<float>(height), // bottom-right
+            width / 2.0f, height / 2.0f, // center
+            0, 0, // top-left after 2x
+            width / 6.0f, height - height / 6.0f, // bottom-left after 3x zoom
+            static_cast<float>(width), height / 2.0f, // middle-right after 1.33x zoom
+    };
+    coords = originalCoords;
+    mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, ZoomRatioMapper::ClampExclusive);
+    for (size_t i = 0; i < coords.size(); i++) {
+        EXPECT_LE(std::abs(coords[i] - expected2xCoordsClampedExc[i]), kMaxAllowedPixelError);
+    }
+
+    // Verify 0.33x zoom works as expected
+    std::array<float, 16> expectedZoomOutCoords = {
+            width / 3.0f, height / 3.0f, // top-left
+            width * 2 / 3.0f, height / 3.0f, // top-right
+            width / 3.0f, height * 2 / 3.0f, // bottom-left
+            width * 2 / 3.0f, height * 2 / 3.0f, // bottom-right
+            width / 2.0f, height / 2.0f, // center
+            width * 5 / 12.0f, height * 5 / 12.0f, // top-left after 2x
+            width * 4 / 9.0f, height * 5 / 9.0f, // bottom-left after 3x zoom-in
+            width * 5 / 8.0f, height / 2.0f, // middle-right after 1.33x zoom-in
+    };
+    coords = originalCoords;
+    mapper.scaleCoordinates(coords.data(), coords.size()/2, 1.0f/3, ZoomRatioMapper::ClampOff);
+    for (size_t i = 0; i < coords.size(); i++) {
+        EXPECT_LE(std::abs(coords[i] - expectedZoomOutCoords[i]), kMaxAllowedPixelError);
+    }
+}
+
+TEST(ZoomRatioTest, scaleCoordinatesTest) {
+    subScaleCoordinatesTest(false/*usePreCorrectArray*/);
+    subScaleCoordinatesTest(true/*usePreCorrectArray*/);
+}
+
+void subCropOverMaxDigitalZoomTest(bool usePreCorrectArray) {
+    status_t res;
+    ZoomRatioMapper mapper;
+    float noZoomRatioRange[2];
+    res = setupTestMapper(&mapper, 4.0/*maxDigitalZoom*/,
+            testActiveArraySize, testPreCorrActiveArraySize,
+            false/*hasZoomRatioRange*/, noZoomRatioRange,
+            usePreCorrectArray);
+    ASSERT_EQ(res, OK);
+
+    CameraMetadata metadata;
+    camera_metadata_entry_t entry;
+
+    size_t index = usePreCorrectArray ? 1 : 0;
+    metadata.update(ANDROID_SCALER_CROP_REGION, testDefaultCropSize[index], 4);
+    res = mapper.updateCaptureRequest(&metadata);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], testDefaultCropSize[index][i]);
+    }
+
+    metadata.update(ANDROID_SCALER_CROP_REGION, test2xCropRegion[index], 4);
+    res = mapper.updateCaptureResult(&metadata, true/*requestedZoomRatioIs1*/);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], test2xCropRegion[index][i]);
+    }
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    ASSERT_TRUE(entry.count == 0 || (entry.count == 1 && entry.data.f[0] == 1.0f));
+}
+
+TEST(ZoomRatioTest, CropOverMaxDigitalZoomTest) {
+    subCropOverMaxDigitalZoomTest(false/*usePreCorrectArray*/);
+    subCropOverMaxDigitalZoomTest(true/*usePreCorrectArray*/);
+}
+
+void subCropOverZoomRangeTest(bool usePreCorrectArray) {
+    status_t res;
+    ZoomRatioMapper mapper;
+    float zoomRatioRange[2] = {0.5f, 4.0f};
+    res = setupTestMapper(&mapper, 4.0/*maxDigitalZoom*/,
+            testActiveArraySize, testPreCorrActiveArraySize,
+            true/*hasZoomRatioRange*/, zoomRatioRange,
+            usePreCorrectArray);
+    ASSERT_EQ(res, OK);
+
+    CameraMetadata metadata;
+    camera_metadata_entry_t entry;
+
+    size_t index = usePreCorrectArray ? 1 : 0;
+
+    // 2x zoom crop region, zoomRatio is 1.0f
+    metadata.update(ANDROID_SCALER_CROP_REGION, test2xCropRegion[index], 4);
+    res = mapper.updateCaptureRequest(&metadata);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], testDefaultCropSize[index][i]);
+    }
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    EXPECT_NEAR(entry.data.f[0], 2.0f, kMaxAllowedRatioError);
+
+    res = mapper.updateCaptureResult(&metadata, true/*requestedZoomRatioIs1*/);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    EXPECT_NEAR(entry.data.f[0], 1.0f, kMaxAllowedRatioError);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], test2xCropRegion[index][i]);
+    }
+
+    // Letter boxing crop region, zoomRatio is 1.0
+    float zoomRatio = 1.0f;
+    metadata.update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
+    metadata.update(ANDROID_SCALER_CROP_REGION, testLetterBoxSize[index], 4);
+    res = mapper.updateCaptureRequest(&metadata);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], testLetterBoxSize[index][i]);
+    }
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    EXPECT_NEAR(entry.data.f[0], 1.0f, kMaxAllowedRatioError);
+
+    res = mapper.updateCaptureResult(&metadata, true/*requestedZoomRatioIs1*/);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], testLetterBoxSize[index][i]);
+    }
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    EXPECT_NEAR(entry.data.f[0], 1.0f, kMaxAllowedRatioError);
+}
+
+TEST(ZoomRatioTest, CropOverZoomRangeTest) {
+    subCropOverZoomRangeTest(false/*usePreCorrectArray*/);
+    subCropOverZoomRangeTest(true/*usePreCorrectArray*/);
+}
+
+void subZoomOverMaxDigitalZoomTest(bool usePreCorrectArray) {
+    status_t res;
+    ZoomRatioMapper mapper;
+    float noZoomRatioRange[2];
+    res = setupTestMapper(&mapper, 4.0/*maxDigitalZoom*/,
+            testActiveArraySize, testPreCorrActiveArraySize,
+            false/*hasZoomRatioRange*/, noZoomRatioRange,
+            usePreCorrectArray);
+    ASSERT_EQ(res, OK);
+
+    CameraMetadata metadata;
+    float zoomRatio = 3.0f;
+    camera_metadata_entry_t entry;
+
+    size_t index = usePreCorrectArray ? 1 : 0;
+
+    // Full active array crop, zoomRatio is 3.0f
+    metadata.update(ANDROID_SCALER_CROP_REGION, testDefaultCropSize[index], 4);
+    metadata.update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
+    res = mapper.updateCaptureRequest(&metadata);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    std::array<float, 4> expectedCrop = {
+        testDefaultCropSize[index][2] / 3.0f, /*x*/
+        testDefaultCropSize[index][3] / 3.0f, /*y*/
+        testDefaultCropSize[index][2] / 3.0f, /*width*/
+        testDefaultCropSize[index][3] / 3.0f, /*height*/
+    };
+    for (int i = 0; i < 4; i++) {
+        EXPECT_LE(std::abs(entry.data.i32[i] - expectedCrop[i]), kMaxAllowedPixelError);
+    }
+
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    if (entry.count == 1) {
+        EXPECT_NEAR(entry.data.f[0], 1.0f, kMaxAllowedRatioError);
+    }
+}
+
+TEST(ZoomRatioTest, ZoomOverMaxDigitalZoomTest) {
+    subZoomOverMaxDigitalZoomTest(false/*usePreCorrectArray*/);
+    subZoomOverMaxDigitalZoomTest(true/*usePreCorrectArray*/);
+}
+
+void subZoomOverZoomRangeTest(bool usePreCorrectArray) {
+    status_t res;
+    ZoomRatioMapper mapper;
+    float zoomRatioRange[2] = {1.0f, 4.0f};
+    res = setupTestMapper(&mapper, 4.0/*maxDigitalZoom*/,
+            testActiveArraySize, testPreCorrActiveArraySize,
+            true/*hasZoomRatioRange*/, zoomRatioRange,
+            usePreCorrectArray);
+    ASSERT_EQ(res, OK);
+
+    CameraMetadata metadata;
+    float zoomRatio = 3.0f;
+    camera_metadata_entry_t entry;
+    size_t index = usePreCorrectArray ? 1 : 0;
+
+    // Full active array crop, zoomRatio is 3.0f
+    metadata.update(ANDROID_SCALER_CROP_REGION, testDefaultCropSize[index], 4);
+    metadata.update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
+    res = mapper.updateCaptureRequest(&metadata);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], testDefaultCropSize[index][i]);
+    }
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    ASSERT_EQ(entry.data.f[0], zoomRatio);
+
+    res = mapper.updateCaptureResult(&metadata, false/*requestedZoomRatioIs1*/);
+    ASSERT_EQ(res, OK);
+    entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+    ASSERT_EQ(entry.count, 4U);
+    for (int i = 0; i < 4; i++) {
+        EXPECT_EQ(entry.data.i32[i], testDefaultCropSize[index][i]);
+    }
+    entry = metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    ASSERT_EQ(entry.data.f[0], zoomRatio);
+}
+
+TEST(ZoomRatioTest, ZoomOverZoomRangeTest) {
+    subZoomOverZoomRangeTest(false/*usePreCorrectArray*/);
+    subZoomOverZoomRangeTest(true/*usePreCorrectArray*/);
+}
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index 3141c31..5811068 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -15,10 +15,6 @@
     ],
 
     target: {
-        vendor: {
-            exclude_shared_libs: ["libavservices_minijail"],
-            shared_libs: ["libavservices_minijail_vendor"],
-        },
         android: {
             product_variables: {
                 malloc_not_svelte: {
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 1cf0534..88a79e7 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -37,7 +37,7 @@
     libutils \
     liblog \
     libbase \
-    libavservices_minijail_vendor \
+    libavservices_minijail \
     libcutils \
     libhidlbase \
     libstagefright_omx \
diff --git a/services/mediametrics/TimeMachine.h b/services/mediametrics/TimeMachine.h
index 0cd8dc6..b3932de 100644
--- a/services/mediametrics/TimeMachine.h
+++ b/services/mediametrics/TimeMachine.h
@@ -70,8 +70,8 @@
             , mCreationTime(time)
             , mLastModificationTime(time)
         {
-            putValue("_pid", (int32_t)pid, time);
-            putValue("_uid", (int32_t)uid, time);
+            putValue(BUNDLE_PID, (int32_t)pid, time);
+            putValue(BUNDLE_UID, (int32_t)uid, time);
         }
 
         status_t checkPermission(uid_t uidCheck) const {
diff --git a/services/mediametrics/tests/build_and_run_all_unit_tests.sh b/services/mediametrics/tests/build_and_run_all_unit_tests.sh
index 2511c30..382be47 100755
--- a/services/mediametrics/tests/build_and_run_all_unit_tests.sh
+++ b/services/mediametrics/tests/build_and_run_all_unit_tests.sh
@@ -20,5 +20,5 @@
 echo "========================================"
 
 echo "testing mediametrics"
-adb push $OUT/data/nativetest/mediametrics_tests/mediametrics_tests /system/bin
+adb push $OUT/data/nativetest64/mediametrics_tests/mediametrics_tests /system/bin
 adb shell /system/bin/mediametrics_tests
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index 285a1ba..55ce82b 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -274,9 +274,9 @@
           ASSERT_EQ(3.125, d);
           mask |= 4;
       } else if (!strcmp(name, "string")) {
-          const char *s;
+          std::string s;
           ASSERT_TRUE(prop.get(&s));
-          ASSERT_EQ(0, strcmp(s, "abc"));
+          ASSERT_EQ("abc", s);
           mask |= 8;
       } else if (!strcmp(name, "rate")) {
           std::pair<int64_t, int64_t> r;
@@ -323,9 +323,9 @@
           ASSERT_EQ(3.125, d);
           mask |= 4;
       } else if (!strcmp(name, "string")) {
-          const char *s;
+          std::string s;
           ASSERT_TRUE(prop.get(&s));
-          ASSERT_EQ(0, strcmp(s, "abcdefghijklmnopqrstuvwxyz"));
+          ASSERT_EQ("abcdefghijklmnopqrstuvwxyz", s);
           mask |= 8;
       } else if (!strcmp(name, "rate")) {
           std::pair<int64_t, int64_t> r;
diff --git a/services/minijail/Android.bp b/services/minijail/Android.bp
index 07a94cc..0713a87 100644
--- a/services/minijail/Android.bp
+++ b/services/minijail/Android.bp
@@ -17,10 +17,14 @@
 cc_library_shared {
     name: "libavservices_minijail",
     defaults: ["libavservices_minijail_defaults"],
+    vendor_available: true,
     export_include_dirs: ["."],
 }
 
-// Small library for media.extractor and media.codec sandboxing.
+// By adding "vendor_available: true" to "libavservices_minijail", we no
+// longer need "libavservices_minijail_vendor".
+// "libavservices_minijail_vendor" will be removed once all vendor modules
+// have been switched to "libavservices_minijail". (b/146313710)
 cc_library_shared {
     name: "libavservices_minijail_vendor",
     vendor: true,