Merge "Map AC4 mime to audio format"
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 51cef8c..9bf8247 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -38,14 +38,13 @@
 #include <android/native_window.h>
 #include "NdkCameraError.h"
 #include "NdkCameraMetadata.h"
+#include "NdkCaptureRequest.h"
 
 #ifndef _NDK_CAMERA_CAPTURE_SESSION_H
 #define _NDK_CAMERA_CAPTURE_SESSION_H
 
 __BEGIN_DECLS
 
-#if __ANDROID_API__ >= 24
-
 /**
  * ACameraCaptureSession is an opaque type that manages frame captures of a camera device.
  *
@@ -433,7 +432,7 @@
  *
  */
 camera_status_t ACameraCaptureSession_getDevice(
-        ACameraCaptureSession* session, /*out*/ACameraDevice** device);
+        ACameraCaptureSession* session, /*out*/ACameraDevice** device) __INTRODUCED_IN(24);
 
 /**
  * Submit an array of requests to be captured in sequence as a burst in the minimum of time possible.
@@ -471,7 +470,7 @@
         ACameraCaptureSession* session,
         /*optional*/ACameraCaptureSession_captureCallbacks* callbacks,
         int numRequests, ACaptureRequest** requests,
-        /*optional*/int* captureSequenceId);
+        /*optional*/int* captureSequenceId) __INTRODUCED_IN(24);
 
 /**
  * Request endlessly repeating capture of a sequence of images by this capture session.
@@ -525,7 +524,7 @@
         ACameraCaptureSession* session,
         /*optional*/ACameraCaptureSession_captureCallbacks* callbacks,
         int numRequests, ACaptureRequest** requests,
-        /*optional*/int* captureSequenceId);
+        /*optional*/int* captureSequenceId) __INTRODUCED_IN(24);
 
 /**
  * Cancel any ongoing repeating capture set by {@link ACameraCaptureSession_setRepeatingRequest}.
@@ -548,7 +547,8 @@
  *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
  *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
  */
-camera_status_t ACameraCaptureSession_stopRepeating(ACameraCaptureSession* session);
+camera_status_t ACameraCaptureSession_stopRepeating(ACameraCaptureSession* session)
+        __INTRODUCED_IN(24);
 
 /**
  * Discard all captures currently pending and in-progress as fast as possible.
@@ -588,11 +588,8 @@
  *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
  *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
  */
-camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session);
-
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 28
+camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session)
+        __INTRODUCED_IN(24);
 
 typedef struct ACaptureSessionOutput ACaptureSessionOutput;
 
@@ -637,8 +634,7 @@
  *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
  */
 camera_status_t ACameraCaptureSession_updateSharedOutput(ACameraCaptureSession* session,
-        ACaptureSessionOutput* output);
-#endif /* __ANDROID_API__ >= 28 */
+        ACaptureSessionOutput* output) __INTRODUCED_IN(28);
 
 __END_DECLS
 
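For context, this series replaces the per-header "#if __ANDROID_API__ >= N" preprocessor guards with __INTRODUCED_IN(N) availability annotations (matched by the "# introduced=" tags in libcamera2ndk.map.txt further down), so every declaration stays visible at compile time and availability is tracked through the NDK's versioning metadata instead. A minimal sketch, assuming an app with minSdkVersion below 28 that wants to call the API-28 entry point only when it exists; the wrapper name and function-pointer typedef are illustrative, not part of this change:

    #include <dlfcn.h>
    #include <camera/NdkCameraCaptureSession.h>

    typedef camera_status_t (*UpdateSharedOutputFn)(ACameraCaptureSession*, ACaptureSessionOutput*);

    // Probe libcamera2ndk.so at runtime; on devices older than API 28 the symbol
    // is simply absent and dlsym() returns null.
    camera_status_t updateSharedOutputIfAvailable(ACameraCaptureSession* session,
                                                  ACaptureSessionOutput* output) {
        void* lib = dlopen("libcamera2ndk.so", RTLD_NOW);
        if (lib == nullptr) {
            return ACAMERA_ERROR_UNKNOWN;
        }
        auto fn = reinterpret_cast<UpdateSharedOutputFn>(
                dlsym(lib, "ACameraCaptureSession_updateSharedOutput"));
        return fn != nullptr ? fn(session, output) : ACAMERA_ERROR_UNKNOWN;
    }
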
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index b715b12..bdd27f9 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -44,8 +44,6 @@
 
 __BEGIN_DECLS
 
-#if __ANDROID_API__ >= 24
-
 /**
  * ACameraDevice is opaque type that provides access to a camera device.
  *
@@ -176,7 +174,7 @@
  *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device is NULL.</li></ul>
  */
-camera_status_t ACameraDevice_close(ACameraDevice* device);
+camera_status_t ACameraDevice_close(ACameraDevice* device) __INTRODUCED_IN(24);
 
 /**
  * Return the camera id associated with this camera device.
@@ -187,7 +185,7 @@
  * delete/free by the application. Also the returned string must not be used after the device
  * has been closed.
  */
-const char* ACameraDevice_getId(const ACameraDevice* device);
+const char* ACameraDevice_getId(const ACameraDevice* device) __INTRODUCED_IN(24);
 
 typedef enum {
     /**
@@ -290,7 +288,7 @@
  */
 camera_status_t ACameraDevice_createCaptureRequest(
         const ACameraDevice* device, ACameraDevice_request_template templateId,
-        /*out*/ACaptureRequest** request);
+        /*out*/ACaptureRequest** request) __INTRODUCED_IN(24);
 
 
 typedef struct ACaptureSessionOutputContainer ACaptureSessionOutputContainer;
@@ -313,7 +311,7 @@
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container is NULL.</li></ul>
  */
 camera_status_t ACaptureSessionOutputContainer_create(
-        /*out*/ACaptureSessionOutputContainer** container);
+        /*out*/ACaptureSessionOutputContainer** container) __INTRODUCED_IN(24);
 
 /**
  * Free a capture session output container.
@@ -322,7 +320,8 @@
  *
  * @see ACaptureSessionOutputContainer_create
  */
-void            ACaptureSessionOutputContainer_free(ACaptureSessionOutputContainer* container);
+void            ACaptureSessionOutputContainer_free(ACaptureSessionOutputContainer* container)
+        __INTRODUCED_IN(24);
 
 /**
  * Create a ACaptureSessionOutput object.
@@ -344,7 +343,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionOutput_create(
-        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output);
+        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(24);
 
 /**
  * Free a ACaptureSessionOutput object.
@@ -353,7 +352,7 @@
  *
  * @see ACaptureSessionOutput_create
  */
-void            ACaptureSessionOutput_free(ACaptureSessionOutput* output);
+void            ACaptureSessionOutput_free(ACaptureSessionOutput* output) __INTRODUCED_IN(24);
 
 /**
  * Add an {@link ACaptureSessionOutput} object to {@link ACaptureSessionOutputContainer}.
@@ -366,7 +365,8 @@
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container or output is NULL.</li></ul>
  */
 camera_status_t ACaptureSessionOutputContainer_add(
-        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output);
+        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output)
+        __INTRODUCED_IN(24);
 
 /**
  * Remove an {@link ACaptureSessionOutput} object from {@link ACaptureSessionOutputContainer}.
@@ -382,7 +382,8 @@
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container or output is NULL.</li></ul>
  */
 camera_status_t ACaptureSessionOutputContainer_remove(
-        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output);
+        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output)
+        __INTRODUCED_IN(24);
 
 /**
  * Create a new camera capture session by providing the target output set of {@link ANativeWindow}
@@ -663,11 +664,7 @@
         ACameraDevice* device,
         const ACaptureSessionOutputContainer*       outputs,
         const ACameraCaptureSession_stateCallbacks* callbacks,
-        /*out*/ACameraCaptureSession** session);
-
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 28
+        /*out*/ACameraCaptureSession** session) __INTRODUCED_IN(24);
 
 /**
  * Create a shared ACaptureSessionOutput object.
@@ -691,7 +688,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionSharedOutput_create(
-        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output);
+        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(28);
 
 /**
  * Add a native window to shared ACaptureSessionOutput.
@@ -708,7 +705,8 @@
  *             window associated with ACaptureSessionOutput; or anw is already present inside
  *             ACaptureSessionOutput.</li></ul>
  */
-camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *output, ANativeWindow *anw);
+camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *output,
+        ANativeWindow *anw) __INTRODUCED_IN(28);
 
 /**
  * Remove a native window from shared ACaptureSessionOutput.
@@ -724,7 +722,7 @@
  *             ACaptureSessionOutput.</li></ul>
  */
 camera_status_t ACaptureSessionSharedOutput_remove(ACaptureSessionOutput *output,
-        ANativeWindow* anw);
+        ANativeWindow* anw) __INTRODUCED_IN(28);
 
 /**
  * Create a new camera capture session similar to {@link ACameraDevice_createCaptureSession}. This
@@ -757,9 +755,7 @@
         const ACaptureSessionOutputContainer* outputs,
         const ACaptureRequest* sessionParameters,
         const ACameraCaptureSession_stateCallbacks* callbacks,
-        /*out*/ACameraCaptureSession** session);
-
-#endif /* __ANDROID_API__ >= 28 */
+        /*out*/ACameraCaptureSession** session) __INTRODUCED_IN(28);
 
 __END_DECLS
 
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index 6b58155..e19ce36 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -40,8 +40,6 @@
 
 __BEGIN_DECLS
 
-#if __ANDROID_API__ >= 24
-
 typedef enum {
     ACAMERA_OK = 0,
 
@@ -132,8 +130,6 @@
     ACAMERA_ERROR_PERMISSION_DENIED     = ACAMERA_ERROR_BASE - 13,
 } camera_status_t;
 
-#endif /* __ANDROID_API__ >= 24 */
-
 __END_DECLS
 
 #endif /* _NDK_CAMERA_ERROR_H */
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index e5b3ad8..a1cca4d 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -44,8 +44,6 @@
 
 __BEGIN_DECLS
 
-#if __ANDROID_API__ >= 24
-
 /**
  * ACameraManager is opaque type that provides access to camera service.
  *
@@ -65,14 +63,14 @@
  * @return a {@link ACameraManager} instance.
  *
  */
-ACameraManager* ACameraManager_create();
+ACameraManager* ACameraManager_create() __INTRODUCED_IN(24);
 
 /**
  * <p>Delete the {@link ACameraManager} instance and free its resources. </p>
  *
  * @param manager the {@link ACameraManager} instance to be deleted.
  */
-void ACameraManager_delete(ACameraManager* manager);
+void ACameraManager_delete(ACameraManager* manager) __INTRODUCED_IN(24);
 
 /// Struct to hold list of camera devices
 typedef struct ACameraIdList {
@@ -102,14 +100,14 @@
  *         <li>{@link ACAMERA_ERROR_NOT_ENOUGH_MEMORY} if allocating memory fails.</li></ul>
  */
 camera_status_t ACameraManager_getCameraIdList(ACameraManager* manager,
-                                              /*out*/ACameraIdList** cameraIdList);
+        /*out*/ACameraIdList** cameraIdList) __INTRODUCED_IN(24);
 
 /**
  * Delete a list of camera devices allocated via {@link ACameraManager_getCameraIdList}.
  *
  * @param cameraIdList the {@link ACameraIdList} to be deleted.
  */
-void ACameraManager_deleteCameraIdList(ACameraIdList* cameraIdList);
+void ACameraManager_deleteCameraIdList(ACameraIdList* cameraIdList) __INTRODUCED_IN(24);
 
 /**
  * Definition of camera availability callbacks.
@@ -120,7 +118,8 @@
  *                 argument is owned by camera framework and will become invalid immediately after
  *                 this callback returns.
  */
-typedef void (*ACameraManager_AvailabilityCallback)(void* context, const char* cameraId);
+typedef void (*ACameraManager_AvailabilityCallback)(void* context,
+        const char* cameraId) __INTRODUCED_IN(24);
 
 /**
  * A listener for camera devices becoming available or unavailable to open.
@@ -168,7 +167,8 @@
  *                  {ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
  */
 camera_status_t ACameraManager_registerAvailabilityCallback(
-        ACameraManager* manager, const ACameraManager_AvailabilityCallbacks* callback);
+        ACameraManager* manager,
+        const ACameraManager_AvailabilityCallbacks* callback) __INTRODUCED_IN(24);
 
 /**
  * Unregister camera availability callbacks.
@@ -185,7 +185,8 @@
  *                  {ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
  */
 camera_status_t ACameraManager_unregisterAvailabilityCallback(
-        ACameraManager* manager, const ACameraManager_AvailabilityCallbacks* callback);
+        ACameraManager* manager,
+        const ACameraManager_AvailabilityCallbacks* callback) __INTRODUCED_IN(24);
 
 /**
  * Query the capabilities of a camera device. These capabilities are
@@ -211,7 +212,7 @@
  */
 camera_status_t ACameraManager_getCameraCharacteristics(
         ACameraManager* manager, const char* cameraId,
-        /*out*/ACameraMetadata** characteristics);
+        /*out*/ACameraMetadata** characteristics) __INTRODUCED_IN(24);
 
 /**
  * Open a connection to a camera with the given ID. The opened camera device will be
@@ -271,9 +272,7 @@
 camera_status_t ACameraManager_openCamera(
         ACameraManager* manager, const char* cameraId,
         ACameraDevice_StateCallbacks* callback,
-        /*out*/ACameraDevice** device);
-
-#endif /* __ANDROID_API__ >= 24 */
+        /*out*/ACameraDevice** device) __INTRODUCED_IN(24);
 
 __END_DECLS
 
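The NdkCameraManager.h entry points annotated above are normally used together. A minimal open/close sketch, with error handling reduced to early exits and the helper and callback names chosen for illustration:

    #include <camera/NdkCameraManager.h>

    static void onDisconnected(void* /*context*/, ACameraDevice* /*device*/) {}
    static void onError(void* /*context*/, ACameraDevice* /*device*/, int /*error*/) {}

    // Open the first enumerated camera, releasing the id list and manager on all paths.
    camera_status_t openFirstCamera(ACameraDevice** outDevice) {
        ACameraManager* manager = ACameraManager_create();
        ACameraIdList* idList = nullptr;
        camera_status_t status = ACameraManager_getCameraIdList(manager, &idList);
        if (status == ACAMERA_OK && idList->numCameras > 0) {
            ACameraDevice_StateCallbacks callbacks = { nullptr, onDisconnected, onError };
            status = ACameraManager_openCamera(manager, idList->cameraIds[0], &callbacks,
                                               outDevice);
        }
        if (idList != nullptr) {
            ACameraManager_deleteCameraIdList(idList);
        }
        ACameraManager_delete(manager);
        return status;
    }
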
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index bdb1587..2078da7 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -44,8 +44,6 @@
 
 __BEGIN_DECLS
 
-#if __ANDROID_API__ >= 24
-
 /**
  * ACameraMetadata is opaque type that provides access to read-only camera metadata like camera
  * characteristics (via {@link ACameraManager_getCameraCharacteristics}) or capture results (via
@@ -191,7 +189,8 @@
  *             of input tag value.</li></ul>
  */
 camera_status_t ACameraMetadata_getConstEntry(
-        const ACameraMetadata* metadata, uint32_t tag, /*out*/ACameraMetadata_const_entry* entry);
+        const ACameraMetadata* metadata,
+        uint32_t tag, /*out*/ACameraMetadata_const_entry* entry) __INTRODUCED_IN(24);
 
 /**
  * List all the entry tags in input {@link ACameraMetadata}.
@@ -208,7 +207,8 @@
  *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
  */
 camera_status_t ACameraMetadata_getAllTags(
-        const ACameraMetadata* metadata, /*out*/int32_t* numEntries, /*out*/const uint32_t** tags);
+        const ACameraMetadata* metadata,
+        /*out*/int32_t* numEntries, /*out*/const uint32_t** tags) __INTRODUCED_IN(24);
 
 /**
  * Create a copy of input {@link ACameraMetadata}.
@@ -220,16 +220,14 @@
  *
  * @return a valid ACameraMetadata pointer or NULL if the input metadata cannot be copied.
  */
-ACameraMetadata* ACameraMetadata_copy(const ACameraMetadata* src);
+ACameraMetadata* ACameraMetadata_copy(const ACameraMetadata* src) __INTRODUCED_IN(24);
 
 /**
  * Free a {@link ACameraMetadata} structure.
  *
  * @param metadata the {@link ACameraMetadata} to be freed.
  */
-void ACameraMetadata_free(ACameraMetadata* metadata);
-
-#endif /* __ANDROID_API__ >= 24 */
+void ACameraMetadata_free(ACameraMetadata* metadata) __INTRODUCED_IN(24);
 
 __END_DECLS
 
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index c7d2545..7398f78 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -40,8 +40,6 @@
 
 __BEGIN_DECLS
 
-#if __ANDROID_API__ >= 24
-
 typedef enum acamera_metadata_section {
     ACAMERA_COLOR_CORRECTION,
     ACAMERA_CONTROL,
@@ -479,11 +477,26 @@
      * Otherwise will always be present.</p>
      * <p>The maximum number of regions supported by the device is determined by the value
      * of android.control.maxRegionsAe.</p>
-     * <p>The coordinate system is based on the active pixel array,
-     * with (0,0) being the top-left pixel in the active pixel array, and
+     * <p>For devices not supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system always follows that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with (0,0) being
+     * the top-left pixel in the active pixel array, and
      * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
-     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the
-     * bottom-right pixel in the active pixel array.</p>
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right pixel in the
+     * active pixel array.</p>
+     * <p>For devices supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system depends on the mode being set.
+     * When the distortion correction mode is OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the pre-correction active array, and
+     * (ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right
+     * pixel in the pre-correction active pixel array.
+     * When the distortion correction mode is not OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the active array, and
+     * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right pixel in the
+     * active pixel array.</p>
      * <p>The weight must be within <code>[0, 1000]</code>, and represents a weight
      * for every pixel in the area. This means that a large metering area
      * with the same weight as a smaller area will have more effect in
@@ -504,8 +517,10 @@
      * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
      * ymax.</p>
      *
+     * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
     ACAMERA_CONTROL_AE_REGIONS =                                // int32[5*area_count]
             ACAMERA_CONTROL_START + 4,
@@ -641,11 +656,26 @@
      * Otherwise will always be present.</p>
      * <p>The maximum number of focus areas supported by the device is determined by the value
      * of android.control.maxRegionsAf.</p>
-     * <p>The coordinate system is based on the active pixel array,
-     * with (0,0) being the top-left pixel in the active pixel array, and
+     * <p>For devices not supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system always follows that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with (0,0) being
+     * the top-left pixel in the active pixel array, and
      * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
-     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the
-     * bottom-right pixel in the active pixel array.</p>
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right pixel in the
+     * active pixel array.</p>
+     * <p>For devices supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system depends on the mode being set.
+     * When the distortion correction mode is OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the pre-correction active array, and
+     * (ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right
+     * pixel in the pre-correction active pixel array.
+     * When the distortion correction mode is not OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the active array, and
+     * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right pixel in the
+     * active pixel array.</p>
      * <p>The weight must be within <code>[0, 1000]</code>, and represents a weight
      * for every pixel in the area. This means that a large metering area
      * with the same weight as a smaller area will have more effect in
@@ -667,8 +697,10 @@
      * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
      * ymax.</p>
      *
+     * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
     ACAMERA_CONTROL_AF_REGIONS =                                // int32[5*area_count]
             ACAMERA_CONTROL_START + 8,
@@ -800,11 +832,26 @@
      * Otherwise will always be present.</p>
      * <p>The maximum number of regions supported by the device is determined by the value
      * of android.control.maxRegionsAwb.</p>
-     * <p>The coordinate system is based on the active pixel array,
-     * with (0,0) being the top-left pixel in the active pixel array, and
+     * <p>For devices not supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system always follows that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with (0,0) being
+     * the top-left pixel in the active pixel array, and
      * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
-     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the
-     * bottom-right pixel in the active pixel array.</p>
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right pixel in the
+     * active pixel array.</p>
+     * <p>For devices supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system depends on the mode being set.
+     * When the distortion correction mode is OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the pre-correction active array, and
+     * (ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right
+     * pixel in the pre-correction active pixel array.
+     * When the distortion correction mode is not OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the active array, and
+     * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the bottom-right pixel in the
+     * active pixel array.</p>
      * <p>The weight must range from 0 to 1000, and represents a weight
      * for every pixel in the area. This means that a large metering area
      * with the same weight as a smaller area will have more effect in
@@ -825,8 +872,10 @@
      * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
      * ymax.</p>
      *
+     * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
     ACAMERA_CONTROL_AWB_REGIONS =                               // int32[5*area_count]
             ACAMERA_CONTROL_START + 12,
@@ -2979,9 +3028,17 @@
      * </ul></p>
      *
      * <p>This control can be used to implement digital zoom.</p>
-     * <p>The crop region coordinate system is based off
-     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with <code>(0, 0)</code> being the
-     * top-left corner of the sensor active array.</p>
+     * <p>For devices not supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system always follows that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with <code>(0, 0)</code> being
+     * the top-left pixel of the active array.</p>
+     * <p>For devices supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system depends on the mode being set.
+     * When the distortion correction mode is OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the pre-correction active array.
+     * When the distortion correction mode is not OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the active array.</p>
      * <p>Output streams use this rectangle to produce their output,
      * cropping to a smaller region if necessary to maintain the
      * stream's aspect ratio, then scaling the sensor input to
@@ -3000,18 +3057,26 @@
      * outputs will crop horizontally (pillarbox), and 16:9
      * streams will match exactly. These additional crops will
      * be centered within the crop region.</p>
-     * <p>The width and height of the crop region cannot
-     * be set to be smaller than
+     * <p>If the coordinate system is android.sensor.info.activeArraySize, the width and height
+     * of the crop region cannot be set to be smaller than
      * <code>floor( activeArraySize.width / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code> and
      * <code>floor( activeArraySize.height / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code>, respectively.</p>
+     * <p>If the coordinate system is ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, the width
+     * and height of the crop region cannot be set to be smaller than
+     * <code>floor( preCorrectionActiveArraySize.width / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code>
+     * and
+     * <code>floor( preCorrectionActiveArraySize.height / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code>,
+     * respectively.</p>
      * <p>The camera device may adjust the crop region to account
      * for rounding and other hardware requirements; the final
      * crop region used will be included in the output capture
      * result.</p>
      * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
      *
+     * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
     ACAMERA_SCALER_CROP_REGION =                                // int32[4]
             ACAMERA_SCALER_START,
@@ -3977,12 +4042,24 @@
      * ACAMERA_SCALER_CROP_REGION, is defined relative to the active array rectangle given in
      * this field, with <code>(0, 0)</code> being the top-left of this rectangle.</p>
      * <p>The active array may be smaller than the full pixel array, since the full array may
-     * include black calibration pixels or other inactive regions, and geometric correction
-     * resulting in scaling or cropping may have been applied.</p>
+     * include black calibration pixels or other inactive regions.</p>
+     * <p>For devices that do not support ACAMERA_DISTORTION_CORRECTION_MODE control, the active
+     * array must be the same as ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.</p>
+     * <p>For devices that support ACAMERA_DISTORTION_CORRECTION_MODE control, the active array must
+     * be enclosed by ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE. The difference between
+     * pre-correction active array and active array accounts for scaling or cropping caused
+     * by lens geometric distortion correction.</p>
+     * <p>In general, applications should always refer to the active array size for controls
+     * such as metering regions or the crop region. Two exceptions are when the application is
+     * dealing with RAW image buffers (RAW_SENSOR, RAW10, RAW12, etc.), or when the application
+     * explicitly sets ACAMERA_DISTORTION_CORRECTION_MODE to OFF. In these cases, the application
+     * should refer to ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.</p>
      * <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
      *
+     * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
     ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE =                     // int32[4]
             ACAMERA_SENSOR_INFO_START,
@@ -4224,9 +4301,9 @@
      * <ol>
      * <li>ACAMERA_LENS_DISTORTION.</li>
      * </ol>
-     * <p>If all of the geometric distortion fields are no-ops, this rectangle will be the same
-     * as the post-distortion-corrected rectangle given in
-     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
+     * <p>If the camera device doesn't support geometric distortion correction, or all of the
+     * geometric distortion fields are no-ops, this rectangle will be the same as the
+     * post-distortion-corrected rectangle given in ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
      * <p>This rectangle is defined relative to the full pixel array; (0,0) is the top-left of
      * the full pixel array, and the size of the full pixel array is given by
      * ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE.</p>
@@ -4372,11 +4449,22 @@
      *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
      * </ul></p>
      *
-     * <p>The coordinate system is that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <p>For devices not supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system always follows that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with <code>(0, 0)</code> being
+     * the top-left pixel of the active array.</p>
+     * <p>For devices supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system depends on the mode being set.
+     * When the distortion correction mode is OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the pre-correction active array.
+     * When the distortion correction mode is not OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
      * <code>(0, 0)</code> being the top-left pixel of the active array.</p>
      * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE == FULL</p>
      *
+     * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
      */
     ACAMERA_STATISTICS_FACE_LANDMARKS =                         // int32[n*6]
@@ -4392,12 +4480,23 @@
      *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
      * </ul></p>
      *
-     * <p>The coordinate system is that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <p>For devices not supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system always follows that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with <code>(0, 0)</code> being
+     * the top-left pixel of the active array.</p>
+     * <p>For devices supporting ACAMERA_DISTORTION_CORRECTION_MODE control, the coordinate
+     * system depends on the mode being set.
+     * When the distortion correction mode is OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the pre-correction active array.
+     * When the distortion correction mode is not OFF, the coordinate system follows
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
      * <code>(0, 0)</code> being the top-left pixel of the active array.</p>
      * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE != OFF
-     * The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
+     * The data representation is <code>int[4]</code>, which maps to <code>(left, top, right, bottom)</code>.</p>
      *
+     * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
      */
     ACAMERA_STATISTICS_FACE_RECTANGLES =                        // int32[n*4]
@@ -5315,18 +5414,18 @@
      * any correction at all would slow down capture rate.  Every output stream will have a
      * similar amount of enhancement applied.</p>
      * <p>The correction only applies to processed outputs such as YUV, JPEG, or DEPTH16; it is not
-     * applied to any RAW output.  Metadata coordinates such as face rectangles or metering
+     * applied to any RAW output. Metadata coordinates such as face rectangles or metering
      * regions are also not affected by correction.</p>
-     * <p>Applications enabling distortion correction need to pay extra attention when converting
-     * image coordinates between corrected output buffers and the sensor array. For example, if
-     * the app supports tap-to-focus and enables correction, it then has to apply the distortion
-     * model described in ACAMERA_LENS_DISTORTION to the image buffer tap coordinates to properly
-     * calculate the tap position on the sensor active array to be used with
-     * ACAMERA_CONTROL_AF_REGIONS. The same applies in reverse to detected face rectangles if
-     * they need to be drawn on top of the corrected output buffers.</p>
+     * <p>This control will be on by default on devices that support this control. Applications
+     * disabling distortion correction need to pay extra attention with the coordinate system of
+     * metering regions, crop region, and face rectangles. When distortion correction is OFF,
+     * metadata coordinates follow the coordinate system of
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE. When distortion is not OFF, metadata
+     * coordinates follow the coordinate system of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
      *
-     * @see ACAMERA_CONTROL_AF_REGIONS
      * @see ACAMERA_LENS_DISTORTION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
     ACAMERA_DISTORTION_CORRECTION_MODE =                        // byte (acamera_metadata_enum_android_distortion_correction_mode_t)
             ACAMERA_DISTORTION_CORRECTION_START,
@@ -7050,6 +7149,10 @@
     /**
      * <p>The camera device is a logical camera backed by two or more physical cameras that are
      * also exposed to the application.</p>
+     * <p>Camera applications shouldn't assume that there is at most one rear camera and one
+     * front camera in the system. For an application that switches between front and back
+     * cameras, the recommendation is to switch between the first rear camera and the first
+     * front camera in the list of supported camera devices.</p>
      * <p>This capability requires the camera device to support the following:</p>
      * <ul>
      * <li>This camera device must list the following static metadata entries in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html">CameraCharacteristics</a>:<ul>
@@ -7100,7 +7203,7 @@
 
     /**
      * <p>The camera device is a monochrome camera that doesn't contain a color filter array,
-     * and the pixel values on U and Y planes are all 128.</p>
+     * and the pixel values on U and V planes are all 128.</p>
      */
     ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME                = 12,
 
@@ -7775,9 +7878,6 @@
 
 } acamera_metadata_enum_android_distortion_correction_mode_t;
 
-
-#endif /* __ANDROID_API__ >= 24 */
-
 __END_DECLS
 
 #endif /* _NDK_CAMERA_METADATA_TAGS_H */
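The coordinate-system paragraphs added above reduce to one decision: regions and crop rectangles are expressed against the pre-correction active array only when the device supports ACAMERA_DISTORTION_CORRECTION_MODE and the mode is OFF; otherwise the active array applies. A reduced sketch of that decision, assuming the availableModes tag is the capability signal and with the helper name chosen for illustration:

    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Return the tag whose rectangle defines the metering/crop coordinate system.
    static uint32_t coordinateArrayTag(const ACameraMetadata* chars, bool distortionModeIsOff) {
        ACameraMetadata_const_entry modes = {};
        bool supportsDistortionCorrection = (ACameraMetadata_getConstEntry(chars,
                ACAMERA_DISTORTION_CORRECTION_AVAILABLE_MODES, &modes) == ACAMERA_OK);
        if (supportsDistortionCorrection && distortionModeIsOff) {
            // Correction OFF: coordinates follow the pre-correction active array.
            return ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE;
        }
        // No distortion-correction control, or mode != OFF: use the active array.
        return ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE;
    }
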
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index 4961ce3..2fb5d12 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -44,8 +44,6 @@
 
 __BEGIN_DECLS
 
-#if __ANDROID_API__ >= 24
-
 // Container for output targets
 typedef struct ACameraOutputTargets ACameraOutputTargets;
 
@@ -101,7 +99,8 @@
  *
  * @see ACaptureRequest_addTarget
  */
-camera_status_t ACameraOutputTarget_create(ANativeWindow* window, ACameraOutputTarget** output);
+camera_status_t ACameraOutputTarget_create(ANativeWindow* window,
+        ACameraOutputTarget** output) __INTRODUCED_IN(24);
 
 /**
  * Free a ACameraOutputTarget object.
@@ -110,7 +109,7 @@
  *
  * @see ACameraOutputTarget_create
  */
-void ACameraOutputTarget_free(ACameraOutputTarget* output);
+void ACameraOutputTarget_free(ACameraOutputTarget* output) __INTRODUCED_IN(24);
 
 /**
  * Add an {@link ACameraOutputTarget} object to {@link ACaptureRequest}.
@@ -123,7 +122,7 @@
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or output is NULL.</li></ul>
  */
 camera_status_t ACaptureRequest_addTarget(ACaptureRequest* request,
-        const ACameraOutputTarget* output);
+        const ACameraOutputTarget* output) __INTRODUCED_IN(24);
 
 /**
  * Remove an {@link ACameraOutputTarget} object from {@link ACaptureRequest}.
@@ -138,7 +137,7 @@
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or output is NULL.</li></ul>
  */
 camera_status_t ACaptureRequest_removeTarget(ACaptureRequest* request,
-        const ACameraOutputTarget* output);
+        const ACameraOutputTarget* output) __INTRODUCED_IN(24);
 
 /**
  * Get a metadata entry from input {@link ACaptureRequest}.
@@ -158,7 +157,7 @@
  *             entry of input tag value.</li></ul>
  */
 camera_status_t ACaptureRequest_getConstEntry(
-        const ACaptureRequest* request, uint32_t tag, ACameraMetadata_const_entry* entry);
+        const ACaptureRequest* request, uint32_t tag, ACameraMetadata_const_entry* entry) __INTRODUCED_IN(24);
 
 /*
  * List all the entry tags in input {@link ACaptureRequest}.
@@ -179,7 +178,7 @@
  *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
  */
 camera_status_t ACaptureRequest_getAllTags(
-        const ACaptureRequest* request, /*out*/int32_t* numTags, /*out*/const uint32_t** tags);
+        const ACaptureRequest* request, /*out*/int32_t* numTags, /*out*/const uint32_t** tags) __INTRODUCED_IN(24);
 
 /**
  * Set/change a camera capture control entry with unsigned 8 bits data type.
@@ -198,7 +197,7 @@
  *             the tag is not controllable by application.</li></ul>
  */
 camera_status_t ACaptureRequest_setEntry_u8(
-        ACaptureRequest* request, uint32_t tag, uint32_t count, const uint8_t* data);
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const uint8_t* data) __INTRODUCED_IN(24);
 
 /**
  * Set/change a camera capture control entry with signed 32 bits data type.
@@ -217,7 +216,7 @@
  *             the tag is not controllable by application.</li></ul>
  */
 camera_status_t ACaptureRequest_setEntry_i32(
-        ACaptureRequest* request, uint32_t tag, uint32_t count, const int32_t* data);
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const int32_t* data) __INTRODUCED_IN(24);
 
 /**
  * Set/change a camera capture control entry with float data type.
@@ -236,7 +235,7 @@
  *             the tag is not controllable by application.</li></ul>
  */
 camera_status_t ACaptureRequest_setEntry_float(
-        ACaptureRequest* request, uint32_t tag, uint32_t count, const float* data);
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const float* data) __INTRODUCED_IN(24);
 
 /**
  * Set/change a camera capture control entry with signed 64 bits data type.
@@ -255,7 +254,7 @@
  *             the tag is not controllable by application.</li></ul>
  */
 camera_status_t ACaptureRequest_setEntry_i64(
-        ACaptureRequest* request, uint32_t tag, uint32_t count, const int64_t* data);
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const int64_t* data) __INTRODUCED_IN(24);
 
 /**
  * Set/change a camera capture control entry with double data type.
@@ -274,7 +273,7 @@
  *             the tag is not controllable by application.</li></ul>
  */
 camera_status_t ACaptureRequest_setEntry_double(
-        ACaptureRequest* request, uint32_t tag, uint32_t count, const double* data);
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const double* data) __INTRODUCED_IN(24);
 
 /**
  * Set/change a camera capture control entry with rational data type.
@@ -294,18 +293,14 @@
  */
 camera_status_t ACaptureRequest_setEntry_rational(
         ACaptureRequest* request, uint32_t tag, uint32_t count,
-        const ACameraMetadata_rational* data);
+        const ACameraMetadata_rational* data) __INTRODUCED_IN(24);
 
 /**
  * Free a {@link ACaptureRequest} structure.
  *
  * @param request the {@link ACaptureRequest} to be freed.
  */
-void ACaptureRequest_free(ACaptureRequest* request);
-
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 28
+void ACaptureRequest_free(ACaptureRequest* request) __INTRODUCED_IN(24);
 
 /**
  * Associate an arbitrary user context pointer to the {@link ACaptureRequest}
@@ -325,7 +320,7 @@
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL.</li></ul>
  */
 camera_status_t ACaptureRequest_setUserContext(
-        ACaptureRequest* request, void* context);
+        ACaptureRequest* request, void* context) __INTRODUCED_IN(28);
 
 /**
  * Get the user context pointer of the {@link ACaptureRequest}
@@ -341,7 +336,7 @@
  *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL.</li></ul>
  */
 camera_status_t ACaptureRequest_getUserContext(
-        const ACaptureRequest* request, /*out*/void** context);
+        const ACaptureRequest* request, /*out*/void** context) __INTRODUCED_IN(28);
 
 /**
  * Create a copy of input {@link ACaptureRequest}.
@@ -353,9 +348,7 @@
  *
  * @return a valid ACaptureRequest pointer or NULL if the input request cannot be copied.
  */
-ACaptureRequest* ACaptureRequest_copy(const ACaptureRequest* src);
-
-#endif /* __ANDROID_API__ >= 28 */
+ACaptureRequest* ACaptureRequest_copy(const ACaptureRequest* src) __INTRODUCED_IN(28);
 
 __END_DECLS
 
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index d179aa0..a29e96d 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -6,11 +6,11 @@
     ACameraCaptureSession_getDevice;
     ACameraCaptureSession_setRepeatingRequest;
     ACameraCaptureSession_stopRepeating;
-    ACameraCaptureSession_updateSharedOutput;
+    ACameraCaptureSession_updateSharedOutput; # introduced=28
     ACameraDevice_close;
     ACameraDevice_createCaptureRequest;
     ACameraDevice_createCaptureSession;
-    ACameraDevice_createCaptureSessionWithSessionParameters;
+    ACameraDevice_createCaptureSessionWithSessionParameters; # introduced=28
     ACameraDevice_getId;
     ACameraManager_create;
     ACameraManager_delete;
@@ -27,11 +27,11 @@
     ACameraOutputTarget_create;
     ACameraOutputTarget_free;
     ACaptureRequest_addTarget;
-    ACaptureRequest_copy;
+    ACaptureRequest_copy; # introduced=28
     ACaptureRequest_free;
     ACaptureRequest_getAllTags;
     ACaptureRequest_getConstEntry;
-    ACaptureRequest_getUserContext;
+    ACaptureRequest_getUserContext; # introduced=28
     ACaptureRequest_removeTarget;
     ACaptureRequest_setEntry_double;
     ACaptureRequest_setEntry_float;
@@ -39,15 +39,15 @@
     ACaptureRequest_setEntry_i64;
     ACaptureRequest_setEntry_rational;
     ACaptureRequest_setEntry_u8;
-    ACaptureRequest_setUserContext;
+    ACaptureRequest_setUserContext; # introduced=28
     ACaptureSessionOutputContainer_add;
     ACaptureSessionOutputContainer_create;
     ACaptureSessionOutputContainer_free;
     ACaptureSessionOutputContainer_remove;
     ACaptureSessionOutput_create;
-    ACaptureSessionSharedOutput_create;
-    ACaptureSessionSharedOutput_add;
-    ACaptureSessionSharedOutput_remove;
+    ACaptureSessionSharedOutput_create; # introduced=28
+    ACaptureSessionSharedOutput_add; # introduced=28
+    ACaptureSessionSharedOutput_remove; # introduced=28
     ACaptureSessionOutput_free;
   local:
     *;
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 9d2daab..a7ac2d7 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -73,7 +73,7 @@
 static bool gMonotonicTime = false;     // use system monotonic time for timestamps
 static bool gPersistentSurface = false; // use persistent surface
 static enum {
-    FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES, FORMAT_RAW_FRAMES
+    FORMAT_MP4, FORMAT_H264, FORMAT_WEBM, FORMAT_3GPP, FORMAT_FRAMES, FORMAT_RAW_FRAMES
 } gOutputFormat = FORMAT_MP4;           // data format for output
 static AString gCodecName = "";         // codec name override
 static bool gSizeSpecified = false;     // was size explicitly requested?
@@ -135,6 +135,7 @@
                 strerror(errno));
         return err;
     }
+    signal(SIGPIPE, SIG_IGN);
     return NO_ERROR;
 }
 
@@ -668,7 +669,9 @@
     sp<MediaMuxer> muxer = NULL;
     FILE* rawFp = NULL;
     switch (gOutputFormat) {
-        case FORMAT_MP4: {
+        case FORMAT_MP4:
+        case FORMAT_WEBM:
+        case FORMAT_3GPP: {
             // Configure muxer.  We have to wait for the CSD blob from the encoder
             // before we can start it.
             err = unlink(fileName);
@@ -681,7 +684,13 @@
                 fprintf(stderr, "ERROR: couldn't open file\n");
                 abort();
             }
-            muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+            if (gOutputFormat == FORMAT_MP4) {
+                muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+            } else if (gOutputFormat == FORMAT_WEBM) {
+                muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_WEBM);
+            } else {
+                muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_THREE_GPP);
+            }
             close(fd);
             if (gRotate) {
                 muxer->setOrientationHint(90);  // TODO: does this do anything?
@@ -1001,6 +1010,10 @@
                 gOutputFormat = FORMAT_MP4;
             } else if (strcmp(optarg, "h264") == 0) {
                 gOutputFormat = FORMAT_H264;
+            } else if (strcmp(optarg, "webm") == 0) {
+                gOutputFormat = FORMAT_WEBM;
+            } else if (strcmp(optarg, "3gpp") == 0) {
+                gOutputFormat = FORMAT_3GPP;
             } else if (strcmp(optarg, "frames") == 0) {
                 gOutputFormat = FORMAT_FRAMES;
             } else if (strcmp(optarg, "raw-frames") == 0) {
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index a2574ea..97e160e 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -78,6 +78,7 @@
 static bool gPlaybackAudio;
 static bool gWriteMP4;
 static bool gDisplayHistogram;
+static bool gVerbose = false;
 static bool showProgress = true;
 static String8 gWriteMP4Filename;
 static String8 gComponentNameOverride;
@@ -159,6 +160,11 @@
             break;
         }
 
+        if (gVerbose) {
+            MetaDataBase &meta = mbuf->meta_data();
+            fprintf(stdout, "sample format: %s\n", meta.toString().c_str());
+        }
+
         CHECK_EQ(
                 fwrite((const uint8_t *)mbuf->data() + mbuf->range_offset(),
                        1,
@@ -630,6 +636,7 @@
     fprintf(stderr, "       -T allocate buffers from a surface texture\n");
     fprintf(stderr, "       -d(ump) output_filename (raw stream data to a file)\n");
     fprintf(stderr, "       -D(ump) output_filename (decoded PCM data to a file)\n");
+    fprintf(stderr, "       -v be more verbose\n");
 }
 
 static void dumpCodecProfiles(bool queryDecoders) {
@@ -708,7 +715,7 @@
     sp<ALooper> looper;
 
     int res;
-    while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
+    while ((res = getopt(argc, argv, "vhaqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
         switch (res) {
             case 'a':
             {
@@ -832,6 +839,12 @@
                 break;
             }
 
+            case 'v':
+            {
+                gVerbose = true;
+                break;
+            }
+
             case '?':
             case 'h':
             default:
@@ -893,7 +906,7 @@
                 VideoFrame *frame = (VideoFrame *)mem->pointer();
 
                 CHECK_EQ(writeJpegFile("/sdcard/out.jpg",
-                            (uint8_t *)frame + sizeof(VideoFrame),
+                            frame->getFlattenedData(),
                             frame->mWidth, frame->mHeight), 0);
             }
 
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index 61b5127..3035c5a 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -22,7 +22,6 @@
 #include <android/hidl/manager/1.0/IServiceManager.h>
 
 #include <binder/IMemory.h>
-#include <cutils/native_handle.h>
 #include <hidlmemory/FrameworkUtils.h>
 #include <media/hardware/CryptoAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -224,10 +223,14 @@
     Mutex::Autolock autoLock(mLock);
 
     if (mInitCheck != OK) {
-        return mInitCheck;
+        return false;
     }
 
-    return mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
+    Return<bool> hResult = mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
+    if (!hResult.isOk()) {
+        return false;
+    }
+    return hResult;
 }
 
 
@@ -245,17 +248,12 @@
         ALOGE("setHeapBase(): heap is NULL");
         return -1;
     }
-    native_handle_t* nativeHandle = native_handle_create(1, 0);
-    if (!nativeHandle) {
-        ALOGE("setHeapBase(), failed to create native handle");
-        return -1;
-    }
 
     Mutex::Autolock autoLock(mLock);
 
     int32_t seqNum = mHeapSeqNum++;
     sp<HidlMemory> hidlMemory = fromHeap(heap);
-    mHeapBases.add(seqNum, mNextBufferId);
+    mHeapBases.add(seqNum, HeapBase(mNextBufferId, heap->getSize()));
     Return<void> hResult = mPlugin->setSharedBufferBase(*hidlMemory, mNextBufferId++);
     ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
     return seqNum;
@@ -264,7 +262,22 @@
 void CryptoHal::clearHeapBase(int32_t seqNum) {
     Mutex::Autolock autoLock(mLock);
 
-    mHeapBases.removeItem(seqNum);
+    /*
+     * Clear the remote shared memory mapping by setting the shared
+     * buffer base to a null hidl_memory.
+     *
+     * TODO: Add a releaseSharedBuffer method in a future DRM HAL
+     * API version to make this explicit.
+     */
+    ssize_t index = mHeapBases.indexOfKey(seqNum);
+    if (index >= 0) {
+        if (mPlugin != NULL) {
+            uint32_t bufferId = mHeapBases[index].getBufferId();
+            Return<void> hResult = mPlugin->setSharedBufferBase(hidl_memory(), bufferId);
+            ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
+        }
+        mHeapBases.removeItem(seqNum);
+    }
 }
 
 status_t CryptoHal::toSharedBuffer(const sp<IMemory>& memory, int32_t seqNum, ::SharedBuffer* buffer) {
@@ -280,10 +293,26 @@
         return UNEXPECTED_NULL;
     }
 
-    // memory must be in the declared heap
-    CHECK(mHeapBases.indexOfKey(seqNum) >= 0);
+    // memory must be in one of the heaps that have been set
+    if (mHeapBases.indexOfKey(seqNum) < 0) {
+        return UNKNOWN_ERROR;
+    }
 
-    buffer->bufferId = mHeapBases.valueFor(seqNum);
+    // heap must be the same size as the one that was set in setHeapBase
+    if (mHeapBases.valueFor(seqNum).getSize() != heap->getSize()) {
+        android_errorWriteLog(0x534e4554, "76221123");
+        return UNKNOWN_ERROR;
+    }
+
+    // memory must be within the address space of the heap
+    if (memory->pointer() != static_cast<uint8_t *>(heap->getBase()) + memory->offset() ||
+            heap->getSize() < memory->offset() + memory->size() ||
+            SIZE_MAX - memory->offset() < memory->size()) {
+        android_errorWriteLog(0x534e4554, "76221123");
+        return UNKNOWN_ERROR;
+    }
+
+    buffer->bufferId = mHeapBases.valueFor(seqNum).getBufferId();
     buffer->offset = offset >= 0 ? offset : 0;
     buffer->size = size;
     return OK;
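The new checks in toSharedBuffer() reject an IMemory whose pointer, offset, or size does not fit inside the heap registered in setHeapBase(); the last clause exists because memory->offset() + memory->size() is an unsigned sum that could wrap. A reduced sketch of the same range predicate, with names chosen for illustration:

    #include <cstddef>
    #include <cstdint>

    // True only if [offset, offset + size) fits inside a heap of heapSize bytes,
    // rejecting unsigned wrap-around of offset + size before comparing.
    static bool rangeInsideHeap(size_t offset, size_t size, size_t heapSize) {
        if (SIZE_MAX - offset < size) {
            return false;  // offset + size would overflow
        }
        return offset + size <= heapSize;
    }
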
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 4e8ad52..cf08610 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -68,10 +68,12 @@
 
 template<typename T>
 std::string toBase64StringNoPad(const T* data, size_t size) {
-    if (size == 0) {
+    // Note that the base 64 conversion only works with arrays of single-byte
+    // values. If the source is empty or is not an array of single-byte values,
+    // return empty string.
+    if (size == 0 || sizeof(data[0]) != 1) {
       return "";
     }
-    CHECK(sizeof(data[0] == 1));
 
     android::AString outputString;
     encodeBase64(data, size, &outputString);
@@ -1186,9 +1188,9 @@
 
     DrmSessionManager::Instance()->useSession(sessionId);
 
-    Status status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
+    Return<Status> status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
             toHidlString(algorithm));
-    return toStatusT(status);
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
 }
 
 status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const &sessionId,
@@ -1198,9 +1200,9 @@
 
     DrmSessionManager::Instance()->useSession(sessionId);
 
-    Status status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
+    Return<Status> status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
             toHidlString(algorithm));
-    return toStatusT(status);
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
 }
 
 status_t DrmHal::encrypt(Vector<uint8_t> const &sessionId,
diff --git a/drm/libmediadrm/DrmMetrics.cpp b/drm/libmediadrm/DrmMetrics.cpp
index fce1717..4fed707 100644
--- a/drm/libmediadrm/DrmMetrics.cpp
+++ b/drm/libmediadrm/DrmMetrics.cpp
@@ -29,6 +29,7 @@
 using ::android::String16;
 using ::android::String8;
 using ::android::drm_metrics::DrmFrameworkMetrics;
+using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::drm::V1_0::EventType;
 using ::android::hardware::drm::V1_0::KeyStatusType;
@@ -192,6 +193,13 @@
     }
 }
 
+inline String16 MakeIndexString(unsigned int index) {
+  std::string str("[");
+  str.append(std::to_string(index));
+  str.append("]");
+  return String16(str.c_str());
+}
+
 } // namespace
 
 namespace android {
@@ -370,9 +378,11 @@
     }
 
     int groupIndex = 0;
+    std::map<String16, int> indexMap;
     for (const auto &hidlMetricGroup : hidlMetricGroups) {
         PersistableBundle bundleMetricGroup;
         for (const auto &hidlMetric : hidlMetricGroup.metrics) {
+            String16 metricName(hidlMetric.name.c_str());
             PersistableBundle bundleMetric;
             // Add metric component values.
             for (const auto &value : hidlMetric.values) {
@@ -388,14 +398,22 @@
             // Add attributes to the bundle metric.
             bundleMetric.putPersistableBundle(String16("attributes"),
                                               bundleMetricAttributes);
+            // Add one layer of indirection, allowing for repeated metric names.
+            PersistableBundle repeatedMetrics;
+            bundleMetricGroup.getPersistableBundle(metricName,
+                                                   &repeatedMetrics);
+            int index = indexMap[metricName];
+            repeatedMetrics.putPersistableBundle(MakeIndexString(index),
+                                                 bundleMetric);
+            indexMap[metricName] = ++index;
+
             // Add the bundle metric to the group of metrics.
-            bundleMetricGroup.putPersistableBundle(
-                String16(hidlMetric.name.c_str()), bundleMetric);
+            bundleMetricGroup.putPersistableBundle(metricName,
+                                                   repeatedMetrics);
         }
         // Add the bundle metric group to the collection of groups.
-        bundleMetricGroups->putPersistableBundle(
-            String16(std::to_string(groupIndex).c_str()), bundleMetricGroup);
-        groupIndex++;
+        bundleMetricGroups->putPersistableBundle(MakeIndexString(groupIndex++),
+                                                 bundleMetricGroup);
     }
 
     return OK;
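With the indirection introduced above, a metric-group bundle maps each metric name to a sub-bundle keyed "[0]", "[1]", and so on, so repeated HIDL metric names no longer overwrite one another. A reader-side sketch mirroring the updated test, with the helper name chosen for illustration:

    #include <binder/PersistableBundle.h>
    #include <utils/String16.h>

    // Fetch the first recorded instance of a named metric from a metric-group bundle.
    static bool getFirstMetricInstance(const android::os::PersistableBundle& group,
                                       const android::String16& metricName,
                                       android::os::PersistableBundle* outInstance) {
        android::os::PersistableBundle repeated;
        if (!group.getPersistableBundle(metricName, &repeated)) {
            return false;
        }
        return repeated.getPersistableBundle(android::String16("[0]"), outInstance);
    }
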
diff --git a/drm/libmediadrm/tests/DrmMetrics_test.cpp b/drm/libmediadrm/tests/DrmMetrics_test.cpp
index 1a20342..64aa9d0 100644
--- a/drm/libmediadrm/tests/DrmMetrics_test.cpp
+++ b/drm/libmediadrm/tests/DrmMetrics_test.cpp
@@ -429,7 +429,8 @@
   DrmMetricGroup hidlMetricGroup =
       { { {
               "open_session_ok",
-              { { "status", DrmMetricGroup::ValueType::INT64_TYPE, (int64_t) Status::OK, 0.0, "" } },
+              { { "status", DrmMetricGroup::ValueType::INT64_TYPE,
+                  (int64_t) Status::OK, 0.0, "" } },
               { { "count", DrmMetricGroup::ValueType::INT64_TYPE, 3, 0.0, "" } }
           },
           {
@@ -444,25 +445,28 @@
                                                      &bundleMetricGroups));
   ASSERT_EQ(1U, bundleMetricGroups.size());
   PersistableBundle bundleMetricGroup;
-  ASSERT_TRUE(bundleMetricGroups.getPersistableBundle(String16("0"), &bundleMetricGroup));
+  ASSERT_TRUE(bundleMetricGroups.getPersistableBundle(String16("[0]"), &bundleMetricGroup));
   ASSERT_EQ(2U, bundleMetricGroup.size());
 
   // Verify each metric.
   PersistableBundle metric;
   ASSERT_TRUE(bundleMetricGroup.getPersistableBundle(String16("open_session_ok"), &metric));
+  PersistableBundle metricInstance;
+  ASSERT_TRUE(metric.getPersistableBundle(String16("[0]"), &metricInstance));
   int64_t value = 0;
-  ASSERT_TRUE(metric.getLong(String16("count"), &value));
+  ASSERT_TRUE(metricInstance.getLong(String16("count"), &value));
   ASSERT_EQ(3, value);
   PersistableBundle attributeBundle;
-  ASSERT_TRUE(metric.getPersistableBundle(String16("attributes"), &attributeBundle));
+  ASSERT_TRUE(metricInstance.getPersistableBundle(String16("attributes"), &attributeBundle));
   ASSERT_TRUE(attributeBundle.getLong(String16("status"), &value));
   ASSERT_EQ((int64_t) Status::OK, value);
 
   ASSERT_TRUE(bundleMetricGroup.getPersistableBundle(String16("close_session_not_opened"),
                                                      &metric));
-  ASSERT_TRUE(metric.getLong(String16("count"), &value));
+  ASSERT_TRUE(metric.getPersistableBundle(String16("[0]"), &metricInstance));
+  ASSERT_TRUE(metricInstance.getLong(String16("count"), &value));
   ASSERT_EQ(7, value);
-  ASSERT_TRUE(metric.getPersistableBundle(String16("attributes"), &attributeBundle));
+  ASSERT_TRUE(metricInstance.getPersistableBundle(String16("attributes"), &attributeBundle));
   value = 0;
   ASSERT_TRUE(attributeBundle.getLong(String16("status"), &value));
   ASSERT_EQ((int64_t) Status::ERROR_DRM_SESSION_NOT_OPENED, value);
diff --git a/include/common_time/ICommonClock.h b/include/common_time/ICommonClock.h
deleted file mode 100644
index d7073f1..0000000
--- a/include/common_time/ICommonClock.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_ICOMMONCLOCK_H
-#define ANDROID_ICOMMONCLOCK_H
-
-#include <stdint.h>
-#include <linux/socket.h>
-
-#include <binder/IInterface.h>
-#include <binder/IServiceManager.h>
-
-namespace android {
-
-class ICommonClockListener : public IInterface {
-  public:
-    DECLARE_META_INTERFACE(CommonClockListener);
-
-    virtual void onTimelineChanged(uint64_t timelineID) = 0;
-};
-
-class BnCommonClockListener : public BnInterface<ICommonClockListener> {
-  public:
-    virtual status_t onTransact(uint32_t code, const Parcel& data,
-                                Parcel* reply, uint32_t flags = 0);
-};
-
-class ICommonClock : public IInterface {
-  public:
-    DECLARE_META_INTERFACE(CommonClock);
-
-    // Name of the ICommonClock service registered with the service manager.
-    static const String16 kServiceName;
-
-    // a reserved invalid timeline ID
-    static const uint64_t kInvalidTimelineID;
-
-    // a reserved invalid error estimate
-    static const int32_t kErrorEstimateUnknown;
-
-    enum State {
-        // the device just came up and is trying to discover the master
-        STATE_INITIAL,
-
-        // the device is a client of a master
-        STATE_CLIENT,
-
-        // the device is acting as master
-        STATE_MASTER,
-
-        // the device has lost contact with its master and needs to participate
-        // in the election of a new master
-        STATE_RONIN,
-
-        // the device is waiting for announcement of the newly elected master
-        STATE_WAIT_FOR_ELECTION,
-    };
-
-    virtual status_t isCommonTimeValid(bool* valid, uint32_t* timelineID) = 0;
-    virtual status_t commonTimeToLocalTime(int64_t commonTime,
-                                           int64_t* localTime) = 0;
-    virtual status_t localTimeToCommonTime(int64_t localTime,
-                                           int64_t* commonTime) = 0;
-    virtual status_t getCommonTime(int64_t* commonTime) = 0;
-    virtual status_t getCommonFreq(uint64_t* freq) = 0;
-    virtual status_t getLocalTime(int64_t* localTime) = 0;
-    virtual status_t getLocalFreq(uint64_t* freq) = 0;
-    virtual status_t getEstimatedError(int32_t* estimate) = 0;
-    virtual status_t getTimelineID(uint64_t* id) = 0;
-    virtual status_t getState(State* state) = 0;
-    virtual status_t getMasterAddr(struct sockaddr_storage* addr) = 0;
-
-    virtual status_t registerListener(
-            const sp<ICommonClockListener>& listener) = 0;
-    virtual status_t unregisterListener(
-            const sp<ICommonClockListener>& listener) = 0;
-
-    // Simple helper to make it easier to connect to the CommonClock service.
-    static inline sp<ICommonClock> getInstance() {
-        sp<IBinder> binder = defaultServiceManager()->checkService(
-                ICommonClock::kServiceName);
-        sp<ICommonClock> clk = interface_cast<ICommonClock>(binder);
-        return clk;
-    }
-};
-
-class BnCommonClock : public BnInterface<ICommonClock> {
-  public:
-    virtual status_t onTransact(uint32_t code, const Parcel& data,
-                                Parcel* reply, uint32_t flags = 0);
-};
-
-};  // namespace android
-
-#endif  // ANDROID_ICOMMONCLOCK_H
diff --git a/include/common_time/ICommonTimeConfig.h b/include/common_time/ICommonTimeConfig.h
deleted file mode 100644
index 497b666..0000000
--- a/include/common_time/ICommonTimeConfig.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_ICOMMONTIMECONFIG_H
-#define ANDROID_ICOMMONTIMECONFIG_H
-
-#include <stdint.h>
-#include <linux/socket.h>
-
-#include <binder/IInterface.h>
-#include <binder/IServiceManager.h>
-
-namespace android {
-
-class String16;
-
-class ICommonTimeConfig : public IInterface {
-  public:
-    DECLARE_META_INTERFACE(CommonTimeConfig);
-
-    // Name of the ICommonTimeConfig service registered with the service
-    // manager.
-    static const String16 kServiceName;
-
-    virtual status_t getMasterElectionPriority(uint8_t *priority) = 0;
-    virtual status_t setMasterElectionPriority(uint8_t priority) = 0;
-    virtual status_t getMasterElectionEndpoint(struct sockaddr_storage *addr) = 0;
-    virtual status_t setMasterElectionEndpoint(const struct sockaddr_storage *addr) = 0;
-    virtual status_t getMasterElectionGroupId(uint64_t *id) = 0;
-    virtual status_t setMasterElectionGroupId(uint64_t id) = 0;
-    virtual status_t getInterfaceBinding(String16& ifaceName) = 0;
-    virtual status_t setInterfaceBinding(const String16& ifaceName) = 0;
-    virtual status_t getMasterAnnounceInterval(int *interval) = 0;
-    virtual status_t setMasterAnnounceInterval(int interval) = 0;
-    virtual status_t getClientSyncInterval(int *interval) = 0;
-    virtual status_t setClientSyncInterval(int interval) = 0;
-    virtual status_t getPanicThreshold(int *threshold) = 0;
-    virtual status_t setPanicThreshold(int threshold) = 0;
-    virtual status_t getAutoDisable(bool *autoDisable) = 0;
-    virtual status_t setAutoDisable(bool autoDisable) = 0;
-    virtual status_t forceNetworklessMasterMode() = 0;
-
-    // Simple helper to make it easier to connect to the CommonTimeConfig service.
-    static inline sp<ICommonTimeConfig> getInstance() {
-        sp<IBinder> binder = defaultServiceManager()->checkService(
-                ICommonTimeConfig::kServiceName);
-        sp<ICommonTimeConfig> clk = interface_cast<ICommonTimeConfig>(binder);
-        return clk;
-    }
-};
-
-class BnCommonTimeConfig : public BnInterface<ICommonTimeConfig> {
-  public:
-    virtual status_t onTransact(uint32_t code, const Parcel& data,
-                                Parcel* reply, uint32_t flags = 0);
-};
-
-};  // namespace android
-
-#endif  // ANDROID_ICOMMONTIMECONFIG_H
diff --git a/include/common_time/cc_helper.h b/include/common_time/cc_helper.h
deleted file mode 100644
index 8c4d5c0..0000000
--- a/include/common_time/cc_helper.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __CC_HELPER_H__
-#define __CC_HELPER_H__
-
-#include <stdint.h>
-#include <common_time/ICommonClock.h>
-#include <utils/threads.h>
-
-namespace android {
-
-// CCHelper is a simple wrapper class to help with centralizing access to the
-// Common Clock service and implementing lifetime managment, as well as to
-// implement a simple policy of making a basic attempt to reconnect to the
-// common clock service when things go wrong.
-//
-// On platforms which run the native common_time service in auto-disable mode,
-// the service will go into networkless mode whenever it has no active clients.
-// It tracks active clients using registered CommonClockListeners (the callback
-// interface for onTimelineChanged) since this provides a convienent death
-// handler notification for when the service's clients die unexpectedly.  This
-// means that users of the common time service should really always have a
-// CommonClockListener, unless they know that the time service is not running in
-// auto disabled mode, or that there is at least one other registered listener
-// active in the system.  The CCHelper makes this a little easier by sharing a
-// ref counted ICommonClock interface across all clients and automatically
-// registering and unregistering a listener whenever there are CCHelper
-// instances active in the process.
-class CCHelper {
-  public:
-    CCHelper();
-    ~CCHelper();
-
-    status_t isCommonTimeValid(bool* valid, uint32_t* timelineID);
-    status_t commonTimeToLocalTime(int64_t commonTime, int64_t* localTime);
-    status_t localTimeToCommonTime(int64_t localTime, int64_t* commonTime);
-    status_t getCommonTime(int64_t* commonTime);
-    status_t getCommonFreq(uint64_t* freq);
-    status_t getLocalTime(int64_t* localTime);
-    status_t getLocalFreq(uint64_t* freq);
-
-  private:
-    class CommonClockListener : public BnCommonClockListener {
-      public:
-        void onTimelineChanged(uint64_t timelineID);
-    };
-
-    static bool verifyClock_l();
-
-    static Mutex lock_;
-    static sp<ICommonClock> common_clock_;
-    static sp<ICommonClockListener> common_clock_listener_;
-    static uint32_t ref_count_;
-};
-
-
-}  // namespace android
-#endif  // __CC_HELPER_H__
diff --git a/include/common_time/local_clock.h b/include/common_time/local_clock.h
deleted file mode 100644
index 384c3de..0000000
--- a/include/common_time/local_clock.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef __LOCAL_CLOCK_H__
-#define __LOCAL_CLOCK_H__
-
-#include <stdint.h>
-
-#include <hardware/local_time_hal.h>
-#include <utils/Errors.h>
-#include <utils/threads.h>
-
-namespace android {
-
-class LocalClock {
-  public:
-    LocalClock();
-
-    bool initCheck();
-
-    int64_t  getLocalTime();
-    uint64_t getLocalFreq();
-    status_t setLocalSlew(int16_t rate);
-    int32_t  getDebugLog(struct local_time_debug_event* records,
-                         int max_records);
-
-  private:
-    static Mutex dev_lock_;
-    static local_time_hw_device_t* dev_;
-};
-
-}  // namespace android
-#endif  // __LOCAL_CLOCK_H__
diff --git a/include/media/MmapStreamCallback.h b/include/media/MmapStreamCallback.h
index 8098e79..31b8eb5 100644
--- a/include/media/MmapStreamCallback.h
+++ b/include/media/MmapStreamCallback.h
@@ -31,8 +31,9 @@
      * The mmap stream should be torn down because conditions that permitted its creation with
      * the requested parameters have changed and do not allow it to operate with the requested
      * constraints any more.
+     * \param[in] handle the handle of the client stream to tear down.
      */
-    virtual void onTearDown() = 0;
+    virtual void onTearDown(audio_port_handle_t handle) = 0;
 
     /**
      * The volume to be applied to the use case specified when opening the stream has changed
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ca119d5..5f19f74 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -538,6 +538,10 @@
         mTimestampMutator.push(timestamp);
     }
 
+    virtual ExtendedTimestamp getTimestamp() const {
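+        // Read back the timestamp most recently pushed into mTimestampMutator.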
+        return mTimestampMutator.last();
+    }
+
     // Flushes the shared ring buffer if the client had requested it using mStreaming.mFlush.
     // If flush occurs then:
     //   cblk->u.mStreaming.mFront, ServerProxy::mFlush and ServerProxy::mFlushed will be modified
@@ -551,6 +555,9 @@
     // Total count of the number of flushed frames since creation (never reset).
     virtual int64_t     framesFlushed() const { return mFlushed; }
 
+    // Safe frames ready query with no side effects.
+    virtual size_t      framesReadySafe() const = 0;
+
     // Get dynamic buffer size from the shared control block.
     uint32_t            getBufferSizeInFrames() const {
         return android_atomic_acquire_load((int32_t *)&mCblk->mBufferSizeInFrames);
@@ -588,8 +595,7 @@
     // which may include non-contiguous frames
     virtual size_t      framesReady();
 
-    // Safe frames ready query used by dump() - this has no side effects.
-    virtual size_t      framesReadySafe() const;
+    size_t              framesReadySafe() const override; // frames available for the server to read.
 
     // Currently AudioFlinger will call framesReady() for a fast track from two threads:
     // FastMixer thread, and normal mixer thread.  This is dangerous, as the proxy is intended
@@ -693,6 +699,8 @@
         return mCblk->u.mStreaming.mRear; // For completeness only; mRear written by server.
     }
 
+    size_t framesReadySafe() const override; // frames available for the client to read.
+
 protected:
     virtual ~AudioRecordServerProxy() { }
 };
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index a9d4dd1..712f118 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -25,94 +25,34 @@
 
 namespace android {
 
-// Represents a color converted (RGB-based) video frame
-// with bitmap pixels stored in FrameBuffer
+// Represents a color converted (RGB-based) video frame with bitmap
+// pixels stored in FrameBuffer.
+// When a VideoFrame is stored in IMemory, the frame data and ICC data
+// follow the VideoFrame structure; their locations can be retrieved
+// with getFlattenedData() and getFlattenedIccData().
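+// (Presumably the flattened layout is: [VideoFrame][frame data, mSize bytes][ICC data, mIccSize bytes].)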
 class VideoFrame
 {
 public:
     // Construct a VideoFrame object with the specified parameters,
-    // will allocate frame buffer if |allocate| is set to true, will
-    // allocate buffer to hold ICC data if |iccData| and |iccSize|
-    // indicate its presence.
+    // and calculate the frame buffer size if |hasData| is set to true.
     VideoFrame(uint32_t width, uint32_t height,
             uint32_t displayWidth, uint32_t displayHeight,
-            uint32_t angle, uint32_t bpp, bool allocate,
-            const void *iccData, size_t iccSize):
+            uint32_t tileWidth, uint32_t tileHeight,
+            uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
         mWidth(width), mHeight(height),
         mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
+        mTileWidth(tileWidth), mTileHeight(tileHeight),
         mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
-        mSize(0), mIccSize(0), mReserved(0), mData(0), mIccData(0) {
-        if (allocate) {
-            mSize = mRowBytes * mHeight;
-            mData = new uint8_t[mSize];
-            if (mData == NULL) {
-                mSize = 0;
-            }
-        }
-
-        if (iccData != NULL && iccSize > 0) {
-            mIccSize = iccSize;
-            mIccData = new uint8_t[iccSize];
-            if (mIccData != NULL) {
-                memcpy(mIccData, iccData, iccSize);
-            } else {
-                mIccSize = 0;
-            }
-        }
+        mSize(hasData ? (bpp * width * height) : 0),
+        mIccSize(iccSize), mReserved(0) {
     }
 
-    // Deep copy of both the information fields and the frame data
-    VideoFrame(const VideoFrame& copy) {
-        copyInfoOnly(copy);
-
-        mSize = copy.mSize;
-        mData = NULL;  // initialize it first
-        if (mSize > 0 && copy.mData != NULL) {
-            mData = new uint8_t[mSize];
-            if (mData != NULL) {
-                memcpy(mData, copy.mData, mSize);
-            } else {
-                mSize = 0;
-            }
-        }
-
-        mIccSize = copy.mIccSize;
-        mIccData = NULL;  // initialize it first
-        if (mIccSize > 0 && copy.mIccData != NULL) {
-            mIccData = new uint8_t[mIccSize];
-            if (mIccData != NULL) {
-                memcpy(mIccData, copy.mIccData, mIccSize);
-            } else {
-                mIccSize = 0;
-            }
-        }
-    }
-
-    ~VideoFrame() {
-        if (mData != 0) {
-            delete[] mData;
-        }
-        if (mIccData != 0) {
-            delete[] mIccData;
-        }
-    }
-
-    // Copy |copy| to a flattened VideoFrame in IMemory, 'this' must point to
-    // a chunk of memory back by IMemory of size at least getFlattenedSize()
-    // of |copy|.
-    void copyFlattened(const VideoFrame& copy) {
-        copyInfoOnly(copy);
-
-        mSize = copy.mSize;
-        mData = NULL;  // initialize it first
-        if (copy.mSize > 0 && copy.mData != NULL) {
-            memcpy(getFlattenedData(), copy.mData, copy.mSize);
-        }
-
-        mIccSize = copy.mIccSize;
-        mIccData = NULL;  // initialize it first
-        if (copy.mIccSize > 0 && copy.mIccData != NULL) {
-            memcpy(getFlattenedIccData(), copy.mIccData, copy.mIccSize);
+    void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
+        *this = copy;
+        if (mIccSize == iccSize && iccSize > 0 && iccData != NULL) {
+            memcpy(getFlattenedIccData(), iccData, iccSize);
+        } else {
+            mIccSize = 0;
         }
     }
 
@@ -136,38 +76,14 @@
     uint32_t mHeight;          // Decoded image height before rotation
     uint32_t mDisplayWidth;    // Display width before rotation
     uint32_t mDisplayHeight;   // Display height before rotation
+    uint32_t mTileWidth;       // Tile width (0 if image doesn't have grid)
+    uint32_t mTileHeight;      // Tile height (0 if image doesn't have grid)
     int32_t  mRotationAngle;   // Rotation angle, clockwise, should be multiple of 90
     uint32_t mBytesPerPixel;   // Number of bytes per pixel
     uint32_t mRowBytes;        // Number of bytes per row before rotation
-    uint32_t mSize;            // Number of bytes in mData
-    uint32_t mIccSize;         // Number of bytes in mIccData
+    uint32_t mSize;            // Number of bytes of frame data
+    uint32_t mIccSize;         // Number of bytes of ICC data
     uint32_t mReserved;        // (padding to make mData 64-bit aligned)
-
-    // mData should be 64-bit aligned to prevent additional padding
-    uint8_t* mData;            // Actual binary data
-    // pad structure so it's the same size on 64-bit and 32-bit
-    char     mPadding[8 - sizeof(mData)];
-
-    // mIccData should be 64-bit aligned to prevent additional padding
-    uint8_t* mIccData;            // Actual binary data
-    // pad structure so it's the same size on 64-bit and 32-bit
-    char     mIccPadding[8 - sizeof(mIccData)];
-
-private:
-    //
-    // Utility methods used only within VideoFrame struct
-    //
-
-    // Copy the information fields only
-    void copyInfoOnly(const VideoFrame& copy) {
-        mWidth = copy.mWidth;
-        mHeight = copy.mHeight;
-        mDisplayWidth = copy.mDisplayWidth;
-        mDisplayHeight = copy.mDisplayHeight;
-        mRotationAngle = copy.mRotationAngle;
-        mBytesPerPixel = copy.mBytesPerPixel;
-        mRowBytes = copy.mRowBytes;
-    }
 };
 
 }; // namespace android
diff --git a/media/OWNERS b/media/OWNERS
index 1f687a2..1e2d123 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -2,8 +2,10 @@
 dwkang@google.com
 elaurent@google.com
 essick@google.com
+gkasten@google.com
 hkuang@google.com
 hunga@google.com
+jiabin@google.com
 jmtrivi@google.com
 krocard@google.com
 lajos@google.com
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 75675a9..f1e815b 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -2,10 +2,12 @@
     class core
     user audioserver
     # media gid needed for /dev/fm (radio) and for /data/misc/media (tee)
-    group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
+    group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct wakelock
+    capabilities BLOCK_SUSPEND
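+    # The wakelock group and BLOCK_SUSPEND capability let audioserver acquire wake locks.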
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
     onrestart restart vendor.audio-hal-2-0
+    onrestart restart vendor.audio-hal-4-0-msd
     # Keep the original service name for backward compatibility when upgrading
     # O-MR1 devices with framework-only.
     onrestart restart audio-hal-2-0
diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk
deleted file mode 100644
index aaa0db2..0000000
--- a/media/common_time/Android.mk
+++ /dev/null
@@ -1,24 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-#
-# libcommon_time_client
-# (binder marshalers for ICommonClock as well as common clock and local clock
-# helper code)
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := libcommon_time_client
-LOCAL_MODULE_TAGS := optional
-LOCAL_SRC_FILES := cc_helper.cpp \
-                   local_clock.cpp \
-                   ICommonClock.cpp \
-                   ICommonTimeConfig.cpp \
-                   utils.cpp
-LOCAL_SHARED_LIBRARIES := libbinder \
-                          libhardware \
-                          libutils \
-                          liblog
-
-LOCAL_CFLAGS := -Wall -Werror
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/common_time/ICommonClock.cpp b/media/common_time/ICommonClock.cpp
deleted file mode 100644
index f1f1fca..0000000
--- a/media/common_time/ICommonClock.cpp
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sys/socket.h>
-
-#include <common_time/ICommonClock.h>
-#include <binder/Parcel.h>
-
-#include "utils.h"
-
-namespace android {
-
-/***** ICommonClock *****/
-
-enum {
-    IS_COMMON_TIME_VALID = IBinder::FIRST_CALL_TRANSACTION,
-    COMMON_TIME_TO_LOCAL_TIME,
-    LOCAL_TIME_TO_COMMON_TIME,
-    GET_COMMON_TIME,
-    GET_COMMON_FREQ,
-    GET_LOCAL_TIME,
-    GET_LOCAL_FREQ,
-    GET_ESTIMATED_ERROR,
-    GET_TIMELINE_ID,
-    GET_STATE,
-    GET_MASTER_ADDRESS,
-    REGISTER_LISTENER,
-    UNREGISTER_LISTENER,
-};
-
-const String16 ICommonClock::kServiceName("common_time.clock");
-const uint64_t ICommonClock::kInvalidTimelineID = 0;
-const int32_t ICommonClock::kErrorEstimateUnknown = 0x7FFFFFFF;
-
-class BpCommonClock : public BpInterface<ICommonClock>
-{
-  public:
-    explicit BpCommonClock(const sp<IBinder>& impl)
-        : BpInterface<ICommonClock>(impl) {}
-
-    virtual status_t isCommonTimeValid(bool* valid, uint32_t* timelineID) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(IS_COMMON_TIME_VALID,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *valid = reply.readInt32();
-                *timelineID = reply.readInt32();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t commonTimeToLocalTime(int64_t commonTime,
-            int64_t* localTime) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        data.writeInt64(commonTime);
-        status_t status = remote()->transact(COMMON_TIME_TO_LOCAL_TIME,
-                data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *localTime = reply.readInt64();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t localTimeToCommonTime(int64_t localTime,
-            int64_t* commonTime) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        data.writeInt64(localTime);
-        status_t status = remote()->transact(LOCAL_TIME_TO_COMMON_TIME,
-                data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *commonTime = reply.readInt64();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getCommonTime(int64_t* commonTime) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_COMMON_TIME, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *commonTime = reply.readInt64();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getCommonFreq(uint64_t* freq) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_COMMON_FREQ, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *freq = reply.readInt64();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getLocalTime(int64_t* localTime) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_LOCAL_TIME, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *localTime = reply.readInt64();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getLocalFreq(uint64_t* freq) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_LOCAL_FREQ, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *freq = reply.readInt64();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getEstimatedError(int32_t* estimate) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_ESTIMATED_ERROR, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *estimate = reply.readInt32();
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getTimelineID(uint64_t* id) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_TIMELINE_ID, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *id = static_cast<uint64_t>(reply.readInt64());
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getState(State* state) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_STATE, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *state = static_cast<State>(reply.readInt32());
-            }
-        }
-        return status;
-    }
-
-    virtual status_t getMasterAddr(struct sockaddr_storage* addr) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_MASTER_ADDRESS, data, &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK)
-                deserializeSockaddr(&reply, addr);
-        }
-        return status;
-    }
-
-    virtual status_t registerListener(
-            const sp<ICommonClockListener>& listener) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(listener));
-
-        status_t status = remote()->transact(REGISTER_LISTENER, data, &reply);
-
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t unregisterListener(
-            const sp<ICommonClockListener>& listener) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(listener));
-        status_t status = remote()->transact(UNREGISTER_LISTENER, data, &reply);
-
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-};
-
-IMPLEMENT_META_INTERFACE(CommonClock, "android.os.ICommonClock");
-
-status_t BnCommonClock::onTransact(uint32_t code,
-                                   const Parcel& data,
-                                   Parcel* reply,
-                                   uint32_t flags) {
-    switch(code) {
-        case IS_COMMON_TIME_VALID: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            bool valid;
-            uint32_t timelineID;
-            status_t status = isCommonTimeValid(&valid, &timelineID);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(valid);
-                reply->writeInt32(timelineID);
-            }
-            return OK;
-        } break;
-
-        case COMMON_TIME_TO_LOCAL_TIME: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            int64_t commonTime = data.readInt64();
-            int64_t localTime;
-            status_t status = commonTimeToLocalTime(commonTime, &localTime);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(localTime);
-            }
-            return OK;
-        } break;
-
-        case LOCAL_TIME_TO_COMMON_TIME: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            int64_t localTime = data.readInt64();
-            int64_t commonTime;
-            status_t status = localTimeToCommonTime(localTime, &commonTime);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(commonTime);
-            }
-            return OK;
-        } break;
-
-        case GET_COMMON_TIME: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            int64_t commonTime;
-            status_t status = getCommonTime(&commonTime);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(commonTime);
-            }
-            return OK;
-        } break;
-
-        case GET_COMMON_FREQ: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            uint64_t freq;
-            status_t status = getCommonFreq(&freq);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(freq);
-            }
-            return OK;
-        } break;
-
-        case GET_LOCAL_TIME: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            int64_t localTime;
-            status_t status = getLocalTime(&localTime);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(localTime);
-            }
-            return OK;
-        } break;
-
-        case GET_LOCAL_FREQ: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            uint64_t freq;
-            status_t status = getLocalFreq(&freq);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(freq);
-            }
-            return OK;
-        } break;
-
-        case GET_ESTIMATED_ERROR: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            int32_t error;
-            status_t status = getEstimatedError(&error);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(error);
-            }
-            return OK;
-        } break;
-
-        case GET_TIMELINE_ID: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            uint64_t id;
-            status_t status = getTimelineID(&id);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(static_cast<int64_t>(id));
-            }
-            return OK;
-        } break;
-
-        case GET_STATE: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            State state;
-            status_t status = getState(&state);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(static_cast<int32_t>(state));
-            }
-            return OK;
-        } break;
-
-        case GET_MASTER_ADDRESS: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            struct sockaddr_storage addr;
-            status_t status = getMasterAddr(&addr);
-
-            if ((status == OK) && !canSerializeSockaddr(&addr)) {
-                status = UNKNOWN_ERROR;
-            }
-
-            reply->writeInt32(status);
-
-            if (status == OK) {
-                serializeSockaddr(reply, &addr);
-            }
-
-            return OK;
-        } break;
-
-        case REGISTER_LISTENER: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            sp<ICommonClockListener> listener =
-                interface_cast<ICommonClockListener>(data.readStrongBinder());
-            status_t status = registerListener(listener);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case UNREGISTER_LISTENER: {
-            CHECK_INTERFACE(ICommonClock, data, reply);
-            sp<ICommonClockListener> listener =
-                interface_cast<ICommonClockListener>(data.readStrongBinder());
-            status_t status = unregisterListener(listener);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-    }
-    return BBinder::onTransact(code, data, reply, flags);
-}
-
-/***** ICommonClockListener *****/
-
-enum {
-    ON_TIMELINE_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
-};
-
-class BpCommonClockListener : public BpInterface<ICommonClockListener>
-{
-  public:
-    explicit BpCommonClockListener(const sp<IBinder>& impl)
-        : BpInterface<ICommonClockListener>(impl) {}
-
-    virtual void onTimelineChanged(uint64_t timelineID) {
-        Parcel data, reply;
-        data.writeInterfaceToken(
-                ICommonClockListener::getInterfaceDescriptor());
-        data.writeInt64(timelineID);
-        remote()->transact(ON_TIMELINE_CHANGED, data, &reply);
-    }
-};
-
-IMPLEMENT_META_INTERFACE(CommonClockListener,
-                         "android.os.ICommonClockListener");
-
-status_t BnCommonClockListener::onTransact(
-        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
-    switch(code) {
-        case ON_TIMELINE_CHANGED: {
-            CHECK_INTERFACE(ICommonClockListener, data, reply);
-            uint32_t timelineID = data.readInt64();
-            onTimelineChanged(timelineID);
-            return NO_ERROR;
-        } break;
-    }
-
-    return BBinder::onTransact(code, data, reply, flags);
-}
-
-}; // namespace android
diff --git a/media/common_time/ICommonTimeConfig.cpp b/media/common_time/ICommonTimeConfig.cpp
deleted file mode 100644
index e587b39..0000000
--- a/media/common_time/ICommonTimeConfig.cpp
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sys/socket.h>
-
-#include <common_time/ICommonTimeConfig.h>
-#include <binder/Parcel.h>
-
-#include "utils.h"
-
-namespace android {
-
-/***** ICommonTimeConfig *****/
-
-enum {
-    GET_MASTER_ELECTION_PRIORITY = IBinder::FIRST_CALL_TRANSACTION,
-    SET_MASTER_ELECTION_PRIORITY,
-    GET_MASTER_ELECTION_ENDPOINT,
-    SET_MASTER_ELECTION_ENDPOINT,
-    GET_MASTER_ELECTION_GROUP_ID,
-    SET_MASTER_ELECTION_GROUP_ID,
-    GET_INTERFACE_BINDING,
-    SET_INTERFACE_BINDING,
-    GET_MASTER_ANNOUNCE_INTERVAL,
-    SET_MASTER_ANNOUNCE_INTERVAL,
-    GET_CLIENT_SYNC_INTERVAL,
-    SET_CLIENT_SYNC_INTERVAL,
-    GET_PANIC_THRESHOLD,
-    SET_PANIC_THRESHOLD,
-    GET_AUTO_DISABLE,
-    SET_AUTO_DISABLE,
-    FORCE_NETWORKLESS_MASTER_MODE,
-};
-
-const String16 ICommonTimeConfig::kServiceName("common_time.config");
-
-class BpCommonTimeConfig : public BpInterface<ICommonTimeConfig>
-{
-  public:
-    explicit BpCommonTimeConfig(const sp<IBinder>& impl)
-        : BpInterface<ICommonTimeConfig>(impl) {}
-
-    virtual status_t getMasterElectionPriority(uint8_t *priority) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_MASTER_ELECTION_PRIORITY,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *priority = static_cast<uint8_t>(reply.readInt32());
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setMasterElectionPriority(uint8_t priority) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        data.writeInt32(static_cast<int32_t>(priority));
-        status_t status = remote()->transact(SET_MASTER_ELECTION_PRIORITY,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t getMasterElectionEndpoint(struct sockaddr_storage *addr) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_MASTER_ELECTION_ENDPOINT,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                deserializeSockaddr(&reply, addr);
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setMasterElectionEndpoint(
-            const struct sockaddr_storage *addr) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        if (!canSerializeSockaddr(addr))
-            return BAD_VALUE;
-        if (NULL == addr) {
-            data.writeInt32(0);
-        } else {
-            data.writeInt32(1);
-            serializeSockaddr(&data, addr);
-        }
-        status_t status = remote()->transact(SET_MASTER_ELECTION_ENDPOINT,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t getMasterElectionGroupId(uint64_t *id) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_MASTER_ELECTION_GROUP_ID,
-                                             data,
-                                             &reply);
-
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *id = static_cast<uint64_t>(reply.readInt64());
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setMasterElectionGroupId(uint64_t id) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        data.writeInt64(id);
-        status_t status = remote()->transact(SET_MASTER_ELECTION_GROUP_ID,
-                                             data,
-                                             &reply);
-
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t getInterfaceBinding(String16& ifaceName) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_INTERFACE_BINDING,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                ifaceName = reply.readString16();
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setInterfaceBinding(const String16& ifaceName) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        data.writeString16(ifaceName);
-        status_t status = remote()->transact(SET_INTERFACE_BINDING,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t getMasterAnnounceInterval(int *interval) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_MASTER_ANNOUNCE_INTERVAL,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *interval = reply.readInt32();
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setMasterAnnounceInterval(int interval) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        data.writeInt32(interval);
-        status_t status = remote()->transact(SET_MASTER_ANNOUNCE_INTERVAL,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t getClientSyncInterval(int *interval) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_CLIENT_SYNC_INTERVAL,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *interval = reply.readInt32();
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setClientSyncInterval(int interval) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        data.writeInt32(interval);
-        status_t status = remote()->transact(SET_CLIENT_SYNC_INTERVAL,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t getPanicThreshold(int *threshold) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_PANIC_THRESHOLD,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *threshold = reply.readInt32();
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setPanicThreshold(int threshold) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        data.writeInt32(threshold);
-        status_t status = remote()->transact(SET_PANIC_THRESHOLD,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t getAutoDisable(bool *autoDisable) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_AUTO_DISABLE,
-                                             data,
-                                             &reply);
-        if (status == OK) {
-            status = reply.readInt32();
-            if (status == OK) {
-                *autoDisable = (0 != reply.readInt32());
-            }
-        }
-
-        return status;
-    }
-
-    virtual status_t setAutoDisable(bool autoDisable) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        data.writeInt32(autoDisable ? 1 : 0);
-        status_t status = remote()->transact(SET_AUTO_DISABLE,
-                                             data,
-                                             &reply);
-
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-
-    virtual status_t forceNetworklessMasterMode() {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
-        status_t status = remote()->transact(FORCE_NETWORKLESS_MASTER_MODE,
-                                             data,
-                                             &reply);
-
-        if (status == OK) {
-            status = reply.readInt32();
-        }
-
-        return status;
-    }
-};
-
-IMPLEMENT_META_INTERFACE(CommonTimeConfig, "android.os.ICommonTimeConfig");
-
-status_t BnCommonTimeConfig::onTransact(uint32_t code,
-                                   const Parcel& data,
-                                   Parcel* reply,
-                                   uint32_t flags) {
-    switch(code) {
-        case GET_MASTER_ELECTION_PRIORITY: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            uint8_t priority;
-            status_t status = getMasterElectionPriority(&priority);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(static_cast<int32_t>(priority));
-            }
-            return OK;
-        } break;
-
-        case SET_MASTER_ELECTION_PRIORITY: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            uint8_t priority = static_cast<uint8_t>(data.readInt32());
-            status_t status = setMasterElectionPriority(priority);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case GET_MASTER_ELECTION_ENDPOINT: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            struct sockaddr_storage addr;
-            status_t status = getMasterElectionEndpoint(&addr);
-
-            if ((status == OK) && !canSerializeSockaddr(&addr)) {
-                status = UNKNOWN_ERROR;
-            }
-
-            reply->writeInt32(status);
-
-            if (status == OK) {
-                serializeSockaddr(reply, &addr);
-            }
-
-            return OK;
-        } break;
-
-        case SET_MASTER_ELECTION_ENDPOINT: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            struct sockaddr_storage addr;
-            int hasAddr = data.readInt32();
-
-            status_t status;
-            if (hasAddr) {
-                deserializeSockaddr(&data, &addr);
-                status = setMasterElectionEndpoint(&addr);
-            } else {
-                status = setMasterElectionEndpoint(&addr);
-            }
-
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case GET_MASTER_ELECTION_GROUP_ID: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            uint64_t id;
-            status_t status = getMasterElectionGroupId(&id);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt64(id);
-            }
-            return OK;
-        } break;
-
-        case SET_MASTER_ELECTION_GROUP_ID: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            uint64_t id = static_cast<uint64_t>(data.readInt64());
-            status_t status = setMasterElectionGroupId(id);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case GET_INTERFACE_BINDING: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            String16 ret;
-            status_t status = getInterfaceBinding(ret);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeString16(ret);
-            }
-            return OK;
-        } break;
-
-        case SET_INTERFACE_BINDING: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            String16 ifaceName;
-            ifaceName = data.readString16();
-            status_t status = setInterfaceBinding(ifaceName);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case GET_MASTER_ANNOUNCE_INTERVAL: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            int interval;
-            status_t status = getMasterAnnounceInterval(&interval);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(interval);
-            }
-            return OK;
-        } break;
-
-        case SET_MASTER_ANNOUNCE_INTERVAL: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            int interval = data.readInt32();
-            status_t status = setMasterAnnounceInterval(interval);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case GET_CLIENT_SYNC_INTERVAL: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            int interval;
-            status_t status = getClientSyncInterval(&interval);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(interval);
-            }
-            return OK;
-        } break;
-
-        case SET_CLIENT_SYNC_INTERVAL: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            int interval = data.readInt32();
-            status_t status = setClientSyncInterval(interval);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case GET_PANIC_THRESHOLD: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            int threshold;
-            status_t status = getPanicThreshold(&threshold);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(threshold);
-            }
-            return OK;
-        } break;
-
-        case SET_PANIC_THRESHOLD: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            int threshold = data.readInt32();
-            status_t status = setPanicThreshold(threshold);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case GET_AUTO_DISABLE: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            bool autoDisable;
-            status_t status = getAutoDisable(&autoDisable);
-            reply->writeInt32(status);
-            if (status == OK) {
-                reply->writeInt32(autoDisable ? 1 : 0);
-            }
-            return OK;
-        } break;
-
-        case SET_AUTO_DISABLE: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            bool autoDisable = (0 != data.readInt32());
-            status_t status = setAutoDisable(autoDisable);
-            reply->writeInt32(status);
-            return OK;
-        } break;
-
-        case FORCE_NETWORKLESS_MASTER_MODE: {
-            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
-            status_t status = forceNetworklessMasterMode();
-            reply->writeInt32(status);
-            return OK;
-        } break;
-    }
-    return BBinder::onTransact(code, data, reply, flags);
-}
-
-}; // namespace android
-
diff --git a/media/common_time/MODULE_LICENSE_APACHE2 b/media/common_time/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/media/common_time/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/media/common_time/NOTICE b/media/common_time/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/media/common_time/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
-   Copyright (c) 2005-2008, The Android Open Source Project
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
diff --git a/media/common_time/cc_helper.cpp b/media/common_time/cc_helper.cpp
deleted file mode 100644
index 6a7de74..0000000
--- a/media/common_time/cc_helper.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdint.h>
-
-#include <common_time/cc_helper.h>
-#include <common_time/ICommonClock.h>
-#include <utils/threads.h>
-
-namespace android {
-
-Mutex CCHelper::lock_;
-sp<ICommonClock> CCHelper::common_clock_;
-sp<ICommonClockListener> CCHelper::common_clock_listener_;
-uint32_t CCHelper::ref_count_ = 0;
-
-bool CCHelper::verifyClock_l() {
-    bool ret = false;
-
-    if (common_clock_ == NULL) {
-        common_clock_ = ICommonClock::getInstance();
-        if (common_clock_ == NULL)
-            goto bailout;
-    }
-
-    if (ref_count_ > 0) {
-        if (common_clock_listener_ == NULL) {
-            common_clock_listener_ = new CommonClockListener();
-            if (common_clock_listener_ == NULL)
-                goto bailout;
-
-            if (OK != common_clock_->registerListener(common_clock_listener_))
-                goto bailout;
-        }
-    }
-
-    ret = true;
-
-bailout:
-    if (!ret) {
-        common_clock_listener_ = NULL;
-        common_clock_ = NULL;
-    }
-    return ret;
-}
-
-CCHelper::CCHelper() {
-    Mutex::Autolock lock(&lock_);
-    ref_count_++;
-    verifyClock_l();
-}
-
-CCHelper::~CCHelper() {
-    Mutex::Autolock lock(&lock_);
-
-    assert(ref_count_ > 0);
-    ref_count_--;
-
-    // If we were the last CCHelper instance in the system, and we had
-    // previously register a listener, unregister it now so that the common time
-    // service has the chance to go into auto-disabled mode.
-    if (!ref_count_ &&
-       (common_clock_ != NULL) &&
-       (common_clock_listener_ != NULL)) {
-        common_clock_->unregisterListener(common_clock_listener_);
-        common_clock_listener_ = NULL;
-    }
-}
-
-void CCHelper::CommonClockListener::onTimelineChanged(uint64_t /*timelineID*/) {
-    // do nothing; listener is only really used as a token so the server can
-    // find out when clients die.
-}
-
-// Helper methods which attempts to make calls to the common time binder
-// service.  If the first attempt fails with DEAD_OBJECT, the helpers will
-// attempt to make a connection to the service again (assuming that the process
-// hosting the service had crashed and the client proxy we are holding is dead)
-// If the second attempt fails, or no connection can be made, the we let the
-// error propagate up the stack and let the caller deal with the situation as
-// best they can.
-#define CCHELPER_METHOD(decl, call)                 \
-    status_t CCHelper::decl {                       \
-        Mutex::Autolock lock(&lock_);               \
-                                                    \
-        if (!verifyClock_l())                       \
-            return DEAD_OBJECT;                     \
-                                                    \
-        status_t status = common_clock_->call;      \
-        if (DEAD_OBJECT == status) {                \
-            if (!verifyClock_l())                   \
-                return DEAD_OBJECT;                 \
-            status = common_clock_->call;           \
-        }                                           \
-                                                    \
-        return status;                              \
-    }
-
-#define VERIFY_CLOCK()
-
-CCHELPER_METHOD(isCommonTimeValid(bool* valid, uint32_t* timelineID),
-                isCommonTimeValid(valid, timelineID))
-CCHELPER_METHOD(commonTimeToLocalTime(int64_t commonTime, int64_t* localTime),
-                commonTimeToLocalTime(commonTime, localTime))
-CCHELPER_METHOD(localTimeToCommonTime(int64_t localTime, int64_t* commonTime),
-                localTimeToCommonTime(localTime, commonTime))
-CCHELPER_METHOD(getCommonTime(int64_t* commonTime),
-                getCommonTime(commonTime))
-CCHELPER_METHOD(getCommonFreq(uint64_t* freq),
-                getCommonFreq(freq))
-CCHELPER_METHOD(getLocalTime(int64_t* localTime),
-                getLocalTime(localTime))
-CCHELPER_METHOD(getLocalFreq(uint64_t* freq),
-                getLocalFreq(freq))
-
-}  // namespace android
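
The deleted CCHelper methods wrapped every common-time call in a retry-once-on-DEAD_OBJECT pattern: if the cached proxy has died, reconnect to the service and repeat the call once before letting the error propagate to the caller. A minimal standalone sketch of that pattern follows; Service, Status and reconnect() are hypothetical stand-ins, not the real binder or ICommonClock API.

    // Sketch of the retry-once-on-dead-proxy pattern used by the deleted
    // CCHelper methods. Service, Status and reconnect() are placeholders,
    // not the real binder interfaces.
    #include <functional>
    #include <iostream>
    #include <memory>

    enum class Status { OK, DEAD_OBJECT, ERROR };

    struct Service {
        // Pretend remote call; a real proxy would go through binder.
        Status getTime(long* out) { *out = 42; return Status::OK; }
    };

    static std::shared_ptr<Service> gService;

    // Re-resolve the service proxy; returns false if it cannot be reached.
    static bool reconnect() {
        gService = std::make_shared<Service>();
        return gService != nullptr;
    }

    // Run fn against the current proxy; if the proxy died, reconnect once and retry.
    static Status callWithRetry(const std::function<Status(Service&)>& fn) {
        if (!gService && !reconnect()) return Status::DEAD_OBJECT;
        Status s = fn(*gService);
        if (s == Status::DEAD_OBJECT) {
            if (!reconnect()) return Status::DEAD_OBJECT;
            s = fn(*gService);  // second and final attempt
        }
        return s;               // persistent failures are left to the caller
    }

    int main() {
        long t = 0;
        Status s = callWithRetry([&](Service& svc) { return svc.getTime(&t); });
        std::cout << (s == Status::OK ? t : -1) << "\n";
    }
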
diff --git a/media/common_time/local_clock.cpp b/media/common_time/local_clock.cpp
deleted file mode 100644
index a7c61fc..0000000
--- a/media/common_time/local_clock.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "common_time"
-#include <utils/Log.h>
-
-#include <assert.h>
-#include <stdint.h>
-
-#include <common_time/local_clock.h>
-#include <hardware/hardware.h>
-#include <hardware/local_time_hal.h>
-#include <utils/Errors.h>
-#include <utils/threads.h>
-
-namespace android {
-
-Mutex LocalClock::dev_lock_;
-local_time_hw_device_t* LocalClock::dev_ = NULL;
-
-LocalClock::LocalClock() {
-    int res;
-    const hw_module_t* mod;
-
-    AutoMutex lock(&dev_lock_);
-
-    if (dev_ != NULL)
-        return;
-
-    res = hw_get_module_by_class(LOCAL_TIME_HARDWARE_MODULE_ID, NULL, &mod);
-    if (res) {
-        ALOGE("Failed to open local time HAL module (res = %d)", res);
-    } else {
-        res = local_time_hw_device_open(mod, &dev_);
-        if (res) {
-            ALOGE("Failed to open local time HAL device (res = %d)", res);
-            dev_ = NULL;
-        }
-    }
-}
-
-bool LocalClock::initCheck() {
-    return (NULL != dev_);
-}
-
-int64_t LocalClock::getLocalTime() {
-    assert(NULL != dev_);
-    assert(NULL != dev_->get_local_time);
-
-    return dev_->get_local_time(dev_);
-}
-
-uint64_t LocalClock::getLocalFreq() {
-    assert(NULL != dev_);
-    assert(NULL != dev_->get_local_freq);
-
-    return dev_->get_local_freq(dev_);
-}
-
-status_t LocalClock::setLocalSlew(int16_t rate) {
-    assert(NULL != dev_);
-
-    if (!dev_->set_local_slew)
-        return INVALID_OPERATION;
-
-    return static_cast<status_t>(dev_->set_local_slew(dev_, rate));
-}
-
-int32_t LocalClock::getDebugLog(struct local_time_debug_event* records,
-                                int max_records) {
-    assert(NULL != dev_);
-
-    if (!dev_->get_debug_log)
-        return INVALID_OPERATION;
-
-    return dev_->get_debug_log(dev_, records, max_records);
-}
-
-}  // namespace android
diff --git a/media/common_time/utils.cpp b/media/common_time/utils.cpp
deleted file mode 100644
index 91cf2fd..0000000
--- a/media/common_time/utils.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <arpa/inet.h>
-#include <linux/socket.h>
-
-#include <binder/Parcel.h>
-
-namespace android {
-
-bool canSerializeSockaddr(const struct sockaddr_storage* addr) {
-    switch (addr->ss_family) {
-        case AF_INET:
-        case AF_INET6:
-            return true;
-        default:
-            return false;
-    }
-}
-
-void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr) {
-    switch (addr->ss_family) {
-        case AF_INET: {
-            const struct sockaddr_in* s =
-                reinterpret_cast<const struct sockaddr_in*>(addr);
-            p->writeInt32(AF_INET);
-            p->writeInt32(ntohl(s->sin_addr.s_addr));
-            p->writeInt32(static_cast<int32_t>(ntohs(s->sin_port)));
-        } break;
-
-        case AF_INET6: {
-            const struct sockaddr_in6* s =
-                reinterpret_cast<const struct sockaddr_in6*>(addr);
-            const int32_t* a =
-                reinterpret_cast<const int32_t*>(s->sin6_addr.s6_addr);
-            p->writeInt32(AF_INET6);
-            p->writeInt32(ntohl(a[0]));
-            p->writeInt32(ntohl(a[1]));
-            p->writeInt32(ntohl(a[2]));
-            p->writeInt32(ntohl(a[3]));
-            p->writeInt32(static_cast<int32_t>(ntohs(s->sin6_port)));
-            p->writeInt32(ntohl(s->sin6_flowinfo));
-            p->writeInt32(ntohl(s->sin6_scope_id));
-        } break;
-    }
-}
-
-void deserializeSockaddr(const Parcel* p, struct sockaddr_storage* addr) {
-    memset(addr, 0, sizeof(*addr));
-
-    addr->ss_family = p->readInt32();
-    switch(addr->ss_family) {
-        case AF_INET: {
-            struct sockaddr_in* s =
-                reinterpret_cast<struct sockaddr_in*>(addr);
-            s->sin_addr.s_addr = htonl(p->readInt32());
-            s->sin_port = htons(static_cast<uint16_t>(p->readInt32()));
-        } break;
-
-        case AF_INET6: {
-            struct sockaddr_in6* s =
-                reinterpret_cast<struct sockaddr_in6*>(addr);
-            int32_t* a = reinterpret_cast<int32_t*>(s->sin6_addr.s6_addr);
-
-            a[0] = htonl(p->readInt32());
-            a[1] = htonl(p->readInt32());
-            a[2] = htonl(p->readInt32());
-            a[3] = htonl(p->readInt32());
-            s->sin6_port = htons(static_cast<uint16_t>(p->readInt32()));
-            s->sin6_flowinfo = htonl(p->readInt32());
-            s->sin6_scope_id = htonl(p->readInt32());
-        } break;
-    }
-}
-
-}  // namespace android
diff --git a/media/common_time/utils.h b/media/common_time/utils.h
deleted file mode 100644
index ce79d0d..0000000
--- a/media/common_time/utils.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_LIBCOMMONCLOCK_UTILS_H
-#define ANDROID_LIBCOMMONCLOCK_UTILS_H
-
-#include <linux/socket.h>
-
-#include <binder/Parcel.h>
-#include <utils/Errors.h>
-
-namespace android {
-
-extern bool canSerializeSockaddr(const struct sockaddr_storage* addr);
-extern void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr);
-extern status_t deserializeSockaddr(const Parcel* p,
-                                    struct sockaddr_storage* addr);
-
-};  // namespace android
-
-#endif  // ANDROID_LIBCOMMONCLOCK_UTILS_H
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
index 92575f2..5f05b42 100644
--- a/media/extractors/aac/Android.bp
+++ b/media/extractors/aac/Android.bp
@@ -9,12 +9,12 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
-        "libstagefright_foundation",
-        "libutils",
     ],
 
     static_libs: [
+        "libstagefright_foundation",
         "libstagefright_metadatautils",
+        "libutils",
     ],
 
     name: "libaacextractor",
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
index bd8a00c..d962b93 100644
--- a/media/extractors/amr/Android.bp
+++ b/media/extractors/amr/Android.bp
@@ -9,6 +9,9 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
+    ],
+
+    static_libs: [
         "libstagefright_foundation",
     ],
 
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 0160ca4..6282793 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -10,11 +10,11 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
-        "libstagefright_foundation",
     ],
 
     static_libs: [
         "libFLAC",
+        "libstagefright_foundation",
     ],
 
     name: "libflacextractor",
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index 5412e99..fde09df18 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -9,12 +9,12 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
-        "libstagefright_foundation"
     ],
 
     static_libs: [
         "libmedia_midiiowrapper",
         "libsonivox",
+        "libstagefright_foundation"
     ],
     name: "libmidiextractor",
     relative_install_path: "extractors",
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index c6cd753..681fd35 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -12,14 +12,14 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
-        "libstagefright_foundation",
-        "libutils",
     ],
 
     static_libs: [
         "libstagefright_flacdec",
+        "libstagefright_foundation",
         "libstagefright_metadatautils",
         "libwebm",
+        "libutils",
     ],
 
     name: "libmkvextractor",
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index fc60fd4..d657582 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -37,7 +37,9 @@
 #include <media/stagefright/MetaDataUtils.h>
 #include <utils/String8.h>
 
+#include <arpa/inet.h>
 #include <inttypes.h>
+#include <vector>
 
 namespace android {
 
@@ -358,7 +360,15 @@
 
             res = mCluster->Parse(pos, len);
             ALOGV("Parse (2) returned %ld", res);
-            CHECK_GE(res, 0);
+
+            if (res < 0) {
+                // I/O error
+
+                ALOGE("Cluster::Parse returned result %ld", res);
+
+                mCluster = NULL;
+                break;
+            }
 
             mBlockEntryIndex = 0;
             continue;
@@ -584,31 +594,15 @@
     }
 
     const uint8_t *data = (const uint8_t *)mbuf->data() + mbuf->range_offset();
-    bool blockEncrypted = data[0] & 0x1;
-    if (blockEncrypted && mbuf->range_length() < 9) {
+    bool encrypted = data[0] & 0x1;
+    bool partitioned = data[0] & 0x2;
+    if (encrypted && mbuf->range_length() < 9) {
         // 1-byte signal + 8-byte IV
         return ERROR_MALFORMED;
     }
 
     MetaDataBase &meta = mbuf->meta_data();
-    if (blockEncrypted) {
-        /*
-         *  0                   1                   2                   3
-         *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-         *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         *  |  Signal Byte  |                                               |
-         *  +-+-+-+-+-+-+-+-+             IV                                |
-         *  |                                                               |
-         *  |               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         *  |               |                                               |
-         *  |-+-+-+-+-+-+-+-+                                               |
-         *  :               Bytes 1..N of encrypted frame                   :
-         *  |                                                               |
-         *  |                                                               |
-         *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         */
-        int32_t plainSizes[] = { 0 };
-        int32_t encryptedSizes[] = { static_cast<int32_t>(mbuf->range_length() - 9) };
+    if (encrypted) {
         uint8_t ctrCounter[16] = { 0 };
         uint32_t type;
         const uint8_t *keyId;
@@ -618,9 +612,83 @@
         meta.setData(kKeyCryptoKey, 0, keyId, keyIdSize);
         memcpy(ctrCounter, data + 1, 8);
         meta.setData(kKeyCryptoIV, 0, ctrCounter, 16);
-        meta.setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
-        meta.setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
-        mbuf->set_range(9, mbuf->range_length() - 9);
+        if (partitioned) {
+            /*  0                   1                   2                   3
+             *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+             * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+             * |  Signal Byte  |                                               |
+             * +-+-+-+-+-+-+-+-+             IV                                |
+             * |                                                               |
+             * |               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+             * |               | num_partition |     Partition 0 offset ->     |
+             * |-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+             * |     -> Partition 0 offset     |              ...              |
+             * |-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+             * |             ...               |     Partition n-1 offset ->   |
+             * |-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+             * |     -> Partition n-1 offset   |                               |
+             * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               |
+             * |                    Clear/encrypted sample data                |
+             * |                                                               |
+             * |                                                               |
+             * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+             */
+            if (mbuf->range_length() < 10) {
+                return ERROR_MALFORMED;
+            }
+            uint8_t numPartitions = data[9];
+            if (mbuf->range_length() - 10 < numPartitions * sizeof(uint32_t)) {
+                return ERROR_MALFORMED;
+            }
+            std::vector<uint32_t> plainSizes, encryptedSizes;
+            uint32_t prev = 0;
+            uint32_t frameOffset = 10 + numPartitions * sizeof(uint32_t);
+            const uint32_t *partitions = reinterpret_cast<const uint32_t*>(data + 10);
+            for (uint32_t i = 0; i <= numPartitions; ++i) {
+                uint32_t p_i = i < numPartitions
+                        ? ntohl(partitions[i])
+                        : (mbuf->range_length() - frameOffset);
+                if (p_i < prev) {
+                    return ERROR_MALFORMED;
+                }
+                uint32_t size = p_i - prev;
+                prev = p_i;
+                if (i % 2) {
+                    encryptedSizes.push_back(size);
+                } else {
+                    plainSizes.push_back(size);
+                }
+            }
+            if (plainSizes.size() > encryptedSizes.size()) {
+                encryptedSizes.push_back(0);
+            }
+            uint32_t sizeofPlainSizes = sizeof(uint32_t) * plainSizes.size();
+            uint32_t sizeofEncryptedSizes = sizeof(uint32_t) * encryptedSizes.size();
+            meta.setData(kKeyPlainSizes, 0, plainSizes.data(), sizeofPlainSizes);
+            meta.setData(kKeyEncryptedSizes, 0, encryptedSizes.data(), sizeofEncryptedSizes);
+            mbuf->set_range(frameOffset, mbuf->range_length() - frameOffset);
+        } else {
+            /*
+             *  0                   1                   2                   3
+             *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+             *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+             *  |  Signal Byte  |                                               |
+             *  +-+-+-+-+-+-+-+-+             IV                                |
+             *  |                                                               |
+             *  |               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+             *  |               |                                               |
+             *  |-+-+-+-+-+-+-+-+                                               |
+             *  :               Bytes 1..N of encrypted frame                   :
+             *  |                                                               |
+             *  |                                                               |
+             *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+             */
+            int32_t plainSizes[] = { 0 };
+            int32_t encryptedSizes[] = { static_cast<int32_t>(mbuf->range_length() - 9) };
+            meta.setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
+            meta.setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
+            mbuf->set_range(9, mbuf->range_length() - 9);
+        }
     } else {
         /*
          *  0                   1                   2                   3
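
The partitioned-frame branch added above derives alternating clear/encrypted subsample sizes from the partition offset table (even-numbered partitions are clear, odd-numbered are encrypted) and pads the encrypted list so both lists end up the same length. A minimal standalone sketch of just that step, assuming the offsets have already been byte-swapped and bounds-checked; splitPartitions() is an illustrative helper, not part of the extractor.

    // Sketch: turn WebM partition offsets into alternating clear/encrypted
    // subsample sizes, mirroring the loop added to MatroskaSource above.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static bool splitPartitions(const std::vector<uint32_t>& offsets,  // partition offsets
                                uint32_t frameSize,                    // payload size after the header
                                std::vector<uint32_t>* clearSizes,
                                std::vector<uint32_t>* encryptedSizes) {
        uint32_t prev = 0;
        for (size_t i = 0; i <= offsets.size(); ++i) {
            uint32_t end = (i < offsets.size()) ? offsets[i] : frameSize;
            if (end < prev || end > frameSize) return false;  // offsets must be non-decreasing
            uint32_t size = end - prev;
            prev = end;
            // Even-numbered partitions are clear, odd-numbered are encrypted.
            ((i % 2) ? encryptedSizes : clearSizes)->push_back(size);
        }
        // Keep the two lists the same length, as the extractor does.
        if (clearSizes->size() > encryptedSizes->size()) encryptedSizes->push_back(0);
        return true;
    }

    int main() {
        std::vector<uint32_t> clear, enc;
        if (splitPartitions({100, 300, 450}, 800, &clear, &enc)) {
            for (size_t i = 0; i < clear.size(); ++i) {
                printf("clear %u / encrypted %u\n", clear[i], enc[i]);
            }
        }
    }
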
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
index d93562c..a3aeaca 100644
--- a/media/extractors/mp3/Android.bp
+++ b/media/extractors/mp3/Android.bp
@@ -14,10 +14,10 @@
         "liblog",
         "libmediaextractor",
         "libstagefright_foundation",
-        "libutils",
     ],
 
     static_libs: [
+        "libutils",
         "libstagefright_id3",
     ],
 
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 0b6e75a..40b2c97 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -16,13 +16,13 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
-        "libstagefright_foundation",
-        "libutils",
     ],
 
     static_libs: [
         "libstagefright_esds",
+        "libstagefright_foundation",
         "libstagefright_id3",
+        "libutils",
     ],
 
     cflags: [
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index 26b8251..be442e6 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -506,7 +506,7 @@
 
         ImageItem &derivedImage = itemIdToItemMap.editValueAt(itemIndex);
         if (!derivedImage.dimgRefs.empty()) {
-            ALOGW("dimgRefs if not clean!");
+            ALOGW("dimgRefs not clean!");
         }
         derivedImage.dimgRefs.appendVector(mRefs);
 
@@ -1397,7 +1397,8 @@
         ALOGV("adding %s: itemId %d", image.isGrid() ? "grid" : "image", info.itemId);
 
         if (image.isGrid()) {
-            if (size > 12) {
+            // The ImageGrid struct is at least 8 bytes and at most 12 bytes (12 if flags & 1 is set)
+            if (size < 8 || size > 12) {
                 return ERROR_MALFORMED;
             }
             uint8_t buf[12];
@@ -1489,6 +1490,17 @@
 
     const ImageItem *image = &mItemIdToItemMap[itemIndex];
 
+    ssize_t tileItemIndex = -1;
+    if (image->isGrid()) {
+        if (image->dimgRefs.empty()) {
+            return NULL;
+        }
+        tileItemIndex = mItemIdToItemMap.indexOfKey(image->dimgRefs[0]);
+        if (tileItemIndex < 0) {
+            return NULL;
+        }
+    }
+
     sp<MetaData> meta = new MetaData;
     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
 
@@ -1517,22 +1529,22 @@
         if (thumbItemIndex >= 0) {
             const ImageItem &thumbnail = mItemIdToItemMap[thumbItemIndex];
 
-            meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
-            meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
-            meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
-                    thumbnail.hvcc->data(), thumbnail.hvcc->size());
-            ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
-                    imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
+            if (thumbnail.hvcc != NULL) {
+                meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
+                meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
+                meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
+                        thumbnail.hvcc->data(), thumbnail.hvcc->size());
+                ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
+                        imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
+            } else {
+                ALOGW("%s: thumbnail data is missing for image[%u]!", __FUNCTION__, imageIndex);
+            }
         } else {
             ALOGW("%s: Referenced thumbnail does not exist!", __FUNCTION__);
         }
     }
 
     if (image->isGrid()) {
-        ssize_t tileItemIndex = mItemIdToItemMap.indexOfKey(image->dimgRefs[0]);
-        if (tileItemIndex < 0) {
-            return NULL;
-        }
         meta->setInt32(kKeyGridRows, image->rows);
         meta->setInt32(kKeyGridCols, image->columns);
 
@@ -1591,10 +1603,9 @@
 
     ssize_t thumbItemIndex = mItemIdToItemMap.indexOfKey(masterImage.thumbnails[0]);
     if (thumbItemIndex < 0) {
-        ALOGW("%s: Thumbnail item id %d not found, use master instead",
-                __FUNCTION__, masterImage.thumbnails[0]);
-        *itemIndex = masterItemIndex;
-        return OK;
+        // Do not return the master image in this case; fail so that the
+        // thumbnail extraction code knows we really don't have a thumbnail.
+        return INVALID_OPERATION;
     }
 
     *itemIndex = thumbItemIndex;
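
The ItemTable changes above resolve a grid image's first tile reference before any metadata is assembled, and treat an empty or dangling reference as a failure instead of discovering it later. A hedged sketch of that guarded-lookup pattern, with a plain std::map and an illustrative ItemInfo struct standing in for the extractor's internal tables.

    // Sketch: validate an item's tile reference before using it, and report
    // "not found" (-1) rather than crashing on a dangling id.
    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <vector>

    struct ItemInfo {
        bool isGrid;
        std::vector<uint32_t> dimgRefs;  // ids of the tile items, for grids
    };

    // Returns the id of the item whose payload should be decoded, or -1 on error.
    static int64_t firstTileId(const std::map<uint32_t, ItemInfo>& items, uint32_t itemId) {
        auto it = items.find(itemId);
        if (it == items.end()) return -1;
        const ItemInfo& image = it->second;
        if (!image.isGrid) return itemId;            // non-grid items carry their own payload
        if (image.dimgRefs.empty()) return -1;       // grid with no tiles: malformed
        if (items.find(image.dimgRefs[0]) == items.end()) return -1;  // dangling reference
        return image.dimgRefs[0];
    }

    int main() {
        std::map<uint32_t, ItemInfo> items;
        items[1] = ItemInfo{true, {7}};  // grid referencing a tile that does not exist
        printf("%lld\n", (long long)firstTileId(items, 1));  // prints -1
    }
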
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index bbc735e..8412812 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -112,6 +112,8 @@
     int32_t mCryptoMode;    // passed in from extractor
     int32_t mDefaultIVSize; // passed in from extractor
     uint8_t mCryptoKey[16]; // passed in from extractor
+    int32_t mDefaultEncryptedByteBlock;
+    int32_t mDefaultSkipByteBlock;
     uint32_t mCurrentAuxInfoType;
     uint32_t mCurrentAuxInfoTypeParameter;
     int32_t mCurrentDefaultSampleInfoSize;
@@ -147,6 +149,8 @@
     status_t parseTrackFragmentRun(off64_t offset, off64_t size);
     status_t parseSampleAuxiliaryInformationSizes(off64_t offset, off64_t size);
     status_t parseSampleAuxiliaryInformationOffsets(off64_t offset, off64_t size);
+    status_t parseClearEncryptedSizes(off64_t offset, bool isSubsampleEncryption, uint32_t flags);
+    status_t parseSampleEncryption(off64_t offset);
 
     struct TrackFragmentHeaderInfo {
         enum Flags {
@@ -326,8 +330,13 @@
         case FOURCC('a', 'c', '-', '4'):
             return MEDIA_MIMETYPE_AUDIO_AC4;
         default:
-            CHECK(!"should not be here.");
-            return NULL;
+            ALOGW("Unknown fourcc: %c%c%c%c",
+                   (fourcc >> 24) & 0xff,
+                   (fourcc >> 16) & 0xff,
+                   (fourcc >> 8) & 0xff,
+                   fourcc & 0xff
+                   );
+            return "application/octet-stream";
     }
 }
 
@@ -628,7 +637,7 @@
             track->meta.setInt32(kKeyTrackID, imageIndex);
             track->includes_expensive_metadata = false;
             track->skipTrack = false;
-            track->timescale = 0;
+            track->timescale = 1000000;
         }
     }
 
@@ -921,6 +930,7 @@
                 track->timescale = 0;
                 track->meta.setCString(kKeyMIMEType, "application/octet-stream");
                 track->has_elst = false;
+                track->subsample_encryption = false;
             }
 
             off64_t stop_offset = *offset + chunk_size;
@@ -980,6 +990,51 @@
             break;
         }
 
+        case FOURCC('s', 'c', 'h', 'm'):
+        {
+
+            *offset += chunk_size;
+            if (!mLastTrack) {
+                return ERROR_MALFORMED;
+            }
+
+            uint32_t scheme_type;
+            if (mDataSource->readAt(data_offset + 4, &scheme_type, 4) < 4) {
+                return ERROR_IO;
+            }
+            scheme_type = ntohl(scheme_type);
+            int32_t mode = kCryptoModeUnencrypted;
+            switch(scheme_type) {
+                case FOURCC('c', 'b', 'c', '1'):
+                {
+                    mode = kCryptoModeAesCbc;
+                    break;
+                }
+                case FOURCC('c', 'b', 'c', 's'):
+                {
+                    mode = kCryptoModeAesCbc;
+                    mLastTrack->subsample_encryption = true;
+                    break;
+                }
+                case FOURCC('c', 'e', 'n', 'c'):
+                {
+                    mode = kCryptoModeAesCtr;
+                    break;
+                }
+                case FOURCC('c', 'e', 'n', 's'):
+                {
+                    mode = kCryptoModeAesCtr;
+                    mLastTrack->subsample_encryption = true;
+                    break;
+                }
+            }
+            if (mode != kCryptoModeUnencrypted) {
+                mLastTrack->meta.setInt32(kKeyCryptoMode, mode);
+            }
+            break;
+        }
+
+
         case FOURCC('e', 'l', 's', 't'):
         {
             *offset += chunk_size;
@@ -1071,31 +1126,54 @@
             // tenc box contains 1 byte version, 3 byte flags, 3 byte default algorithm id, one byte
             // default IV size, 16 bytes default KeyID
             // (ISO 23001-7)
-            char buf[4];
+
+            uint8_t version;
+            if (mDataSource->readAt(data_offset, &version, sizeof(version))
+                    < (ssize_t)sizeof(version)) {
+                return ERROR_IO;
+            }
+
+            uint8_t buf[4];
             memset(buf, 0, 4);
             if (mDataSource->readAt(data_offset + 4, buf + 1, 3) < 3) {
                 return ERROR_IO;
             }
-            uint32_t defaultAlgorithmId = ntohl(*((int32_t*)buf));
-            if (defaultAlgorithmId > 1) {
-                // only 0 (clear) and 1 (AES-128) are valid
+
+            if (mLastTrack == NULL) {
                 return ERROR_MALFORMED;
             }
 
+            uint8_t defaultEncryptedByteBlock = 0;
+            uint8_t defaultSkipByteBlock = 0;
+            uint32_t defaultAlgorithmId = ntohl(*((int32_t*)buf));
+            if (version == 1) {
+                uint32_t pattern = buf[2];
+                defaultEncryptedByteBlock = pattern >> 4;
+                defaultSkipByteBlock = pattern & 0xf;
+                if (defaultEncryptedByteBlock == 0 && defaultSkipByteBlock == 0) {
+                    // use (1,0) to mean "encrypt everything"
+                    defaultEncryptedByteBlock = 1;
+                }
+            } else if (mLastTrack->subsample_encryption) {
+                ALOGW("subsample_encryption should be version 1");
+            } else if (defaultAlgorithmId > 1) {
+                // only 0 (clear) and 1 (AES-128) are valid
+                ALOGW("defaultAlgorithmId: %u is a reserved value", defaultAlgorithmId);
+                defaultAlgorithmId = 1;
+            }
+
             memset(buf, 0, 4);
             if (mDataSource->readAt(data_offset + 7, buf + 3, 1) < 1) {
                 return ERROR_IO;
             }
             uint32_t defaultIVSize = ntohl(*((int32_t*)buf));
 
-            if ((defaultAlgorithmId == 0 && defaultIVSize != 0) ||
-                    (defaultAlgorithmId != 0 && defaultIVSize == 0)) {
+            if (defaultAlgorithmId == 0 && defaultIVSize != 0) {
                 // only unencrypted data must have 0 IV size
                 return ERROR_MALFORMED;
             } else if (defaultIVSize != 0 &&
                     defaultIVSize != 8 &&
                     defaultIVSize != 16) {
-                // only supported sizes are 0, 8 and 16
                 return ERROR_MALFORMED;
             }
 
@@ -1105,12 +1183,41 @@
                 return ERROR_IO;
             }
 
-            if (mLastTrack == NULL)
-                return ERROR_MALFORMED;
+            sp<ABuffer> defaultConstantIv;
+            if (defaultAlgorithmId != 0 && defaultIVSize == 0) {
 
-            mLastTrack->meta.setInt32(kKeyCryptoMode, defaultAlgorithmId);
+                uint8_t ivlength;
+                if (mDataSource->readAt(data_offset + 24, &ivlength, sizeof(ivlength))
+                        < (ssize_t)sizeof(ivlength)) {
+                    return ERROR_IO;
+                }
+
+                if (ivlength != 8 && ivlength != 16) {
+                    ALOGW("unsupported IV length: %u", ivlength);
+                    return ERROR_MALFORMED;
+                }
+
+                defaultConstantIv = new ABuffer(ivlength);
+                if (mDataSource->readAt(data_offset + 25, defaultConstantIv->data(), ivlength)
+                        < (ssize_t)ivlength) {
+                    return ERROR_IO;
+                }
+
+                defaultConstantIv->setRange(0, ivlength);
+            }
+
+            int32_t tmpAlgorithmId;
+            if (!mLastTrack->meta.findInt32(kKeyCryptoMode, &tmpAlgorithmId)) {
+                mLastTrack->meta.setInt32(kKeyCryptoMode, defaultAlgorithmId);
+            }
+
             mLastTrack->meta.setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
             mLastTrack->meta.setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
+            mLastTrack->meta.setInt32(kKeyEncryptedByteBlock, defaultEncryptedByteBlock);
+            mLastTrack->meta.setInt32(kKeySkipByteBlock, defaultSkipByteBlock);
+            if (defaultConstantIv != NULL) {
+                mLastTrack->meta.setData(kKeyCryptoIV, 'dciv', defaultConstantIv->data(), defaultConstantIv->size());
+            }
             break;
         }
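
For version-1 'tenc' boxes, the hunk above unpacks the cbcs/cens pattern byte: the high nibble is the default crypt (encrypted) block count, the low nibble is the default skip block count, and a (0,0) pattern is normalized to (1,0), i.e. "encrypt everything". A small sketch of that decoding under the same convention; decodeTencPattern() and CryptoPattern are illustrative names only.

    // Sketch: unpack the version-1 'tenc' pattern byte into the 16-byte block
    // counts used for cbcs/cens pattern encryption, normalizing (0,0) to (1,0)
    // as the extractor does above.
    #include <cstdint>
    #include <cstdio>

    struct CryptoPattern {
        uint8_t encryptBlocks;  // blocks to decrypt per stripe (default_crypt_byte_block)
        uint8_t skipBlocks;     // blocks to leave clear per stripe (default_skip_byte_block)
    };

    static CryptoPattern decodeTencPattern(uint8_t patternByte) {
        CryptoPattern p;
        p.encryptBlocks = patternByte >> 4;
        p.skipBlocks = patternByte & 0x0f;
        if (p.encryptBlocks == 0 && p.skipBlocks == 0) {
            p.encryptBlocks = 1;  // whole-sample encryption
        }
        return p;
    }

    int main() {
        CryptoPattern p = decodeTencPattern(0x19);  // common 1:9 cbcs pattern
        printf("crypt=%d skip=%d\n", p.encryptBlocks, p.skipBlocks);
    }
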
 
@@ -1610,7 +1717,10 @@
 
                 const char *mime;
                 CHECK(mLastTrack->meta.findCString(kKeyMIMEType, &mime));
-                if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+                if (!strncmp(mime, "audio/", 6)) {
+                    // for audio, use 128KB
+                    max_size = 1024 * 128;
+                } else if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
                         || !strcmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
                     // AVC & HEVC requires compression ratio of at least 2, and uses
                     // macroblocks
@@ -3828,6 +3938,8 @@
       mCurrentMoofOffset(firstMoofOffset),
       mNextMoofOffset(-1),
       mCurrentTime(0),
+      mDefaultEncryptedByteBlock(0),
+      mDefaultSkipByteBlock(0),
       mCurrentSampleInfoAllocSize(0),
       mCurrentSampleInfoSizes(NULL),
       mCurrentSampleInfoOffsetsAllocSize(0),
@@ -3858,6 +3970,9 @@
         memcpy(mCryptoKey, key, keysize);
     }
 
+    mFormat.findInt32(kKeyEncryptedByteBlock, &mDefaultEncryptedByteBlock);
+    mFormat.findInt32(kKeySkipByteBlock, &mDefaultSkipByteBlock);
+
     const char *mime;
     bool success = mFormat.findCString(kKeyMIMEType, &mime);
     CHECK(success);
@@ -3944,9 +4059,10 @@
     }
 
     // Allow up to kMaxBuffers, but not if the total exceeds kMaxBufferSize.
+    const size_t kInitialBuffers = 2;
     const size_t kMaxBuffers = 8;
-    const size_t buffers = min(kMaxBufferSize / max_size, kMaxBuffers);
-    mGroup = new MediaBufferGroup(buffers, max_size);
+    const size_t realMaxBuffers = min(kMaxBufferSize / max_size, kMaxBuffers);
+    mGroup = new MediaBufferGroup(kInitialBuffers, max_size, realMaxBuffers);
     mSrcBuffer = new (std::nothrow) uint8_t[max_size];
     if (mSrcBuffer == NULL) {
         // file probably specified a bad max size
@@ -4104,6 +4220,15 @@
             break;
         }
 
+        case FOURCC('s', 'e', 'n', 'c'): {
+            status_t err;
+            if ((err = parseSampleEncryption(data_offset)) != OK) {
+                return err;
+            }
+            *offset += chunk_size;
+            break;
+        }
+
         case FOURCC('m', 'd', 'a', 't'): {
             // parse DRM info if present
             ALOGV("MPEG4Source::parseChunk mdat");
@@ -4254,6 +4379,12 @@
     off64_t drmoffset = mCurrentSampleInfoOffsets[0]; // from moof
 
     drmoffset += mCurrentMoofOffset;
+
+    return parseClearEncryptedSizes(drmoffset, false, 0);
+}
+
+status_t MPEG4Source::parseClearEncryptedSizes(off64_t offset, bool isSubsampleEncryption, uint32_t flags) {
+
     int ivlength;
     CHECK(mFormat.findInt32(kKeyCryptoDefaultIVSize, &ivlength));
 
@@ -4262,42 +4393,61 @@
         ALOGW("unsupported IV length: %d", ivlength);
         return ERROR_MALFORMED;
     }
+
+    uint32_t sampleCount = mCurrentSampleInfoCount;
+    if (isSubsampleEncryption) {
+        if (!mDataSource->getUInt32(offset, &sampleCount)) {
+            return ERROR_IO;
+        }
+        offset += 4;
+    }
+
     // read CencSampleAuxiliaryDataFormats
-    for (size_t i = 0; i < mCurrentSampleInfoCount; i++) {
+    for (size_t i = 0; i < sampleCount; i++) {
         if (i >= mCurrentSamples.size()) {
             ALOGW("too few samples");
             break;
         }
         Sample *smpl = &mCurrentSamples.editItemAt(i);
+        if (!smpl->clearsizes.isEmpty()) {
+            continue;
+        }
 
         memset(smpl->iv, 0, 16);
-        if (mDataSource->readAt(drmoffset, smpl->iv, ivlength) != ivlength) {
+        if (mDataSource->readAt(offset, smpl->iv, ivlength) != ivlength) {
             return ERROR_IO;
         }
 
-        drmoffset += ivlength;
+        offset += ivlength;
 
-        int32_t smplinfosize = mCurrentDefaultSampleInfoSize;
-        if (smplinfosize == 0) {
-            smplinfosize = mCurrentSampleInfoSizes[i];
+        bool readSubsamples;
+        if (isSubsampleEncryption) {
+            readSubsamples = flags & 2;
+        } else {
+            int32_t smplinfosize = mCurrentDefaultSampleInfoSize;
+            if (smplinfosize == 0) {
+                smplinfosize = mCurrentSampleInfoSizes[i];
+            }
+            readSubsamples = smplinfosize > ivlength;
         }
-        if (smplinfosize > ivlength) {
+
+        if (readSubsamples) {
             uint16_t numsubsamples;
-            if (!mDataSource->getUInt16(drmoffset, &numsubsamples)) {
+            if (!mDataSource->getUInt16(offset, &numsubsamples)) {
                 return ERROR_IO;
             }
-            drmoffset += 2;
+            offset += 2;
             for (size_t j = 0; j < numsubsamples; j++) {
                 uint16_t numclear;
                 uint32_t numencrypted;
-                if (!mDataSource->getUInt16(drmoffset, &numclear)) {
+                if (!mDataSource->getUInt16(offset, &numclear)) {
                     return ERROR_IO;
                 }
-                drmoffset += 2;
-                if (!mDataSource->getUInt32(drmoffset, &numencrypted)) {
+                offset += 2;
+                if (!mDataSource->getUInt32(offset, &numencrypted)) {
                     return ERROR_IO;
                 }
-                drmoffset += 4;
+                offset += 4;
                 smpl->clearsizes.add(numclear);
                 smpl->encryptedsizes.add(numencrypted);
             }
@@ -4307,10 +4457,17 @@
         }
     }
 
-
     return OK;
 }
 
+status_t MPEG4Source::parseSampleEncryption(off64_t offset) {
+    uint32_t flags;
+    if (!mDataSource->getUInt32(offset, &flags)) { // actually version + flags
+        return ERROR_MALFORMED;
+    }
+    return parseClearEncryptedSizes(offset + 4, true, flags);
+}
+
 status_t MPEG4Source::parseTrackFragmentHeader(off64_t offset, off64_t size) {
 
     if (size < 8) {
@@ -4562,6 +4719,7 @@
         tmp.size = sampleSize;
         tmp.duration = sampleDuration;
         tmp.compositionOffset = sampleCtsOffset;
+        memset(tmp.iv, 0, sizeof(tmp.iv));
         mCurrentSamples.add(tmp);
 
         dataOffset += sampleSize;
@@ -5125,10 +5283,22 @@
                 smpl->clearsizes.array(), smpl->clearsizes.size() * 4);
         bufmeta.setData(kKeyEncryptedSizes, 0,
                 smpl->encryptedsizes.array(), smpl->encryptedsizes.size() * 4);
-        bufmeta.setData(kKeyCryptoIV, 0, smpl->iv, 16); // use 16 or the actual size?
         bufmeta.setInt32(kKeyCryptoDefaultIVSize, mDefaultIVSize);
         bufmeta.setInt32(kKeyCryptoMode, mCryptoMode);
         bufmeta.setData(kKeyCryptoKey, 0, mCryptoKey, 16);
+        bufmeta.setInt32(kKeyEncryptedByteBlock, mDefaultEncryptedByteBlock);
+        bufmeta.setInt32(kKeySkipByteBlock, mDefaultSkipByteBlock);
+
+        uint32_t type = 0;
+        const void *iv = NULL;
+        size_t ivlength = 0;
+        if (!mFormat.findData(
+                kKeyCryptoIV, &type, &iv, &ivlength)) {
+            iv = smpl->iv;
+            ivlength = 16; // use 16 or the actual size?
+        }
+        bufmeta.setData(kKeyCryptoIV, 0, iv, ivlength);
+
     }
 
     if ((!mIsAVC && !mIsHEVC)|| mWantsNALFragments) {
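
parseSampleEncryption() and parseClearEncryptedSizes() above walk CENC per-sample auxiliary data: an IV for each sample and, when subsample information is present, a 16-bit count followed by (16-bit clear size, 32-bit encrypted size) pairs. The following sketch reads one sample's worth of that layout from an in-memory buffer, assuming big-endian fields and a caller-supplied IV size; it is illustrative only and not tied to the extractor's DataSource API.

    // Sketch: parse one sample's CENC sample-encryption data (IV, then
    // optional subsample ranges) from a buffer laid out the way
    // parseClearEncryptedSizes above expects.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    static uint16_t readU16(const uint8_t* p) { return (uint16_t)((p[0] << 8) | p[1]); }
    static uint32_t readU32(const uint8_t* p) {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) | ((uint32_t)p[2] << 8) | p[3];
    }

    struct SampleCrypto {
        uint8_t iv[16] = {0};
        std::vector<uint16_t> clearSizes;
        std::vector<uint32_t> encryptedSizes;
    };

    // Returns the number of bytes consumed, or 0 on malformed data.
    static size_t parseOneSample(const uint8_t* data, size_t size, size_t ivSize,
                                 bool hasSubsamples, SampleCrypto* out) {
        size_t off = 0;
        if (size < ivSize) return 0;
        memcpy(out->iv, data, ivSize);
        off += ivSize;
        if (!hasSubsamples) return off;
        if (size - off < 2) return 0;
        uint16_t count = readU16(data + off);
        off += 2;
        for (uint16_t i = 0; i < count; ++i) {
            if (size - off < 6) return 0;
            out->clearSizes.push_back(readU16(data + off));
            out->encryptedSizes.push_back(readU32(data + off + 2));
            off += 6;
        }
        return off;
    }

    int main() {
        // 8-byte IV, one subsample: 16 clear bytes, 240 encrypted bytes.
        const uint8_t buf[] = {1,2,3,4,5,6,7,8, 0,1, 0,16, 0,0,0,240};
        SampleCrypto s;
        size_t used = parseOneSample(buf, sizeof(buf), 8, true, &s);
        printf("consumed %zu bytes, %zu subsample(s)\n", used, s.clearSizes.size());
    }
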
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 6148334..ed70aa7 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -85,6 +85,7 @@
         bool has_elst;
         int64_t elst_media_time;
         uint64_t elst_segment_duration;
+        bool subsample_encryption;
     };
 
     Vector<SidxEntry> mSidxEntries;
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index b012b5d..5e4a592 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -23,11 +23,11 @@
         "liblog",
         "libmediaextractor",
         "libstagefright_foundation",
-        "libutils",
     ],
 
     static_libs: [
         "libstagefright_mpeg2support",
+        "libutils",
     ],
 
     name: "libmpeg2extractor",
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index 2c09a5f..7c6fc75 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -10,11 +10,11 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
-        "libstagefright_foundation",
-        "libutils",
     ],
 
     static_libs: [
+        "libstagefright_foundation",
+        "libutils",
         "libvorbisidec",
     ],
 
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index 17836bb..067933e 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -9,11 +9,11 @@
     shared_libs: [
         "liblog",
         "libmediaextractor",
-        "libstagefright_foundation",
     ],
 
     static_libs: [
         "libfifo",
+        "libstagefright_foundation",
     ],
 
     name: "libwavextractor",
diff --git a/media/img_utils/include/img_utils/DngUtils.h b/media/img_utils/include/img_utils/DngUtils.h
index 1d8df9c..de8f120 100644
--- a/media/img_utils/include/img_utils/DngUtils.h
+++ b/media/img_utils/include/img_utils/DngUtils.h
@@ -39,11 +39,16 @@
  */
 class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
     public:
+        // Note that the Adobe DNG 1.4 spec for Bayer phase (defined for the
+        // FixBadPixelsConstant and FixBadPixelsList opcodes) is incorrect. It's
+        // inconsistent with the DNG SDK (cf. dng_negative::SetBayerMosaic and
+        // dng_opcode_FixBadPixelsList::IsGreen), and Adobe confirms that the
+        // spec should be updated to match the SDK.
         enum CfaLayout {
-            CFA_RGGB = 0,
-            CFA_GRBG,
-            CFA_GBRG,
+            CFA_GRBG = 0,
+            CFA_RGGB,
             CFA_BGGR,
+            CFA_GBRG,
         };
 
         OpcodeListBuilder();
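
For reference, each CfaLayout name spells out its 2x2 Bayer tile in row-major order; the reordering above only changes which numeric code each layout receives so that it matches the DNG SDK. A tiny sketch of the name-to-pattern mapping (the enum values simply mirror the declaration above):

    // Sketch: what the CfaLayout names denote. Each name lists the 2x2 Bayer
    // tile row by row (top-left, top-right / bottom-left, bottom-right).
    #include <cstdio>

    enum CfaLayout { CFA_GRBG = 0, CFA_RGGB, CFA_BGGR, CFA_GBRG };

    static const char* cfaTile(CfaLayout l) {
        switch (l) {
            case CFA_RGGB: return "R G / G B";
            case CFA_GRBG: return "G R / B G";
            case CFA_GBRG: return "G B / R G";
            case CFA_BGGR: return "B G / G R";
        }
        return "?";
    }

    int main() {
        printf("CFA_GRBG tile: %s\n", cfaTile(CFA_GRBG));
    }
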
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index 67ec244..9ac7e2a 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -302,29 +302,14 @@
     normalizedOCX = CLAMP(normalizedOCX, 0, 1);
     normalizedOCY = CLAMP(normalizedOCY, 0, 1);
 
-    // Conversion factors from Camera2 K factors to DNG spec. K factors:
-    //
-    //      Note: these are necessary because our unit system assumes a
-    //      normalized max radius of sqrt(2), whereas the DNG spec's
-    //      WarpRectilinear opcode assumes a normalized max radius of 1.
-    //      Thus, each K coefficient must include the domain scaling
-    //      factor (the DNG domain is scaled by sqrt(2) to emulate the
-    //      domain used by the Camera2 specification).
-
-    const double c_0 = sqrt(2);
-    const double c_1 = 2 * sqrt(2);
-    const double c_2 = 4 * sqrt(2);
-    const double c_3 = 8 * sqrt(2);
-    const double c_4 = 2;
-    const double c_5 = 2;
-
-    const double coeffs[] = { c_0 * kCoeffs[0],
-                              c_1 * kCoeffs[1],
-                              c_2 * kCoeffs[2],
-                              c_3 * kCoeffs[3],
-                              c_4 * kCoeffs[4],
-                              c_5 * kCoeffs[5] };
-
+    double coeffs[6] = {
+        kCoeffs[0],
+        kCoeffs[1],
+        kCoeffs[2],
+        kCoeffs[3],
+        kCoeffs[4],
+        kCoeffs[5]
+    };
 
     return addWarpRectilinear(/*numPlanes*/1,
                               /*opticalCenterX*/normalizedOCX,
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
index 986158f..d10f812 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -26,30 +26,39 @@
 #include "AAudioExampleUtils.h"
 #include "AAudioSimpleRecorder.h"
 
-#define NUM_SECONDS           5
-
-int main(int argc, char **argv)
+int main(int argc, const char **argv)
 {
-    (void)argc; // unused
-    AAudioSimpleRecorder recorder;
-    PeakTrackerData_t myData = {0.0};
-    aaudio_result_t result;
+    AAudioArgsParser      argParser;
+    AAudioSimpleRecorder  recorder;
+    PeakTrackerData_t     myData = {0.0};
+    AAudioStream         *aaudioStream = nullptr;
+    aaudio_result_t       result;
     aaudio_stream_state_t state;
+
+    int       loopsNeeded = 0;
     const int displayRateHz = 20; // arbitrary
-    const int loopsNeeded = NUM_SECONDS * displayRateHz;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
-    printf("%s - Display audio input using an AAudio callback, V0.1.2\n", argv[0]);
+    printf("%s - Display audio input using an AAudio callback, V0.1.3\n", argv[0]);
 
-    result = recorder.open(2, 48000, AAUDIO_FORMAT_PCM_I16,
-                       SimpleRecorderDataCallbackProc, SimpleRecorderErrorCallbackProc, &myData);
+    if (argParser.parseArgs(argc, argv)) {
+        return EXIT_FAILURE;
+    }
+
+    result = recorder.open(argParser,
+                           SimpleRecorderDataCallbackProc,
+                           SimpleRecorderErrorCallbackProc,
+                           &myData);
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  recorder.open() returned %d\n", result);
         printf("IMPORTANT - Did you remember to enter:   adb root\n");
         goto error;
     }
+    aaudioStream = recorder.getStream();
+    argParser.compareWithStream(aaudioStream);
+
     printf("recorder.getFramesPerSecond() = %d\n", recorder.getFramesPerSecond());
     printf("recorder.getSamplesPerFrame() = %d\n", recorder.getSamplesPerFrame());
 
@@ -59,7 +68,9 @@
         goto error;
     }
 
-    printf("Sleep for %d seconds while audio record in a callback thread.\n", NUM_SECONDS);
+    printf("Sleep for %d seconds while audio record in a callback thread.\n",
+           argParser.getDurationSeconds());
+    loopsNeeded = argParser.getDurationSeconds() * displayRateHz;
     for (int i = 0; i < loopsNeeded; i++)
     {
         const struct timespec request = { .tv_sec = 0,
@@ -68,7 +79,7 @@
         printf("%08d: ", (int)recorder.getFramesRead());
         displayPeakLevel(myData.peakLevel);
 
-        result = AAudioStream_waitForStateChange(recorder.getStream(),
+        result = AAudioStream_waitForStateChange(aaudioStream,
                                                  AAUDIO_STREAM_STATE_CLOSED,
                                                  &state,
                                                  0);
@@ -94,7 +105,8 @@
         goto error;
     }
 
-    printf("Sleep for %d seconds while audio records in a callback thread.\n", NUM_SECONDS);
+    printf("Sleep for %d seconds while audio records in a callback thread.\n",
+           argParser.getDurationSeconds());
     for (int i = 0; i < loopsNeeded; i++)
     {
         const struct timespec request = { .tv_sec = 0,
@@ -103,13 +115,14 @@
         printf("%08d: ", (int)recorder.getFramesRead());
         displayPeakLevel(myData.peakLevel);
 
-        state = AAudioStream_getState(recorder.getStream());
+        state = AAudioStream_getState(aaudioStream);
         if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
             printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
             break;
         }
     }
     printf("Woke up now.\n");
+    argParser.compareWithStream(aaudioStream);
 
     result = recorder.stop();
     if (result != AAUDIO_OK) {
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
index 1e282d1..ef9a753 100644
--- a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
+++ b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
@@ -41,12 +41,16 @@
 #define MAX_ZEROTH_PARTIAL_BINS  40
 constexpr double MAX_ECHO_GAIN = 10.0; // based on experiments, otherwise autocorrelation too noisy
 
+// A narrow impulse seems to have better immunity against overestimating the
+// latency due to the auto-correlator detecting subharmonics.
 static const float s_Impulse[] = {
-        0.0f, 0.0f, 0.0f, 0.0f, 0.2f, // silence on each side of the impulse
-        0.5f, 0.9999f, 0.0f, -0.9999, -0.5f, // bipolar
-        -0.2f, 0.0f, 0.0f, 0.0f, 0.0f
+        0.0f, 0.0f, 0.0f, 0.0f, 0.3f, // silence on each side of the impulse
+        0.99f, 0.0f, -0.99f, // bipolar with one zero crossing in middle
+        -0.3f, 0.0f, 0.0f, 0.0f, 0.0f
 };
 
+constexpr int32_t kImpulseSizeInFrames = (int32_t)(sizeof(s_Impulse) / sizeof(s_Impulse[0]));
+
 class PseudoRandom {
 public:
     PseudoRandom() {}
@@ -495,16 +499,26 @@
     }
 
     void printStatus() override {
-        printf("state = %d, echo gain = %f ", mState, mEchoGain);
+        printf("st = %d, echo gain = %f ", mState, mEchoGain);
     }
 
-    static void sendImpulse(float *outputData, int outputChannelCount) {
-        for (float sample : s_Impulse) {
+    void sendImpulses(float *outputData, int outputChannelCount, int numFrames) {
+        while (numFrames-- > 0) {
+            float sample = s_Impulse[mSampleIndex++];
+            if (mSampleIndex >= kImpulseSizeInFrames) {
+                mSampleIndex = 0;
+            }
+
             *outputData = sample;
             outputData += outputChannelCount;
         }
     }
 
+    void sendOneImpulse(float *outputData, int outputChannelCount) {
+        mSampleIndex = 0;
+        sendImpulses(outputData, outputChannelCount, kImpulseSizeInFrames);
+    }
+
     void process(float *inputData, int inputChannelCount,
                  float *outputData, int outputChannelCount,
                  int numFrames) override {
@@ -530,7 +544,7 @@
                 break;
 
             case STATE_MEASURING_GAIN:
-                sendImpulse(outputData, outputChannelCount);
+                sendImpulses(outputData, outputChannelCount, numFrames);
                 peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
                 // If we get several in a row then go to next state.
                 if (peak > mPulseThreshold) {
@@ -548,7 +562,7 @@
                             nextState = STATE_WAITING_FOR_SILENCE;
                         }
                     }
-                } else {
+                } else if (numFrames > kImpulseSizeInFrames) { // ignore short callbacks
                     mDownCounter = 8;
                 }
                 break;
@@ -574,7 +588,7 @@
 
             case STATE_SENDING_PULSE:
                 mAudioRecording.write(inputData, inputChannelCount, numFrames);
-                sendImpulse(outputData, outputChannelCount);
+                sendOneImpulse(outputData, outputChannelCount);
                 nextState = STATE_GATHERING_ECHOS;
                 //printf("%5d: switch to STATE_GATHERING_ECHOS\n", mLoopCounter);
                 break;
@@ -634,8 +648,9 @@
         STATE_FAILED
     };
 
-    int             mDownCounter = 500;
-    int             mLoopCounter = 0;
+    int32_t         mDownCounter = 500;
+    int32_t         mLoopCounter = 0;
+    int32_t         mSampleIndex = 0;
     float           mPulseThreshold = 0.02f;
     float           mSilenceThreshold = 0.002f;
     float           mMeasuredLoopGain = 0.0f;
@@ -670,7 +685,7 @@
         printf(LOOPBACK_RESULT_TAG "phase.offset       = %7.5f\n", mPhaseOffset);
         printf(LOOPBACK_RESULT_TAG "ref.phase          = %7.5f\n", mPhase);
         printf(LOOPBACK_RESULT_TAG "frames.accumulated = %6d\n", mFramesAccumulated);
-        printf(LOOPBACK_RESULT_TAG "sine.period        = %6d\n", mPeriod);
+        printf(LOOPBACK_RESULT_TAG "sine.period        = %6d\n", mSinePeriod);
         printf(LOOPBACK_RESULT_TAG "test.state         = %6d\n", mState);
         printf(LOOPBACK_RESULT_TAG "frame.count        = %6d\n", mFrameCounter);
         // Did we ever get a lock?
@@ -684,7 +699,7 @@
     }
 
     void printStatus() override {
-        printf("  state = %d, glitches = %d,", mState, mGlitchCount);
+        printf("st = %d, #gl = %3d,", mState, mGlitchCount);
     }
 
     double calculateMagnitude(double *phasePtr = NULL) {
@@ -709,6 +724,8 @@
     void process(float *inputData, int inputChannelCount,
                  float *outputData, int outputChannelCount,
                  int numFrames) override {
+        mProcessCount++;
+
         float peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
         if (peak > mPeakAmplitude) {
             mPeakAmplitude = peak;
@@ -720,6 +737,7 @@
             float sinOut = sinf(mPhase);
 
             switch (mState) {
+                case STATE_IDLE:
                 case STATE_IMMUNE:
                 case STATE_WAITING_FOR_SIGNAL:
                     break;
@@ -728,7 +746,7 @@
                     mCosAccumulator += sample * cosf(mPhase);
                     mFramesAccumulated++;
                     // Must be a multiple of the period or the calculation will not be accurate.
-                    if (mFramesAccumulated == mPeriod * 4) {
+                    if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
                         mPhaseOffset = 0.0;
                         mMagnitude = calculateMagnitude(&mPhaseOffset);
                         if (mMagnitude > mThreshold) {
@@ -754,7 +772,22 @@
                         //       mFrameCounter, mGlitchCount, predicted, sample);
                         mState = STATE_IMMUNE;
                         //printf("%5d: switch to STATE_IMMUNE\n", mFrameCounter);
-                        mDownCounter = mPeriod;  // Set duration of IMMUNE state.
+                        mDownCounter = mSinePeriod;  // Set duration of IMMUNE state.
+                    }
+
+                    // Track incoming signal and slowly adjust magnitude to account
+                    // for drift in the DRC or AGC.
+                    mSinAccumulator += sample * sinOut;
+                    mCosAccumulator += sample * cosf(mPhase);
+                    mFramesAccumulated++;
+                    // Must be a multiple of the period or the calculation will not be accurate.
+                    if (mFramesAccumulated == mSinePeriod) {
+                        const double coefficient = 0.1;
+                        double phaseOffset = 0.0;
+                        double magnitude = calculateMagnitude(&phaseOffset);
+                        // One pole averaging filter.
+                        mMagnitude = (mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient);
+                        resetAccumulator();
                     }
                 } break;
             }
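
The drift-tracking branch above blends each newly measured magnitude into mMagnitude with a
one-pole (exponential) averaging filter, so slow gain changes from a DRC or AGC are followed
instead of being flagged as glitches. The filter in isolation, as a standalone sketch with
hypothetical names:

    // One-pole averager: y[n] = (1 - k) * y[n-1] + k * x[n].
    // A smaller coefficient k tracks more slowly but rejects more measurement noise.
    class OnePoleAverager {
    public:
        explicit OnePoleAverager(double coefficient) : mCoefficient(coefficient) {}
        double process(double input) {
            mAverage = (mAverage * (1.0 - mCoefficient)) + (input * mCoefficient);
            return mAverage;
        }
        double get() const { return mAverage; }
    private:
        double mCoefficient;
        double mAverage = 0.0;
    };
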
@@ -775,6 +808,9 @@
 
         // Do these once per buffer.
         switch (mState) {
+            case STATE_IDLE:
+                mState = STATE_IMMUNE; // so we can tell when
+                break;
             case STATE_IMMUNE:
                 mDownCounter -= numFrames;
                 if (mDownCounter <= 0) {
@@ -805,21 +841,29 @@
     void reset() override {
         mGlitchCount = 0;
         mState = STATE_IMMUNE;
-        mPhaseIncrement = 2.0 * M_PI / mPeriod;
-        printf("phaseInc = %f for period %d\n", mPhaseIncrement, mPeriod);
+        mDownCounter = IMMUNE_FRAME_COUNT;
+        mPhaseIncrement = 2.0 * M_PI / mSinePeriod;
+        printf("phaseInc = %f for period %d\n", mPhaseIncrement, mSinePeriod);
         resetAccumulator();
+        mProcessCount = 0;
     }
 
 private:
 
     enum sine_state_t {
+        STATE_IDLE,
         STATE_IMMUNE,
         STATE_WAITING_FOR_SIGNAL,
         STATE_WAITING_FOR_LOCK,
         STATE_LOCKED
     };
 
-    int     mPeriod = 79;
+    enum constants {
+        IMMUNE_FRAME_COUNT = 48 * 500,
+        PERIODS_NEEDED_FOR_LOCK = 8
+    };
+
+    int     mSinePeriod = 79;
     double  mPhaseIncrement = 0.0;
     double  mPhase = 0.0;
     double  mPhaseOffset = 0.0;
@@ -828,18 +872,19 @@
     double  mThreshold = 0.005;
     double  mTolerance = 0.01;
     int32_t mFramesAccumulated = 0;
+    int32_t mProcessCount = 0;
     double  mSinAccumulator = 0.0;
     double  mCosAccumulator = 0.0;
     int32_t mGlitchCount = 0;
     double  mPeakAmplitude = 0.0;
-    int     mDownCounter = 4000;
+    int     mDownCounter = IMMUNE_FRAME_COUNT;
     int32_t mFrameCounter = 0;
     float   mOutputAmplitude = 0.75;
 
     PseudoRandom  mWhiteNoise;
     float   mNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
 
-    sine_state_t  mState = STATE_IMMUNE;
+    sine_state_t  mState = STATE_IDLE;
 };
 
 
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 026ff0f..91ebf73 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -38,22 +38,31 @@
 // Tag for machine readable results as property = value pairs
 #define RESULT_TAG              "RESULT: "
 #define NUM_SECONDS             5
+#define PERIOD_MILLIS           1000
 #define NUM_INPUT_CHANNELS      1
 #define FILENAME_ALL            "/data/loopback_all.wav"
 #define FILENAME_ECHOS          "/data/loopback_echos.wav"
-#define APP_VERSION             "0.1.22"
+#define APP_VERSION             "0.2.04"
+
+constexpr int kNumCallbacksToDrain   = 20;
+constexpr int kNumCallbacksToDiscard = 20;
 
 struct LoopbackData {
     AAudioStream      *inputStream = nullptr;
     int32_t            inputFramesMaximum = 0;
-    int16_t           *inputData = nullptr;
-    int16_t            peakShort = 0;
-    float             *conversionBuffer = nullptr;
+    int16_t           *inputShortData = nullptr;
+    float             *inputFloatData = nullptr;
+    aaudio_format_t    actualInputFormat = AAUDIO_FORMAT_INVALID;
     int32_t            actualInputChannelCount = 0;
     int32_t            actualOutputChannelCount = 0;
-    int32_t            inputBuffersToDiscard = 10;
+    int32_t            numCallbacksToDrain = kNumCallbacksToDrain;
+    int32_t            numCallbacksToDiscard = kNumCallbacksToDiscard;
     int32_t            minNumFrames = INT32_MAX;
     int32_t            maxNumFrames = 0;
+    int32_t            insufficientReadCount = 0;
+    int32_t            insufficientReadFrames = 0;
+    int32_t            framesReadTotal = 0;
+    int32_t            framesWrittenTotal = 0;
     bool               isDone = false;
 
     aaudio_result_t    inputError = AAUDIO_OK;
@@ -68,7 +77,7 @@
 static void convertPcm16ToFloat(const int16_t *source,
                                 float *destination,
                                 int32_t numSamples) {
-    const float scaler = 1.0f / 32768.0f;
+    constexpr float scaler = 1.0f / 32768.0f;
     for (int i = 0; i < numSamples; i++) {
         destination[i] = source[i] * scaler;
     }
@@ -78,6 +87,31 @@
 // ========================= CALLBACK =================================================
 // ====================================================================================
 // Callback function that fills the audio output buffer.
+
+static int32_t readFormattedData(LoopbackData *myData, int32_t numFrames) {
+    int32_t framesRead = AAUDIO_ERROR_INVALID_FORMAT;
+    if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+        framesRead = AAudioStream_read(myData->inputStream, myData->inputShortData,
+                                       numFrames,
+                                       0 /* timeoutNanoseconds */);
+    } else if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+        framesRead = AAudioStream_read(myData->inputStream, myData->inputFloatData,
+                                       numFrames,
+                                       0 /* timeoutNanoseconds */);
+    } else {
+        printf("ERROR actualInputFormat = %d\n", myData->actualInputFormat);
+        assert(false);
+    }
+    if (framesRead < 0) {
+        myData->inputError = framesRead;
+        printf("ERROR in read = %d = %s\n", framesRead,
+               AAudio_convertResultToText(framesRead));
+    } else {
+        myData->framesReadTotal += framesRead;
+    }
+    return framesRead;
+}
+
 static aaudio_data_callback_result_t MyDataCallbackProc(
         AAudioStream *outputStream,
         void *userData,
@@ -90,7 +124,7 @@
     float  *outputData = (float  *) audioData;
 
     // Read audio data from the input stream.
-    int32_t framesRead;
+    int32_t actualFramesRead;
 
     if (numFrames > myData->inputFramesMaximum) {
         myData->inputError = AAUDIO_ERROR_OUT_OF_RANGE;
@@ -104,46 +138,86 @@
         myData->minNumFrames = numFrames;
     }
 
-    if (myData->inputBuffersToDiscard > 0) {
+    // Silence the output.
+    int32_t numBytes = numFrames * myData->actualOutputChannelCount * sizeof(float);
+    memset(audioData, 0 /* value */, numBytes);
+
+    if (myData->numCallbacksToDrain > 0) {
         // Drain the input.
+        int32_t totalFramesRead = 0;
         do {
-            framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
-                                       numFrames, 0);
-            if (framesRead < 0) {
-                myData->inputError = framesRead;
-                printf("ERROR in read = %d", framesRead);
-                result = AAUDIO_CALLBACK_RESULT_STOP;
-            } else if (framesRead > 0) {
-                myData->inputBuffersToDiscard--;
+            actualFramesRead = readFormattedData(myData, numFrames);
+            if (actualFramesRead > 0) {
+                totalFramesRead += actualFramesRead;
             }
-        } while(framesRead > 0);
-    } else {
-        framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
-                                       numFrames, 0);
-        if (framesRead < 0) {
-            myData->inputError = framesRead;
-            printf("ERROR in read = %d", framesRead);
+            // Ignore errors because input stream may not be started yet.
+        } while (actualFramesRead > 0);
+        // Only counts if we actually got some data.
+        if (totalFramesRead > 0) {
+            myData->numCallbacksToDrain--;
+        }
+
+    } else if (myData->numCallbacksToDiscard > 0) {
+        // Ignore. Allow the input to fill back up to equilibrium with the output.
+        actualFramesRead = readFormattedData(myData, numFrames);
+        if (actualFramesRead < 0) {
             result = AAUDIO_CALLBACK_RESULT_STOP;
-        } else if (framesRead > 0) {
+        }
+        myData->numCallbacksToDiscard--;
 
-            myData->audioRecording.write(myData->inputData,
-                                        myData->actualInputChannelCount,
-                                         framesRead);
+    } else {
 
-            int32_t numSamples = framesRead * myData->actualInputChannelCount;
-            convertPcm16ToFloat(myData->inputData, myData->conversionBuffer, numSamples);
+        int32_t numInputBytes = numFrames * myData->actualInputChannelCount * sizeof(float);
+        memset(myData->inputFloatData, 0 /* value */, numInputBytes);
 
-            myData->loopbackProcessor->process(myData->conversionBuffer,
-                                              myData->actualInputChannelCount,
-                                              outputData,
-                                              myData->actualOutputChannelCount,
-                                              framesRead);
+        // Process data after equilibrium.
+        int64_t inputFramesWritten = AAudioStream_getFramesWritten(myData->inputStream);
+        int64_t inputFramesRead = AAudioStream_getFramesRead(myData->inputStream);
+        int64_t framesAvailable = inputFramesWritten - inputFramesRead;
+        actualFramesRead = readFormattedData(myData, numFrames);
+        if (actualFramesRead < 0) {
+            result = AAUDIO_CALLBACK_RESULT_STOP;
+        } else {
+
+            if (actualFramesRead < numFrames) {
+                if (actualFramesRead < (int32_t) framesAvailable) {
+                    printf("insufficient but numFrames = %d"
+                                   ", actualFramesRead = %d"
+                                   ", inputFramesWritten = %d"
+                                   ", inputFramesRead = %d"
+                                   ", available = %d\n",
+                           numFrames,
+                           actualFramesRead,
+                           (int) inputFramesWritten,
+                           (int) inputFramesRead,
+                           (int) framesAvailable);
+                }
+                myData->insufficientReadCount++;
+                myData->insufficientReadFrames += numFrames - actualFramesRead; // deficit
+            }
+
+            int32_t numSamples = actualFramesRead * myData->actualInputChannelCount;
+
+            if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+                convertPcm16ToFloat(myData->inputShortData, myData->inputFloatData, numSamples);
+            }
+            // Save for later.
+            myData->audioRecording.write(myData->inputFloatData,
+                                         myData->actualInputChannelCount,
+                                         numFrames);
+            // Analyze the data.
+            myData->loopbackProcessor->process(myData->inputFloatData,
+                                               myData->actualInputChannelCount,
+                                               outputData,
+                                               myData->actualOutputChannelCount,
+                                               numFrames);
             myData->isDone = myData->loopbackProcessor->isDone();
             if (myData->isDone) {
                 result = AAUDIO_CALLBACK_RESULT_STOP;
             }
         }
     }
+    myData->framesWrittenTotal += numFrames;
 
     return result;
 }
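
The callback above runs in three phases: while numCallbacksToDrain is positive it reads until
the input backlog is empty, for the next numCallbacksToDiscard callbacks it reads and discards
the data so the input and output streams settle into equilibrium, and only then does it
convert, record and analyze each buffer. A stripped-down sketch of just that phase bookkeeping
(hypothetical names, stream I/O omitted):

    enum class Phase { DRAIN, DISCARD, MEASURE };

    // Advance the loopback phase once per callback; framesRead is what the input
    // stream returned during this callback.
    Phase advancePhase(Phase phase, int32_t framesRead,
                       int32_t &callbacksToDrain, int32_t &callbacksToDiscard) {
        switch (phase) {
            case Phase::DRAIN:
                // Only count callbacks that actually drained some data.
                if (framesRead > 0 && --callbacksToDrain <= 0) return Phase::DISCARD;
                return Phase::DRAIN;
            case Phase::DISCARD:
                // Read but ignore data until the two streams reach equilibrium.
                if (--callbacksToDiscard <= 0) return Phase::MEASURE;
                return Phase::DISCARD;
            case Phase::MEASURE:
            default:
                return Phase::MEASURE;
        }
    }
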
@@ -160,7 +234,9 @@
 static void usage() {
     printf("Usage: aaudio_loopback [OPTION]...\n\n");
     AAudioArgsParser::usage();
+    printf("      -B{frames}        input capacity in frames\n");
     printf("      -C{channels}      number of input channels\n");
+    printf("      -F{0,1,2}         input format, 1=I16, 2=FLOAT\n");
     printf("      -g{gain}          recirculating loopback gain\n");
     printf("      -P{inPerf}        set input AAUDIO_PERFORMANCE_MODE*\n");
     printf("          n for _NONE\n");
@@ -236,9 +312,10 @@
         }
     }
     float gain = 0.98f / maxSample;
+
     for (int32_t i = start; i < end; i++) {
         float sample = data[i];
-        printf("%5.3f ", sample); // actual value
+        printf("%6d: %7.4f ", i, sample); // actual value
         sample *= gain;
         printAudioScope(sample);
     }
@@ -254,23 +331,24 @@
     AAudioSimplePlayer    player;
     AAudioSimpleRecorder  recorder;
     LoopbackData          loopbackData;
-    AAudioStream         *outputStream = nullptr;
+    AAudioStream         *inputStream                = nullptr;
+    AAudioStream         *outputStream               = nullptr;
 
     aaudio_result_t       result = AAUDIO_OK;
-    aaudio_sharing_mode_t requestedInputSharingMode     = AAUDIO_SHARING_MODE_SHARED;
+    aaudio_sharing_mode_t requestedInputSharingMode  = AAUDIO_SHARING_MODE_SHARED;
     int                   requestedInputChannelCount = NUM_INPUT_CHANNELS;
-    const aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_PCM_I16;
-    const aaudio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
-    aaudio_format_t       actualInputFormat;
-    aaudio_format_t       actualOutputFormat;
-    aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
-    int32_t               actualSampleRate = 0;
+    aaudio_format_t       requestedInputFormat       = AAUDIO_FORMAT_UNSPECIFIED;
+    int32_t               requestedInputCapacity     = -1;
+    aaudio_performance_mode_t inputPerformanceLevel  = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
 
-    int testMode = TEST_ECHO_LATENCY;
-    double gain = 1.0;
+    int32_t               outputFramesPerBurst = 0;
 
-    int32_t framesPerBurst = 0;
-    float *outputData = NULL;
+    aaudio_format_t       actualOutputFormat         = AAUDIO_FORMAT_INVALID;
+    int32_t               actualSampleRate           = 0;
+    int                   written                    = 0;
+
+    int                   testMode                   = TEST_ECHO_LATENCY;
+    double                gain                       = 1.0;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
@@ -285,9 +363,15 @@
             if (arg[0] == '-') {
                 char option = arg[1];
                 switch (option) {
+                    case 'B':
+                        requestedInputCapacity = atoi(&arg[2]);
+                        break;
                     case 'C':
                         requestedInputChannelCount = atoi(&arg[2]);
                         break;
+                    case 'F':
+                        requestedInputFormat = atoi(&arg[2]);
+                        break;
                     case 'g':
                         gain = atof(&arg[2]);
                         break;
@@ -320,7 +404,9 @@
     }
 
     int32_t requestedDuration = argParser.getDurationSeconds();
-    int32_t recordingDuration = std::min(60, requestedDuration);
+    int32_t requestedDurationMillis = requestedDuration * MILLIS_PER_SECOND;
+    int32_t timeMillis = 0;
+    int32_t recordingDuration = std::min(60 * 5, requestedDuration);
 
     switch(testMode) {
         case TEST_SINE_MAGNITUDE:
@@ -346,59 +432,96 @@
     }
 
     printf("OUTPUT stream ----------------------------------------\n");
-    argParser.setFormat(requestedOutputFormat);
     result = player.open(argParser, MyDataCallbackProc, MyErrorCallbackProc, &loopbackData);
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
         exit(1);
     }
     outputStream = player.getStream();
-    argParser.compareWithStream(outputStream);
 
     actualOutputFormat = AAudioStream_getFormat(outputStream);
-    assert(actualOutputFormat == AAUDIO_FORMAT_PCM_FLOAT);
+    if (actualOutputFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+        fprintf(stderr, "ERROR - only AAUDIO_FORMAT_PCM_FLOAT supported\n");
+        exit(1);
+    }
 
     actualSampleRate = AAudioStream_getSampleRate(outputStream);
     loopbackData.audioRecording.allocate(recordingDuration * actualSampleRate);
     loopbackData.audioRecording.setSampleRate(actualSampleRate);
+    outputFramesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
 
-    printf("INPUT stream ----------------------------------------\n");
+    argParser.compareWithStream(outputStream);
+
+    printf("INPUT  stream ----------------------------------------\n");
     // Use different parameters for the input.
     argParser.setNumberOfBursts(AAUDIO_UNSPECIFIED);
     argParser.setFormat(requestedInputFormat);
     argParser.setPerformanceMode(inputPerformanceLevel);
     argParser.setChannelCount(requestedInputChannelCount);
     argParser.setSharingMode(requestedInputSharingMode);
+
+    // Make sure the input buffer has plenty of capacity.
+    // Extra capacity on input should not increase latency if we keep it drained.
+    int32_t inputBufferCapacity = requestedInputCapacity;
+    if (inputBufferCapacity < 0) {
+        int32_t outputBufferCapacity = AAudioStream_getBufferCapacityInFrames(outputStream);
+        inputBufferCapacity = 2 * outputBufferCapacity;
+    }
+    argParser.setBufferCapacity(inputBufferCapacity);
+
     result = recorder.open(argParser);
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  recorder.open() returned %d\n", result);
         goto finish;
     }
-    loopbackData.inputStream = recorder.getStream();
-    argParser.compareWithStream(loopbackData.inputStream);
+    inputStream = loopbackData.inputStream = recorder.getStream();
 
-    // This is the number of frames that are read in one chunk by a DMA controller
-    // or a DSP or a mixer.
-    framesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
+    {
+        int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
+        result = AAudioStream_setBufferSizeInFrames(inputStream, actualCapacity);
+        if (result < 0) {
+            fprintf(stderr, "ERROR -  AAudioStream_setBufferSizeInFrames() returned %d\n", result);
+            goto finish;
+        }
+    }
 
-    actualInputFormat = AAudioStream_getFormat(outputStream);
-    assert(actualInputFormat == AAUDIO_FORMAT_PCM_I16);
+    argParser.compareWithStream(inputStream);
 
+    // If the input stream is too small then we cannot satisfy the output callback.
+    {
+        int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
+        if (actualCapacity < 2 * outputFramesPerBurst) {
+            fprintf(stderr, "ERROR - input capacity < 2 * outputFramesPerBurst\n");
+            goto finish;
+        }
+    }
+
+    // ------- Setup loopbackData -----------------------------
+    loopbackData.actualInputFormat = AAudioStream_getFormat(inputStream);
 
     loopbackData.actualInputChannelCount = recorder.getChannelCount();
     loopbackData.actualOutputChannelCount = player.getChannelCount();
 
     // Allocate a buffer for the audio data.
-    loopbackData.inputFramesMaximum = 32 * framesPerBurst;
-    loopbackData.inputBuffersToDiscard = 200;
+    loopbackData.inputFramesMaximum = 32 * AAudioStream_getFramesPerBurst(inputStream);
 
-    loopbackData.inputData = new int16_t[loopbackData.inputFramesMaximum
-                                         * loopbackData.actualInputChannelCount];
-    loopbackData.conversionBuffer = new float[loopbackData.inputFramesMaximum *
-                                              loopbackData.actualInputChannelCount];
+    if (loopbackData.actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+        loopbackData.inputShortData = new int16_t[loopbackData.inputFramesMaximum
+                                                  * loopbackData.actualInputChannelCount]{};
+    }
+    loopbackData.inputFloatData = new float[loopbackData.inputFramesMaximum *
+                                              loopbackData.actualInputChannelCount]{};
 
     loopbackData.loopbackProcessor->reset();
 
+    // Start OUTPUT first so INPUT does not overflow.
+    result = player.start();
+    if (result != AAUDIO_OK) {
+        printf("ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
+               result, AAudio_convertResultToText(result));
+        goto finish;
+    }
+
     result = recorder.start();
     if (result != AAUDIO_OK) {
         printf("ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
@@ -406,16 +529,8 @@
         goto finish;
     }
 
-    result = player.start();
-    if (result != AAUDIO_OK) {
-        printf("ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
-               result, AAudio_convertResultToText(result));
-        goto finish;
-    }
-
-    printf("------- sleep while the callback runs --------------\n");
-    fflush(stdout);
-    for (int i = requestedDuration; i > 0 ; i--) {
+    printf("------- sleep and log while the callback runs --------------\n");
+    while (timeMillis <= requestedDurationMillis) {
         if (loopbackData.inputError != AAUDIO_OK) {
             printf("  ERROR on input stream\n");
             break;
@@ -423,70 +538,128 @@
                 printf("  ERROR on output stream\n");
                 break;
         } else if (loopbackData.isDone) {
-                printf("  test says it is done!\n");
+                printf("  Test says it is DONE!\n");
                 break;
         } else {
-            sleep(1);
-            printf("%4d: ", i);
+            // Log a line of stream data.
+            printf("%7.3f: ", 0.001 * timeMillis); // display in seconds
             loopbackData.loopbackProcessor->printStatus();
+            printf(" insf %3d,", (int) loopbackData.insufficientReadCount);
 
-            int64_t inputFramesWritten = AAudioStream_getFramesWritten(loopbackData.inputStream);
-            int64_t inputFramesRead = AAudioStream_getFramesRead(loopbackData.inputStream);
+            int64_t inputFramesWritten = AAudioStream_getFramesWritten(inputStream);
+            int64_t inputFramesRead = AAudioStream_getFramesRead(inputStream);
             int64_t outputFramesWritten = AAudioStream_getFramesWritten(outputStream);
             int64_t outputFramesRead = AAudioStream_getFramesRead(outputStream);
-            printf(" INPUT: wr %lld rd %lld state %s, OUTPUT: wr %lld rd %lld state %s, xruns %d\n",
+            static const int textOffset = strlen("AAUDIO_STREAM_STATE_"); // strip this off
+            printf(" | INPUT: wr %7lld - rd %7lld = %5lld, st %8s, oruns %3d",
                    (long long) inputFramesWritten,
                    (long long) inputFramesRead,
-                   AAudio_convertStreamStateToText(AAudioStream_getState(loopbackData.inputStream)),
+                   (long long) (inputFramesWritten - inputFramesRead),
+                   &AAudio_convertStreamStateToText(
+                           AAudioStream_getState(inputStream))[textOffset],
+                   AAudioStream_getXRunCount(inputStream));
+
+            printf(" | OUTPUT: wr %7lld - rd %7lld = %5lld, st %8s, uruns %3d\n",
                    (long long) outputFramesWritten,
                    (long long) outputFramesRead,
-                   AAudio_convertStreamStateToText(AAudioStream_getState(outputStream)),
+                    (long long) (outputFramesWritten - outputFramesRead),
+                   &AAudio_convertStreamStateToText(
+                           AAudioStream_getState(outputStream))[textOffset],
                    AAudioStream_getXRunCount(outputStream)
             );
         }
+        int32_t periodMillis = (timeMillis < 2000) ? PERIOD_MILLIS / 4 : PERIOD_MILLIS;
+        usleep(periodMillis * 1000);
+        timeMillis += periodMillis;
+    }
+
+    result = player.stop();
+    if (result != AAUDIO_OK) {
+        printf("ERROR - player.stop() returned %d = %s\n",
+               result, AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    result = recorder.stop();
+    if (result != AAUDIO_OK) {
+        printf("ERROR - recorder.stop() returned %d = %s\n",
+               result, AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    printf("input error = %d = %s\n",
+           loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
+
+    if (loopbackData.inputError == AAUDIO_OK) {
+        if (testMode == TEST_SINE_MAGNITUDE) {
+            printAudioGraph(loopbackData.audioRecording, 200);
+        }
+        // Print again so we don't have to scroll past waveform.
+        printf("OUTPUT Stream ----------------------------------------\n");
+        argParser.compareWithStream(outputStream);
+        printf("INPUT  Stream ----------------------------------------\n");
+        argParser.compareWithStream(inputStream);
+
+        loopbackData.loopbackProcessor->report();
+    }
+
+    {
+        int32_t framesRead = AAudioStream_getFramesRead(inputStream);
+        int32_t framesWritten = AAudioStream_getFramesWritten(inputStream);
+        printf("Callback Results ---------------------------------------- INPUT\n");
+        printf("  input overruns   = %d\n", AAudioStream_getXRunCount(inputStream));
+        printf("  framesWritten    = %8d\n", framesWritten);
+        printf("  framesRead       = %8d\n", framesRead);
+        printf("  myFramesRead     = %8d\n", (int) loopbackData.framesReadTotal);
+        printf("  written - read   = %8d\n", (int) (framesWritten - framesRead));
+        printf("  insufficient #   = %8d\n", (int) loopbackData.insufficientReadCount);
+        if (loopbackData.insufficientReadCount > 0) {
+            printf("  insufficient frames = %8d\n", (int) loopbackData.insufficientReadFrames);
+        }
+    }
+    {
+        int32_t framesRead = AAudioStream_getFramesRead(outputStream);
+        int32_t framesWritten = AAudioStream_getFramesWritten(outputStream);
+        printf("Callback Results ---------------------------------------- OUTPUT\n");
+        printf("  output underruns = %d\n", AAudioStream_getXRunCount(outputStream));
+        printf("  myFramesWritten  = %8d\n", (int) loopbackData.framesWrittenTotal);
+        printf("  framesWritten    = %8d\n", framesWritten);
+        printf("  framesRead       = %8d\n", framesRead);
+        printf("  min numFrames    = %8d\n", (int) loopbackData.minNumFrames);
+        printf("  max numFrames    = %8d\n", (int) loopbackData.maxNumFrames);
+    }
+
+    written = loopbackData.loopbackProcessor->save(FILENAME_ECHOS);
+    if (written > 0) {
+        printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
+               written, FILENAME_ECHOS);
+    }
+
+    written = loopbackData.audioRecording.save(FILENAME_ALL);
+    if (written > 0) {
+        printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
+               written, FILENAME_ALL);
     }
 
     if (loopbackData.loopbackProcessor->getResult() < 0) {
-        printf("ERROR: Could not get a good loopback signal. Probably because the volume was too low.\n");
-    } else {
-        printf("input error = %d = %s\n",
-               loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
-
-        printf("AAudioStream_getXRunCount %d\n", AAudioStream_getXRunCount(outputStream));
-        printf("framesRead    = %8d\n", (int) AAudioStream_getFramesRead(outputStream));
-        printf("framesWritten = %8d\n", (int) AAudioStream_getFramesWritten(outputStream));
-        printf("min numFrames = %8d\n", (int) loopbackData.minNumFrames);
-        printf("max numFrames = %8d\n", (int) loopbackData.maxNumFrames);
-
-        if (loopbackData.inputError == AAUDIO_OK) {
-            if (testMode == TEST_SINE_MAGNITUDE) {
-                printAudioGraph(loopbackData.audioRecording, 200);
-            }
-            loopbackData.loopbackProcessor->report();
-        }
-
-        int written = loopbackData.loopbackProcessor->save(FILENAME_ECHOS);
-        if (written > 0) {
-            printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
-                   written, FILENAME_ECHOS);
-        }
-
-        written = loopbackData.audioRecording.save(FILENAME_ALL);
-        if (written > 0) {
-            printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
-                   written, FILENAME_ALL);
-        }
+        printf("ERROR: LOOPBACK PROCESSING FAILED. Maybe because the volume was too low.\n");
+        result = loopbackData.loopbackProcessor->getResult();
+    }
+    if (loopbackData.insufficientReadCount > 3) {
+        printf("ERROR: LOOPBACK PROCESSING FAILED. insufficientReadCount too high\n");
+        result = AAUDIO_ERROR_UNAVAILABLE;
     }
 
 finish:
     player.close();
     recorder.close();
-    delete[] loopbackData.conversionBuffer;
-    delete[] loopbackData.inputData;
-    delete[] outputData;
+    delete[] loopbackData.inputFloatData;
+    delete[] loopbackData.inputShortData;
 
-    printf(RESULT_TAG "result = %s\n", AAudio_convertResultToText(result));
-    if ((result != AAUDIO_OK)) {
+    printf(RESULT_TAG "result = %d \n", result); // machine readable
+    printf("result is %s\n", AAudio_convertResultToText(result)); // human readable
+    if (result != AAUDIO_OK) {
+        printf("FAILURE\n");
         return EXIT_FAILURE;
     } else {
         printf("SUCCESS\n");
diff --git a/media/libaaudio/examples/loopback/src/loopback.sh b/media/libaaudio/examples/loopback/src/loopback.sh
index bc63125..a5712b8 100644
--- a/media/libaaudio/examples/loopback/src/loopback.sh
+++ b/media/libaaudio/examples/loopback/src/loopback.sh
@@ -1,10 +1,30 @@
 #!/system/bin/sh
 # Run a loopback test in the background after a delay.
-# To run the script enter:
+# To run the script, enter these commands once:
+#    adb disable-verity
+#    adb reboot
+#    adb remount
+#    adb sync
+#    adb push loopback.sh /data/
+# For each test run:
 #    adb shell "nohup sh /data/loopback.sh &"
+# Quickly connect USB audio if needed, either manually or via Tigertail switch.
+# Wait until the test completes, restore USB to host if needed, and then:
+#    adb pull /data/loopreport.txt
+#    adb pull /data/loopback_all.wav
+#    adb pull /data/loopback_echos.wav
 
 SLEEP_TIME=10
-TEST_COMMAND="aaudio_loopback -pl -Pl -C1 -n2 -m2 -tm -d5"
+TEST_COMMAND="/data/nativetest/aaudio_loopback/aaudio_loopback -pl -Pl -C1 -n2 -m2 -te -d5"
+# Partial list of options:
+#   -pl (output) performance mode: low latency
+#   -Pl input performance mode: low latency
+#   -C1 input channel count: 1
+#   -n2 number of bursts: 2
+#   -m2 mmap policy: 2
+#   -t? test mode: -tm for sine magnitude, -te for echo latency, -tf for file latency
+#   -d5 device ID
+# For full list of available options, see AAudioArgsParser.h and loopback.cpp
 
 echo "Plug in USB Mir and Fun Plug."
 echo "Test will start in ${SLEEP_TIME} seconds: ${TEST_COMMAND}"
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index 88d7401..0e61589 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -272,7 +272,9 @@
                     if (strlen(arg) > 2) {
                         policy = atoi(&arg[2]);
                     }
-                    AAudio_setMMapPolicy(policy);
+                    if (AAudio_setMMapPolicy(policy) != AAUDIO_OK) {
+                        printf("ERROR: invalid MMAP policy mode %i\n", policy);
+                    }
                 } break;
                 case 'n':
                     setNumberOfBursts(atoi(&arg[2]));
@@ -363,7 +365,7 @@
                 mode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
                 break;
             default:
-                printf("ERROR invalid performance mode %c\n", c);
+                printf("ERROR: invalid performance mode %c\n", c);
                 break;
         }
         return mode;
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 2207cb8c..1493b26 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -44,7 +44,15 @@
 #define AAUDIO_UNSPECIFIED           0
 
 enum {
+    /**
+     * Audio data will travel out of the device, for example through a speaker.
+     */
     AAUDIO_DIRECTION_OUTPUT,
+
+    /**
+     * Audio data will travel into the device, for example from a microphone.
+     */
     AAUDIO_DIRECTION_INPUT
 };
 typedef int32_t aaudio_direction_t;
@@ -52,33 +60,112 @@
 enum {
     AAUDIO_FORMAT_INVALID = -1,
     AAUDIO_FORMAT_UNSPECIFIED = 0,
+
+    /**
+     * This format uses the int16_t data type.
+     * The maximum range of the data is -32768 to 32767.
+     */
     AAUDIO_FORMAT_PCM_I16,
+
+    /**
+     * This format uses the float data type.
+     * The nominal range of the data is [-1.0f, 1.0f).
+     * Values outside that range may be clipped.
+     *
+     * See also 'floatData' at
+     * https://developer.android.com/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)
+     */
     AAUDIO_FORMAT_PCM_FLOAT
 };
 typedef int32_t aaudio_format_t;
 
+/**
+ * These result codes are returned from AAudio functions to indicate success or failure.
+ * Note that error return codes may change in the future so applications should generally
+ * not rely on specific return codes.
+ */
 enum {
+    /**
+     * The call was successful.
+     */
     AAUDIO_OK,
     AAUDIO_ERROR_BASE = -900, // TODO review
+
+    /**
+     * The audio device was disconnected. This could occur, for example, when headphones
+     * are plugged in or unplugged. The stream cannot be used after the device is disconnected.
+     * Applications should stop and close the stream.
+     * If this error is received in an error callback then another thread should be
+     * used to stop and close the stream.
+     */
     AAUDIO_ERROR_DISCONNECTED,
+
+    /**
+     * An invalid parameter was passed to AAudio.
+     */
     AAUDIO_ERROR_ILLEGAL_ARGUMENT,
     // reserved
     AAUDIO_ERROR_INTERNAL = AAUDIO_ERROR_ILLEGAL_ARGUMENT + 2,
+
+    /**
+     * The requested operation is not appropriate for the current state of AAudio.
+     */
     AAUDIO_ERROR_INVALID_STATE,
     // reserved
     // reserved
+    /**
+     * The server rejected the handle used to identify the stream.
+     */
     AAUDIO_ERROR_INVALID_HANDLE = AAUDIO_ERROR_INVALID_STATE + 3,
     // reserved
+
+    /**
+     * The function is not implemented for this stream.
+     */
     AAUDIO_ERROR_UNIMPLEMENTED = AAUDIO_ERROR_INVALID_HANDLE + 2,
+
+    /**
+     * A resource or information is unavailable.
+     * This could occur when an application tries to open too many streams,
+     * or a timestamp is not available.
+     */
     AAUDIO_ERROR_UNAVAILABLE,
     AAUDIO_ERROR_NO_FREE_HANDLES,
+
+    /**
+     * Memory could not be allocated.
+     */
     AAUDIO_ERROR_NO_MEMORY,
+
+    /**
+     * A NULL pointer was passed to AAudio, or a NULL pointer was detected internally.
+     */
     AAUDIO_ERROR_NULL,
+
+    /**
+     * An operation took longer than expected.
+     */
     AAUDIO_ERROR_TIMEOUT,
     AAUDIO_ERROR_WOULD_BLOCK,
+
+    /**
+     * The requested data format is not supported.
+     */
     AAUDIO_ERROR_INVALID_FORMAT,
+
+    /**
+     * A requested value was out of range.
+     */
     AAUDIO_ERROR_OUT_OF_RANGE,
+
+    /**
+     * The audio service was not available.
+     */
     AAUDIO_ERROR_NO_SERVICE,
+
+    /**
+     * The requested sample rate was not supported.
+     */
     AAUDIO_ERROR_INVALID_RATE
 };
 typedef int32_t  aaudio_result_t;
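
Most AAudio entry points return one of the codes above. Since the documentation warns against
relying on specific error values, the usual pattern is to compare against AAUDIO_OK and log
the text form. A minimal sketch, with stream assumed to be an already opened AAudioStream*:

    aaudio_result_t result = AAudioStream_requestStart(stream);
    if (result != AAUDIO_OK) {
        printf("requestStart failed: %d = %s\n", result, AAudio_convertResultToText(result));
    }
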
@@ -126,15 +213,15 @@
     AAUDIO_PERFORMANCE_MODE_NONE = 10,
 
     /**
-     * Extending battery life is most important.
+     * Extending battery life is more important than low latency.
      *
      * This mode is not supported in input streams.
-     * Mode NONE will be used if this is requested.
+     * For input, mode NONE will be used if this is requested.
      */
     AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
 
     /**
-     * Reducing latency is most important.
+     * Reducing latency is more important than battery life.
      */
     AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
 };
@@ -289,6 +376,11 @@
 };
 typedef int32_t aaudio_input_preset_t;
 
+/**
+ * These may be used with AAudioStreamBuilder_setSessionId().
+ *
+ * Added in API level 28.
+ */
 enum {
     /**
      * Do not allocate a session ID.
@@ -302,7 +394,7 @@
     /**
      * Allocate a session ID that can be used to attach and control
      * effects using the Java AudioEffects API.
-     * Note that the use of this flag may result in higher latency.
+     * Note that using this may result in higher latency.
      *
      * Note that this matches the value of AudioManager.AUDIO_SESSION_ID_GENERATE.
      *
@@ -331,7 +423,7 @@
  *
  * @return pointer to a text representation of an AAudio result code.
  */
-AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode);
+AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) __INTRODUCED_IN(26);
 
 /**
  * The text is the ASCII symbol corresponding to the stream state,
@@ -341,7 +433,8 @@
  *
  * @return pointer to a text representation of an AAudio state.
  */
-AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state);
+AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state)
+        __INTRODUCED_IN(26);
 
 // ============================================================
 // StreamBuilder
@@ -359,7 +452,8 @@
  *
  * AAudioStreamBuilder_delete() must be called when you are done using the builder.
  */
-AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder);
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder)
+        __INTRODUCED_IN(26);
 
 /**
  * Request an audio device identified device using an ID.
@@ -372,7 +466,7 @@
  * @param deviceId device identifier or AAUDIO_UNSPECIFIED
  */
 AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
-                                                     int32_t deviceId);
+                                                int32_t deviceId) __INTRODUCED_IN(26);
 
 /**
  * Request a sample rate in Hertz.
@@ -389,7 +483,7 @@
  * @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
  */
 AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
-                                                       int32_t sampleRate);
+                                                  int32_t sampleRate) __INTRODUCED_IN(26);
 
 /**
  * Request a number of channels for the stream.
@@ -406,7 +500,7 @@
  * @param channelCount Number of channels desired.
  */
 AAUDIO_API void AAudioStreamBuilder_setChannelCount(AAudioStreamBuilder* builder,
-                                                   int32_t channelCount);
+                                                    int32_t channelCount) __INTRODUCED_IN(26);
 
 /**
  * Identical to AAudioStreamBuilder_setChannelCount().
@@ -415,7 +509,7 @@
  * @param samplesPerFrame Number of samples in a frame.
  */
 AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
-                                                       int32_t samplesPerFrame);
+                                                       int32_t samplesPerFrame) __INTRODUCED_IN(26);
 
 /**
  * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
@@ -432,7 +526,7 @@
  * @param format common formats are AAUDIO_FORMAT_PCM_FLOAT and AAUDIO_FORMAT_PCM_I16.
  */
 AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
-                                                   aaudio_format_t format);
+                                              aaudio_format_t format) __INTRODUCED_IN(26);
 
 /**
  * Request a mode for sharing the device.
@@ -446,7 +540,7 @@
  * @param sharingMode AAUDIO_SHARING_MODE_SHARED or AAUDIO_SHARING_MODE_EXCLUSIVE
  */
 AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
-                                                        aaudio_sharing_mode_t sharingMode);
+        aaudio_sharing_mode_t sharingMode) __INTRODUCED_IN(26);
 
 /**
  * Request the direction for a stream.
@@ -457,7 +551,7 @@
  * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
  */
 AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
-                                                            aaudio_direction_t direction);
+        aaudio_direction_t direction) __INTRODUCED_IN(26);
 
 /**
  * Set the requested buffer capacity in frames.
@@ -469,18 +563,24 @@
  * @param numFrames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
  */
 AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
-                                                                 int32_t numFrames);
+        int32_t numFrames) __INTRODUCED_IN(26);
 
 /**
  * Set the requested performance mode.
  *
+ * Supported modes are AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_PERFORMANCE_MODE_POWER_SAVING
+ * and AAUDIO_PERFORMANCE_MODE_LOW_LATENCY.
+ *
  * The default, if you do not call this function, is AAUDIO_PERFORMANCE_MODE_NONE.
  *
+ * You may not get the mode you requested.
+ * You can call AAudioStream_getPerformanceMode() to find out the final mode for the stream.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param mode the desired performance mode, eg. AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
  */
 AAUDIO_API void AAudioStreamBuilder_setPerformanceMode(AAudioStreamBuilder* builder,
-                                                aaudio_performance_mode_t mode);
+        aaudio_performance_mode_t mode) __INTRODUCED_IN(26);
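
Because the granted mode can differ from the requested one, a caller typically checks it after
opening. A minimal sketch (error handling mostly omitted; assumes <aaudio/AAudio.h>):

    aaudio_performance_mode_t openLowLatencyStream(AAudioStream **streamOut) {
        AAudioStreamBuilder *builder = nullptr;
        AAudio_createStreamBuilder(&builder);
        AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
        aaudio_performance_mode_t actual = AAUDIO_PERFORMANCE_MODE_NONE;
        if (AAudioStreamBuilder_openStream(builder, streamOut) == AAUDIO_OK) {
            // The stream may have been granted a different mode than requested.
            actual = AAudioStream_getPerformanceMode(*streamOut);
        }
        AAudioStreamBuilder_delete(builder);
        return actual;
    }
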
 
 /**
  * Set the intended use case for the stream.
@@ -497,7 +597,7 @@
  * @param usage the desired usage, eg. AAUDIO_USAGE_GAME
  */
 AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
-                                                       aaudio_usage_t usage);
+        aaudio_usage_t usage) __INTRODUCED_IN(28);
 
 /**
  * Set the type of audio data that the stream will carry.
@@ -514,7 +614,7 @@
  * @param contentType the type of audio data, eg. AAUDIO_CONTENT_TYPE_SPEECH
  */
 AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
-                                             aaudio_content_type_t contentType);
+        aaudio_content_type_t contentType) __INTRODUCED_IN(28);
 
 /**
  * Set the input (capture) preset for the stream.
@@ -534,7 +634,7 @@
  * @param inputPreset the desired configuration for recording
  */
 AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
-                                                   aaudio_input_preset_t inputPreset);
+        aaudio_input_preset_t inputPreset) __INTRODUCED_IN(28);
 
 /** Set the requested session ID.
  *
@@ -550,10 +650,12 @@
  * and then used with this function when opening another stream.
  * This allows effects to be shared between streams.
  *
- * Session IDs from AAudio can be used the Android Java APIs and vice versa.
+ * Session IDs from AAudio can be used with the Android Java APIs and vice versa.
  * So a session ID from an AAudio stream can be passed to Java
  * and effects applied using the Java AudioEffect API.
  *
+ * Note that allocating or setting a session ID may result in a stream with higher latency.
+ *
  * Allocated session IDs will always be positive and nonzero.
  *
  * Added in API level 28.
@@ -562,7 +664,7 @@
  * @param sessionId an allocated sessionID or AAUDIO_SESSION_ID_ALLOCATE
  */
 AAUDIO_API void AAudioStreamBuilder_setSessionId(AAudioStreamBuilder* builder,
-                                                aaudio_session_id_t sessionId);
+        aaudio_session_id_t sessionId) __INTRODUCED_IN(28);
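
A short sketch of the shared-effects flow described above: allocate a session ID on one
stream, read it back, and reuse it with another stream or with the Java AudioEffect API
(assumes <aaudio/AAudio.h>; error handling omitted):

    aaudio_session_id_t openStreamWithSession(AAudioStream **streamOut) {
        AAudioStreamBuilder *builder = nullptr;
        AAudio_createStreamBuilder(&builder);
        AAudioStreamBuilder_setSessionId(builder, AAUDIO_SESSION_ID_ALLOCATE);
        aaudio_session_id_t sessionId = AAUDIO_SESSION_ID_NONE;
        if (AAudioStreamBuilder_openStream(builder, streamOut) == AAUDIO_OK) {
            sessionId = AAudioStream_getSessionId(*streamOut); // positive and nonzero
        }
        AAudioStreamBuilder_delete(builder);
        return sessionId;
    }
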
 
 /**
  * Return one of these values from the data callback function.
@@ -612,6 +714,14 @@
  * <li>use any mutexes or other synchronization primitives</li>
  * <li>sleep</li>
  * <li>stop or close the stream</li>
+ * <li>AAudioStream_read()</li>
+ * <li>AAudioStream_write()</li>
+ * </ul>
+ *
+ * The following are OK to call from the data callback:
+ * <ul>
+ * <li>AAudioStream_get*()</li>
+ * <li>AAudio_convertResultToText()</li>
  * </ul>
  *
  * If you need to move data, eg. MIDI commands, in or out of the callback function then
@@ -652,8 +762,7 @@
  *          to the callback functions.
  */
 AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
-                                                 AAudioStream_dataCallback callback,
-                                                 void *userData);
+        AAudioStream_dataCallback callback, void *userData) __INTRODUCED_IN(26);
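To make the callback rules above concrete, here is a minimal output data callback sketch; it assumes the stream was configured for AAUDIO_FORMAT_PCM_FLOAT and is only one possible shape for a callback:

// Does bounded work, does not block, and returns promptly.
static aaudio_data_callback_result_t myDataCallback(AAudioStream *stream,
        void *userData, void *audioData, int32_t numFrames) {
    (void) userData;
    float *output = (float *) audioData;                                // assumes PCM_FLOAT
    int32_t samples = numFrames * AAudioStream_getChannelCount(stream); // get*() is allowed here
    for (int32_t i = 0; i < samples; i++) {
        output[i] = 0.0f;  // a real app would render audio here
    }
    return AAUDIO_CALLBACK_RESULT_CONTINUE;
}
// Registered on the builder before opening:
// AAudioStreamBuilder_setDataCallback(builder, myDataCallback, NULL);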
 
 /**
  * Set the requested data callback buffer size in frames.
@@ -679,12 +788,28 @@
  * @param numFrames the desired buffer size in frames or AAUDIO_UNSPECIFIED
  */
 AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
-                                                             int32_t numFrames);
+                                                             int32_t numFrames) __INTRODUCED_IN(26);
 
 /**
  * Prototype for the callback function that is passed to
  * AAudioStreamBuilder_setErrorCallback().
  *
+ * The following may NOT be called from the error callback:
+ * <ul>
+ * <li>AAudioStream_requestStop()</li>
+ * <li>AAudioStream_requestPause()</li>
+ * <li>AAudioStream_close()</li>
+ * <li>AAudioStream_waitForStateChange()</li>
+ * <li>AAudioStream_read()</li>
+ * <li>AAudioStream_write()</li>
+ * </ul>
+ *
+ * The following are OK to call from the error callback:
+ * <ul>
+ * <li>AAudioStream_get*()</li>
+ * <li>AAudio_convertResultToText()</li>
+ * </ul>
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param userData the same address that was passed to AAudioStreamBuilder_setErrorCallback()
  * @param error an AAUDIO_ERROR_* value.
@@ -716,8 +841,7 @@
  *          to the callback functions.
  */
 AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
-                                                AAudioStream_errorCallback callback,
-                                                void *userData);
+        AAudioStream_errorCallback callback, void *userData) __INTRODUCED_IN(26);
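A sketch of an error callback that honors the lists above: it only records the error and leaves the actual stop, close and reopen to another application thread; sNeedsRestart is an illustrative flag, not part of the API:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool sNeedsRestart;  // polled by an app thread, illustrative only

static void myErrorCallback(AAudioStream *stream, void *userData, aaudio_result_t error) {
    (void) stream;
    (void) userData;
    // Only calls such as AAudioStream_get*() or AAudio_convertResultToText() belong here.
    if (error == AAUDIO_ERROR_DISCONNECTED) {
        atomic_store(&sNeedsRestart, true);  // another thread closes and reopens the stream
    }
}
// Registered on the builder before opening:
// AAudioStreamBuilder_setErrorCallback(builder, myErrorCallback, NULL);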
 
 /**
  * Open a stream based on the options in the StreamBuilder.
@@ -730,7 +854,7 @@
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
-                                                     AAudioStream** stream);
+        AAudioStream** stream) __INTRODUCED_IN(26);
 
 /**
  * Delete the resources associated with the StreamBuilder.
@@ -738,7 +862,8 @@
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder* builder);
+AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
+    __INTRODUCED_IN(26);
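Putting the builder calls above together, a typical open sequence might look like the following sketch; the parameter choices are examples only and error handling is reduced to early returns:

static aaudio_result_t openOutputStream(AAudioStream **streamOut) {
    AAudioStreamBuilder *builder = NULL;
    aaudio_result_t result = AAudio_createStreamBuilder(&builder);
    if (result != AAUDIO_OK) return result;

    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);
    AAudioStreamBuilder_setChannelCount(builder, 2);
    AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);

    result = AAudioStreamBuilder_openStream(builder, streamOut);
    AAudioStreamBuilder_delete(builder);  // no longer needed once the stream exists
    return result;
}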
 
 // ============================================================
 // Stream Control
@@ -750,7 +875,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream* stream);
+AAUDIO_API aaudio_result_t  AAudioStream_close(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Asynchronously request to start playing the stream. For output streams, one should
@@ -761,7 +886,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream* stream);
+AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Asynchronous request for the stream to pause.
@@ -775,7 +900,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream* stream);
+AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Asynchronous request for the stream to flush.
@@ -789,7 +914,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream* stream);
+AAUDIO_API aaudio_result_t  AAudioStream_requestFlush(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Asynchronous request for the stream to stop.
@@ -799,7 +924,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return AAUDIO_OK or a negative error.
  */
-AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream* stream);
+AAUDIO_API aaudio_result_t  AAudioStream_requestStop(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Query the current state of the client, eg. AAUDIO_STREAM_STATE_PAUSING
@@ -811,7 +936,7 @@
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  */
-AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream);
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Wait until the current state no longer matches the input state.
@@ -836,9 +961,8 @@
  * @return AAUDIO_OK or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* stream,
-                                            aaudio_stream_state_t inputState,
-                                            aaudio_stream_state_t *nextState,
-                                            int64_t timeoutNanoseconds);
+        aaudio_stream_state_t inputState, aaudio_stream_state_t *nextState,
+        int64_t timeoutNanoseconds) __INTRODUCED_IN(26);
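For example, pausing and waiting for the pause to take effect could look like this sketch (the 100 ms timeout is arbitrary):

aaudio_stream_state_t nextState = AAUDIO_STREAM_STATE_UNINITIALIZED;
const int64_t timeoutNanos = 100LL * 1000 * 1000;  // 100 ms
aaudio_result_t result = AAudioStream_requestPause(stream);
if (result == AAUDIO_OK) {
    result = AAudioStream_waitForStateChange(stream,
            AAUDIO_STREAM_STATE_PAUSING, &nextState, timeoutNanos);
    // On success nextState is expected to be AAUDIO_STREAM_STATE_PAUSED.
}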
 
 // ============================================================
 // Stream I/O
@@ -856,6 +980,8 @@
  *
  * This call is "strong non-blocking" unless it has to wait for data.
  *
+ * If the call times out then zero or a partial frame count will be returned.
+ *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
  * @param buffer The address of the first sample.
  * @param numFrames Number of frames to read. Only complete frames will be written.
@@ -863,9 +989,7 @@
  * @return The number of frames actually read or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream* stream,
-                               void *buffer,
-                               int32_t numFrames,
-                               int64_t timeoutNanoseconds);
+        void *buffer, int32_t numFrames, int64_t timeoutNanoseconds) __INTRODUCED_IN(26);
 
 /**
  * Write data to the stream.
@@ -879,6 +1003,8 @@
  *
  * This call is "strong non-blocking" unless it has to wait for room in the buffer.
  *
+ * If the call times out then zero or a partial frame count will be returned.
+ *
  * @param stream A stream created using AAudioStreamBuilder_openStream().
  * @param buffer The address of the first sample.
  * @param numFrames Number of frames to write. Only complete frames will be written.
@@ -886,9 +1012,7 @@
  * @return The number of frames actually written or a negative error.
  */
 AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream* stream,
-                               const void *buffer,
-                               int32_t numFrames,
-                               int64_t timeoutNanoseconds);
+        const void *buffer, int32_t numFrames, int64_t timeoutNanoseconds) __INTRODUCED_IN(26);
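Because a timed-out read or write can return a partial frame count, blocking callers typically loop; a sketch for the write side, where buffer, numFrames and channelCount are assumed to come from the caller and the stream uses float samples:

const float *cursor = buffer;
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
    aaudio_result_t framesWritten = AAudioStream_write(stream, cursor, framesLeft,
            20LL * 1000 * 1000 /* 20 ms timeout, arbitrary */);
    if (framesWritten < 0) break;           // a negative value is an AAUDIO_ERROR_* code
    cursor += framesWritten * channelCount; // advance by the frames actually accepted
    framesLeft -= framesWritten;
}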
 
 // ============================================================
 // Stream - queries
@@ -903,14 +1027,15 @@
  * This cannot be set higher than AAudioStream_getBufferCapacityInFrames().
  *
  * Note that you will probably not get the exact size you request.
- * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
+ * You can check the return value or call AAudioStream_getBufferSizeInFrames()
+ * to see what the actual final size is.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param numFrames requested number of frames that can be filled without blocking
  * @return actual buffer size in frames or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
-                                                      int32_t numFrames);
+        int32_t numFrames) __INTRODUCED_IN(26);
 
 /**
  * Query the maximum number of frames that can be filled without blocking.
@@ -918,7 +1043,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return buffer size in frames.
  */
-AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Query the number of frames that the application should read or write at
@@ -933,7 +1058,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return burst size
  */
-AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Query maximum buffer capacity in frames.
@@ -941,7 +1066,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return  buffer capacity in frames
  */
-AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Query the size of the buffer that will be passed to the dataProc callback
@@ -962,7 +1087,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return callback buffer size in frames or AAUDIO_UNSPECIFIED
  */
-AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * An XRun is an Underrun or an Overrun.
@@ -979,13 +1104,13 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return the underrun or overrun count
  */
-AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* stream) __INTRODUCED_IN(26);
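The buffer-size and XRun queries are often combined into a simple tuning loop; a sketch, where previousXRunCount is state kept by the app:

// Start near two bursts of buffering and grow on underruns or overruns.
int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
int32_t actualSize = AAudioStream_setBufferSizeInFrames(stream, 2 * framesPerBurst);
// Periodically, on an application thread:
int32_t xRuns = AAudioStream_getXRunCount(stream);
if (xRuns > previousXRunCount && actualSize > 0) {
    previousXRunCount = xRuns;
    actualSize = AAudioStream_setBufferSizeInFrames(stream, actualSize + framesPerBurst);
}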
 
 /**
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual sample rate
  */
-AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * A stream has one or more channels of data.
@@ -994,7 +1119,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual number of channels
  */
-AAUDIO_API int32_t AAudioStream_getChannelCount(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getChannelCount(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Identical to AAudioStream_getChannelCount().
@@ -1002,43 +1127,46 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual number of samples per frame
  */
-AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual device ID
  */
-AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* stream);
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return actual data format
  */
-AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream);
+AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Provide actual sharing mode.
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return  actual sharing mode
  */
-AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream* stream);
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream* stream)
+        __INTRODUCED_IN(26);
 
 /**
  * Get the performance mode used by the stream.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  */
-AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* stream);
+AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* stream)
+        __INTRODUCED_IN(26);
 
 /**
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return direction
  */
-AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream* stream);
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Passes back the number of frames that have been written since the stream was created.
- * For an output stream, this will be advanced by the application calling write().
+ * For an output stream, this will be advanced by the application calling write()
+ * or by a data callback.
  * For an input stream, this will be advanced by the endpoint.
  *
  * The frame position is monotonically increasing.
@@ -1046,19 +1174,20 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return frames written
  */
-AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream);
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Passes back the number of frames that have been read since the stream was created.
  * For an output stream, this will be advanced by the endpoint.
- * For an input stream, this will be advanced by the application calling read().
+ * For an input stream, this will be advanced by the application calling read()
+ * or by a data callback.
  *
  * The frame position is monotonically increasing.
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return frames read
  */
-AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream);
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream) __INTRODUCED_IN(26);
 
 /**
  * Passes back the session ID associated with this stream.
@@ -1082,7 +1211,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return session ID or AAUDIO_SESSION_ID_NONE
  */
-AAUDIO_API aaudio_session_id_t AAudioStream_getSessionId(AAudioStream* stream);
+AAUDIO_API aaudio_session_id_t AAudioStream_getSessionId(AAudioStream* stream) __INTRODUCED_IN(28);
 
 /**
  * Passes back the time at which a particular frame was presented.
@@ -1107,9 +1236,7 @@
  * @return AAUDIO_OK or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* stream,
-                                      clockid_t clockid,
-                                      int64_t *framePosition,
-                                      int64_t *timeNanoseconds);
+        clockid_t clockid, int64_t *framePosition, int64_t *timeNanoseconds) __INTRODUCED_IN(26);
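A rough way to use these timestamps on an output stream (a sketch that ignores the age of the timestamp):

#include <time.h>  // for CLOCK_MONOTONIC

int64_t framePosition = 0;
int64_t timeNanos = 0;
if (AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC,
        &framePosition, &timeNanos) == AAUDIO_OK) {
    int64_t framesInFlight = AAudioStream_getFramesWritten(stream) - framePosition;
    double latencyMillis = 1000.0 * framesInFlight / AAudioStream_getSampleRate(stream);
    // A more careful estimate would also extrapolate framePosition from timeNanos to now.
}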
 
 /**
  * Return the use case for the stream.
@@ -1119,7 +1246,7 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return usage, for example AAUDIO_USAGE_MEDIA
  */
-AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream);
+AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream) __INTRODUCED_IN(28);
 
 /**
  * Return the content type for the stream.
@@ -1129,7 +1256,8 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return content type, for example AAUDIO_CONTENT_TYPE_MUSIC
  */
-AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream);
+AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream)
+        __INTRODUCED_IN(28);
 
 /**
  * Return the input preset for the stream.
@@ -1139,7 +1267,8 @@
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return input preset, for example AAUDIO_INPUT_PRESET_CAMCORDER
  */
-AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream);
+AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
+        __INTRODUCED_IN(28);
 
 #ifdef __cplusplus
 }
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 788833b..b9e28a0 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -57,6 +57,7 @@
 
     shared_libs: [
         "libaudioclient",
+        "libaudioutils",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 2a3e668..9204824 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -156,7 +156,7 @@
     setInputPreset(configurationOutput.getInputPreset());
 
     // Save device format so we can do format conversion and volume scaling together.
-    mDeviceFormat = configurationOutput.getFormat();
+    setDeviceFormat(configurationOutput.getFormat());
 
     result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
     if (result != AAUDIO_OK) {
@@ -392,19 +392,25 @@
 }
 
 aaudio_result_t AudioStreamInternal::startClient(const android::AudioClient& client,
-                                                 audio_port_handle_t *clientHandle) {
+                                                 audio_port_handle_t *portHandle) {
+    ALOGV("%s() called", __func__);
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-
-    return mServiceInterface.startClient(mServiceStreamHandle, client, clientHandle);
+    aaudio_result_t result =  mServiceInterface.startClient(mServiceStreamHandle,
+                                                            client, portHandle);
+    ALOGV("%s(%d) returning %d", __func__, *portHandle, result);
+    return result;
 }
 
-aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t clientHandle) {
+aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t portHandle) {
+    ALOGV("%s(%d) called", __func__, portHandle);
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    return mServiceInterface.stopClient(mServiceStreamHandle, clientHandle);
+    aaudio_result_t result = mServiceInterface.stopClient(mServiceStreamHandle, portHandle);
+    ALOGV("%s(%d) returning %d", __func__, portHandle, result);
+    return result;
 }
 
 aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
@@ -501,9 +507,9 @@
             ALOGW("%s - AAUDIO_SERVICE_EVENT_DISCONNECTED - FIFO cleared", __func__);
             break;
         case AAUDIO_SERVICE_EVENT_VOLUME:
+            ALOGD("%s - AAUDIO_SERVICE_EVENT_VOLUME %lf", __func__, message->event.dataDouble);
             mStreamVolume = (float)message->event.dataDouble;
             doSetVolume();
-            ALOGD("%s - AAUDIO_SERVICE_EVENT_VOLUME %lf", __func__, message->event.dataDouble);
             break;
         case AAUDIO_SERVICE_EVENT_XRUN:
             mXRunCount = static_cast<int32_t>(message->event.dataLong);
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 0e0724b..0425cd5 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -138,8 +138,6 @@
     // Calculate timeout for an operation involving framesPerOperation.
     int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
-    aaudio_format_t getDeviceFormat() const { return mDeviceFormat; }
-
     int32_t getDeviceChannelCount() const { return mDeviceChannelCount; }
 
     /**
@@ -195,9 +193,6 @@
 
     int64_t                  mServiceLatencyNanos = 0;
 
-    // Sometimes the hardware is operating with a different format or channel count from the app.
-    // Then we require conversion in AAudio.
-    aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t                  mDeviceChannelCount = 0;
 };
 
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 0c3b1fa..795ba2c 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -117,7 +117,7 @@
         // Still haven't got any timestamps from server.
         // Keep waiting until we get some valid timestamps then start writing to the
         // current buffer position.
-        ALOGD("%s() wait for valid timestamps", __func__);
+        ALOGV("%s() wait for valid timestamps", __func__);
         // Sleep very briefly and hope we get a timestamp soon.
         *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
         ATRACE_END();
@@ -310,6 +310,9 @@
 //------------------------------------------------------------------------------
 // Implementation of PlayerBase
 status_t AudioStreamInternalPlay::doSetVolume() {
-    mVolumeRamp.setTarget(mStreamVolume * getDuckAndMuteVolume());
+    float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
+    ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
+          __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
+    mVolumeRamp.setTarget(combinedVolume);
     return android::NO_ERROR;
 }
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 61e03db..358021b 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -367,7 +367,6 @@
     return err ? AAudioConvert_androidToAAudioResult(-errno) : mThreadRegistrationResult;
 }
 
-
 aaudio_data_callback_result_t AudioStream::maybeCallDataCallback(void *audioData,
                                                                  int32_t numFrames) {
     aaudio_data_callback_result_t result = AAUDIO_CALLBACK_RESULT_STOP;
@@ -429,6 +428,12 @@
 }
 #endif
 
+void AudioStream::setDuckAndMuteVolume(float duckAndMuteVolume) {
+    ALOGD("%s() to %f", __func__, duckAndMuteVolume);
+    mDuckAndMuteVolume = duckAndMuteVolume;
+    doSetVolume(); // apply this change
+}
+
 AudioStream::MyPlayerBase::MyPlayerBase(AudioStream *parent) : mParent(parent) {
 }
 
@@ -450,7 +455,6 @@
     }
 }
 
-
 void AudioStream::MyPlayerBase::destroy() {
     unregisterWithAudioManager();
 }
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 5273e36..31b895c 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -252,6 +252,20 @@
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
 
+    /**
+     * This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
+     */
+    int32_t getBytesPerDeviceFrame() const {
+        return mSamplesPerFrame * getBytesPerDeviceSample();
+    }
+
+    /**
+     * This is only valid after setDeviceFormat() has been called.
+     */
+    int32_t getBytesPerDeviceSample() const {
+        return AAudioConvert_formatToSizeInBytes(getDeviceFormat());
+    }
+
     virtual int64_t getFramesWritten() = 0;
 
     virtual int64_t getFramesRead() = 0;
@@ -314,10 +328,7 @@
     }
 
     // This is used by the AudioManager to duck and mute the stream when changing audio focus.
-    void setDuckAndMuteVolume(float duckAndMuteVolume) {
-        mDuckAndMuteVolume = duckAndMuteVolume;
-        doSetVolume(); // apply this change
-    }
+    void setDuckAndMuteVolume(float duckAndMuteVolume);
 
     float getDuckAndMuteVolume() const {
         return mDuckAndMuteVolume;
@@ -471,6 +482,17 @@
         mFormat = format;
     }
 
+    /**
+     * This should not be called after the open() call.
+     */
+    void setDeviceFormat(aaudio_format_t format) {
+        mDeviceFormat = format;
+    }
+
+    aaudio_format_t getDeviceFormat() const {
+        return mDeviceFormat;
+    }
+
     void setState(aaudio_stream_state_t state);
 
     void setDeviceId(int32_t deviceId) {
@@ -485,9 +507,23 @@
 
     float                mDuckAndMuteVolume = 1.0f;
 
-
 protected:
 
+    /**
+     * Either convert the data from device format to app format and return a pointer
+     * to the conversion buffer,
+     * OR just pass back the original pointer.
+     *
+     * Note that this is only used for the INPUT path.
+     *
+     * @param audioData buffer of audio data in the device format
+     * @param numFrames number of frames in audioData
+     * @return original pointer or the conversion buffer
+     */
+    virtual const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+        return audioData;
+    }
+
     void setPeriodNanoseconds(int64_t periodNanoseconds) {
         mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
     }
@@ -539,6 +575,10 @@
 
     int32_t                     mSessionId = AAUDIO_UNSPECIFIED;
 
+    // Sometimes the hardware is operating with a different format from the app.
+    // Then we require conversion in AAudio.
+    aaudio_format_t             mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+
     // callback ----------------------------------
 
     AAudioStream_dataCallback   mDataCallbackProc = nullptr;  // external callback functions
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 293a6a8..4ef765d 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -87,7 +87,7 @@
             break;
 
         default:
-            ALOGE("bad direction = %d", direction);
+            ALOGE("%s() bad direction = %d", __func__, direction);
             result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     }
     return result;
@@ -99,7 +99,7 @@
 aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
     AudioStream *audioStream = nullptr;
     if (streamPtr == nullptr) {
-        ALOGE("build() streamPtr is null");
+        ALOGE("%s() streamPtr is null", __func__);
         return AAUDIO_ERROR_NULL;
     }
     *streamPtr = nullptr;
@@ -124,13 +124,11 @@
     if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
         mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
     }
-    ALOGD("mmapPolicy = %d, mapExclusivePolicy = %d",
-          mmapPolicy, mapExclusivePolicy);
 
     aaudio_sharing_mode_t sharingMode = getSharingMode();
     if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
         && (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
-        ALOGW("EXCLUSIVE sharing mode not supported. Use SHARED.");
+        ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
         sharingMode = AAUDIO_SHARING_MODE_SHARED;
         setSharingMode(sharingMode);
     }
@@ -141,16 +139,22 @@
     // TODO Support other performance settings in MMAP mode.
     // Disable MMAP if low latency not requested.
     if (getPerformanceMode() != AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
-        ALOGD("build() MMAP not available because AAUDIO_PERFORMANCE_MODE_LOW_LATENCY not used.");
+        ALOGD("%s() MMAP not available because AAUDIO_PERFORMANCE_MODE_LOW_LATENCY not used.",
+              __func__);
         allowMMap = false;
     }
 
     // SessionID and Effects are only supported in Legacy mode.
     if (getSessionId() != AAUDIO_SESSION_ID_NONE) {
-        ALOGD("build() MMAP not available because sessionId used.");
+        ALOGD("%s() MMAP not available because sessionId used.", __func__);
         allowMMap = false;
     }
 
+    if (!allowMMap && !allowLegacy) {
+        ALOGE("%s() no backend available: neither MMAP nor legacy path are allowed", __func__);
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+
     result = builder_createStream(getDirection(), sharingMode, allowMMap, &audioStream);
     if (result == AAUDIO_OK) {
         // Open the stream using the parameters from the builder.
@@ -163,7 +167,7 @@
             audioStream = nullptr;
 
             if (isMMap && allowLegacy) {
-                ALOGD("build() MMAP stream did not open so try Legacy path");
+                ALOGV("%s() MMAP stream did not open so try Legacy path", __func__);
                 // If MMAP stream failed to open then TRY using a legacy stream.
                 result = builder_createStream(getDirection(), sharingMode,
                                               false, &audioStream);
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index e6e7c8e..b09258e 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -22,6 +22,8 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <algorithm>
+
 #include "FifoControllerBase.h"
 #include "FifoController.h"
 #include "FifoControllerIndirect.h"
@@ -43,7 +45,7 @@
     int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
     mStorage = new uint8_t[bytesPerBuffer];
     mStorageOwned = true;
-    ALOGD("capacityInFrames = %d, bytesPerFrame = %d",
+    ALOGV("capacityInFrames = %d, bytesPerFrame = %d",
           capacityInFrames, bytesPerFrame);
 }
 
@@ -85,15 +87,14 @@
     wrappingBuffer->data[1] = nullptr;
     wrappingBuffer->numFrames[1] = 0;
     if (framesAvailable > 0) {
-
         uint8_t *source = &mStorage[convertFramesToBytes(startIndex)];
         // Does the available data cross the end of the FIFO?
         if ((startIndex + framesAvailable) > mFrameCapacity) {
             wrappingBuffer->data[0] = source;
-            wrappingBuffer->numFrames[0] = mFrameCapacity - startIndex;
+            fifo_frames_t firstFrames = mFrameCapacity - startIndex;
+            wrappingBuffer->numFrames[0] = firstFrames;
             wrappingBuffer->data[1] = &mStorage[0];
-            wrappingBuffer->numFrames[1] = mFrameCapacity - startIndex;
-
+            wrappingBuffer->numFrames[1] = framesAvailable - firstFrames;
         } else {
             wrappingBuffer->data[0] = source;
             wrappingBuffer->numFrames[0] = framesAvailable;
@@ -102,18 +103,19 @@
         wrappingBuffer->data[0] = nullptr;
         wrappingBuffer->numFrames[0] = 0;
     }
-
 }
 
 fifo_frames_t FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
-    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
+    // The FIFO might be overfull so clip to capacity.
+    fifo_frames_t framesAvailable = std::min(mFifo->getFullFramesAvailable(), mFrameCapacity);
     fifo_frames_t startIndex = mFifo->getReadIndex();
     fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
     return framesAvailable;
 }
 
 fifo_frames_t FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
-    fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
+    // The FIFO might have underrun so clip to capacity.
+    fifo_frames_t framesAvailable = std::min(mFifo->getEmptyFramesAvailable(), mFrameCapacity);
     fifo_frames_t startIndex = mFifo->getWriteIndex();
     fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
     return framesAvailable;
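Editorial note on the hunk above: the second wrap segment previously reused the first segment's length, so the two parts no longer summed to framesAvailable. A tiny worked example of the corrected arithmetic, with assumed numbers:

#include <assert.h>

static void checkWrapSplit(void) {
    // Assume capacity = 8 frames, read index = 6, framesAvailable = 5.
    int capacity = 8, startIndex = 6, framesAvailable = 5;
    int firstFrames = capacity - startIndex;           // 2 frames up to the end of storage
    int secondFrames = framesAvailable - firstFrames;  // 3 frames wrapped back to the start
    assert(firstFrames + secondFrames == framesAvailable);  // the old code set both parts to 2
}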
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index 3352b33..a6b9f5d 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -19,10 +19,12 @@
 #include <utils/Log.h>
 
 #include <stdint.h>
-#include <utils/String16.h>
+
+#include <aaudio/AAudio.h>
+#include <audio_utils/primitives.h>
 #include <media/AudioTrack.h>
 #include <media/AudioTimestamp.h>
-#include <aaudio/AAudio.h>
+#include <utils/String16.h>
 
 #include "core/AudioStream.h"
 #include "legacy/AudioStreamLegacy.h"
@@ -48,14 +50,17 @@
     return AudioStreamLegacy_callback;
 }
 
-aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer, int32_t numFrames) {
+aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer,
+                                                                        int32_t numFrames) {
+    void *finalAudioData = buffer;
     if (getDirection() == AAUDIO_DIRECTION_INPUT) {
         // Increment before because we already got the data from the device.
         incrementFramesRead(numFrames);
+        finalAudioData = (void *) maybeConvertDeviceData(buffer, numFrames);
     }
 
     // Call using the AAudio callback interface.
-    aaudio_data_callback_result_t callbackResult = maybeCallDataCallback(buffer, numFrames);
+    aaudio_data_callback_result_t callbackResult = maybeCallDataCallback(finalAudioData, numFrames);
 
     if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE
             && getDirection() == AAUDIO_DIRECTION_OUTPUT) {
@@ -67,15 +72,15 @@
 
 // Implement FixedBlockProcessor
 int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
-    int32_t numFrames = numBytes / getBytesPerFrame();
+    int32_t numFrames = numBytes / getBytesPerDeviceFrame();
     return (int32_t) callDataCallbackFrames(buffer, numFrames);
 }
 
 void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
     aaudio_data_callback_result_t callbackResult;
-    // This illegal size can be used to AudioFlinger to stop calling us.
+    // This illegal size can be used to tell AudioFlinger to stop calling us.
     // This takes advantage of AudioFlinger killing the stream.
-    // TODO need API change in AudioRecord and AudioTrack
+    // TODO add to API in AudioRecord and AudioTrack
     const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
 
     switch (opcode) {
@@ -100,7 +105,7 @@
 
                 // If the caller specified an exact size then use a block size adapter.
                 if (mBlockAdapter != nullptr) {
-                    int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+                    int32_t byteCount = audioBuffer->frameCount * getBytesPerDeviceFrame();
                     callbackResult = mBlockAdapter->processVariableBlock(
                             (uint8_t *) audioBuffer->raw, byteCount);
                 } else {
@@ -109,7 +114,7 @@
                                                             audioBuffer->frameCount);
                 }
                 if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
-                    audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
+                    audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
                 } else { // STOP or invalid result
                     ALOGW("%s() callback requested stop, fake an error", __func__);
                     audioBuffer->size = SIZE_STOP_CALLBACKS;
@@ -179,19 +184,17 @@
     int64_t localPosition;
     status_t status = extendedTimestamp->getBestTimestamp(&localPosition, timeNanoseconds,
                                                           timebase, &location);
-    // use MonotonicCounter to prevent retrograde motion.
-    mTimestampPosition.update32((int32_t)localPosition);
-    *framePosition = mTimestampPosition.get();
+    if (status == OK) {
+        // use MonotonicCounter to prevent retrograde motion.
+        mTimestampPosition.update32((int32_t) localPosition);
+        *framePosition = mTimestampPosition.get();
+    }
 
 //    ALOGD("getBestTimestamp() fposition: server = %6lld, kernel = %6lld, location = %d",
 //          (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_SERVER],
 //          (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_KERNEL],
 //          (int)location);
-    if (status == WOULD_BLOCK) {
-        return AAUDIO_ERROR_INVALID_STATE;
-    } else {
-        return AAudioConvert_androidToAAudioResult(status);
-    }
+    return AAudioConvert_androidToAAudioResult(status);
 }
 
 void AudioStreamLegacy::onAudioDeviceUpdate(audio_port_handle_t deviceId)
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 28158e2..505f2ee 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -19,13 +19,15 @@
 #include <utils/Log.h>
 
 #include <stdint.h>
-#include <utils/String16.h>
-#include <media/AudioRecord.h>
-#include <aaudio/AAudio.h>
 
-#include "AudioClock.h"
+#include <aaudio/AAudio.h>
+#include <audio_utils/primitives.h>
+#include <media/AudioRecord.h>
+#include <utils/String16.h>
+
 #include "legacy/AudioStreamLegacy.h"
 #include "legacy/AudioStreamRecord.h"
+#include "utility/AudioClock.h"
 #include "utility/FixedBlockWriter.h"
 
 using namespace android;
@@ -63,10 +65,6 @@
     size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                         : builder.getBufferCapacity();
 
-    // TODO implement an unspecified Android format then use that.
-    audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
-            ? AUDIO_FORMAT_PCM_FLOAT
-            : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
     aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -82,6 +80,35 @@
             break;
     }
 
+    // Preserve behavior of API 26
+    if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
+        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+    }
+
+    // Maybe change device format to get a FAST path.
+    // AudioRecord does not support FAST mode for FLOAT data.
+    // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
+    // So IF the user asks for low latency FLOAT
+    // AND the sampleRate is likely to be compatible with FAST
+    // THEN request I16 and convert to FLOAT when passing to user.
+    // Note that hard coding 48000 Hz is not ideal because the sampleRate
+    // for a FAST path might not be 48000 Hz.
+    // It normally is but there is a chance that it is not.
+    // And there is no reliable way to know that in advance.
+    // Luckily the consequences of a wrong guess are minor.
+    // We just may not get a FAST track.
+    // But we wouldn't have anyway without this hack.
+    constexpr int32_t kMostLikelySampleRateForFast = 48000;
+    if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
+            && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
+            && (samplesPerFrame <= 2) // FAST only for mono and stereo
+            && (getSampleRate() == kMostLikelySampleRateForFast
+                || getSampleRate() == AAUDIO_UNSPECIFIED)) {
+        setDeviceFormat(AAUDIO_FORMAT_PCM_I16);
+    } else {
+        setDeviceFormat(getFormat());
+    }
+
     uint32_t notificationFrames = 0;
 
     // Setup the callback if there is one.
@@ -96,9 +123,6 @@
     }
     mCallbackBufferSize = builder.getFramesPerDataCallback();
 
-    ALOGD("open(), request notificationFrames = %u, frameCount = %u",
-          notificationFrames, (uint)frameCount);
-
     // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
     audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                            ? AUDIO_PORT_HANDLE_NONE
@@ -120,39 +144,59 @@
     aaudio_session_id_t requestedSessionId = builder.getSessionId();
     audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
 
-    mAudioRecord = new AudioRecord(
-            mOpPackageName // const String16& opPackageName TODO does not compile
-            );
-    mAudioRecord->set(
-            AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
-            getSampleRate(),
-            format,
-            channelMask,
-            frameCount,
-            callback,
-            callbackData,
-            notificationFrames,
-            false /*threadCanCallJava*/,
-            sessionId,
-            streamTransferType,
-            flags,
-            AUDIO_UID_INVALID, // DEFAULT uid
-            -1,                // DEFAULT pid
-            &attributes,
-            selectedDeviceId
-            );
+    // ----------- open the AudioRecord ---------------------
+    // Might retry, but never more than once.
+    for (int i = 0; i < 2; i ++) {
+        audio_format_t requestedInternalFormat =
+                AAudioConvert_aaudioToAndroidDataFormat(getDeviceFormat());
 
-    // Did we get a valid track?
-    status_t status = mAudioRecord->initCheck();
-    if (status != OK) {
-        close();
-        ALOGE("open(), initCheck() returned %d", status);
-        return AAudioConvert_androidToAAudioResult(status);
+        mAudioRecord = new AudioRecord(
+                mOpPackageName // const String16& opPackageName TODO does not compile
+        );
+        mAudioRecord->set(
+                AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
+                getSampleRate(),
+                requestedInternalFormat,
+                channelMask,
+                frameCount,
+                callback,
+                callbackData,
+                notificationFrames,
+                false /*threadCanCallJava*/,
+                sessionId,
+                streamTransferType,
+                flags,
+                AUDIO_UID_INVALID, // DEFAULT uid
+                -1,                // DEFAULT pid
+                &attributes,
+                selectedDeviceId
+        );
+
+        // Did we get a valid track?
+        status_t status = mAudioRecord->initCheck();
+        if (status != OK) {
+            close();
+            ALOGE("open(), initCheck() returned %d", status);
+            return AAudioConvert_androidToAAudioResult(status);
+        }
+
+        // Check to see if it was worth hacking the deviceFormat.
+        bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
+                           == AUDIO_INPUT_FLAG_FAST;
+        if (getFormat() != getDeviceFormat() && !gotFastPath) {
+            // We tried to get a FAST path by switching the device format.
+            // But it didn't work. So we might as well reopen using the same
+            // format for device and for app.
+            ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
+            mAudioRecord.clear();
+            setDeviceFormat(getFormat());
+        } else {
+            break; // Keep the one we just opened.
+        }
     }
 
     // Get the actual values from the AudioRecord.
     setSamplesPerFrame(mAudioRecord->channelCount());
-    setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
 
     int32_t actualSampleRate = mAudioRecord->getSampleRate();
     ALOGW_IF(actualSampleRate != getSampleRate(),
@@ -169,6 +213,29 @@
         mBlockAdapter = nullptr;
     }
 
+    // Allocate format conversion buffer if needed.
+    if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
+        && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+
+        if (builder.getDataCallbackProc() != nullptr) {
+            // If we have a callback then we need to convert the data into an internal float
+            // array and then pass that entire array to the app.
+            mFormatConversionBufferSizeInFrames =
+                    (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
+                    ? mCallbackBufferSize : getFramesPerBurst();
+            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
+            mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
+        } else {
+            // If we don't have a callback then we will read into an internal short array
+            // and then convert into the app float array in read().
+            mFormatConversionBufferSizeInFrames = getFramesPerBurst();
+            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
+            mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
+        }
+        ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
+              __func__, mFormatConversionBufferSizeInFrames);
+    }
+
     // Update performance mode based on the actual stream.
     // For example, if the sample rate does not match native then you won't get a FAST track.
     audio_input_flags_t actualFlags = mAudioRecord->getFlags();
@@ -216,6 +283,24 @@
     return AudioStream::close();
 }
 
+const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+    if (mFormatConversionBufferFloat.get() != nullptr) {
+        LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
+                            "%s() conversion size %d too large for buffer %d",
+                            __func__, numFrames, mFormatConversionBufferSizeInFrames);
+
+        int32_t numSamples = numFrames * getSamplesPerFrame();
+        // Only conversion supported is I16 to FLOAT
+        memcpy_to_float_from_i16(
+                    mFormatConversionBufferFloat.get(),
+                    (const int16_t *) audioData,
+                    numSamples);
+        return mFormatConversionBufferFloat.get();
+    } else {
+        return audioData;
+    }
+}
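memcpy_to_float_from_i16() comes from libaudioutils (hence the new libaudioutils dependency above); conceptually it is a per-sample scale, roughly like this sketch rather than the actual, possibly vectorized, implementation:

#include <stddef.h>
#include <stdint.h>

static void i16ToFloat(float *dst, const int16_t *src, size_t count) {
    for (size_t i = 0; i < count; i++) {
        dst[i] = src[i] * (1.0f / 32768.0f);  // maps [-32768, 32767] into about [-1.0, 1.0)
    }
}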
+
 void AudioStreamRecord::processCallback(int event, void *info) {
     switch (event) {
         case AudioRecord::EVENT_MORE_DATA:
@@ -265,7 +350,7 @@
     mTimestampPosition.set(getFramesRead());
     mAudioRecord->stop();
     mCallbackEnabled.store(false);
-    mFramesRead.reset32();
+    mFramesWritten.reset32(); // service writes frames, service position reset on stop
     mTimestampPosition.reset32();
     // Pass false to prevent errorCallback from being called after disconnect
     // when app has already requested a stop().
@@ -302,9 +387,10 @@
                                       int32_t numFrames,
                                       int64_t timeoutNanoseconds)
 {
-    int32_t bytesPerFrame = getBytesPerFrame();
+    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
     int32_t numBytes;
-    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+    // This will detect out of range values for numFrames.
+    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
     if (result != AAUDIO_OK) {
         return result;
     }
@@ -315,19 +401,49 @@
 
     // TODO add timeout to AudioRecord
     bool blocking = (timeoutNanoseconds > 0);
-    ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
-    if (bytesRead == WOULD_BLOCK) {
+
+    ssize_t bytesActuallyRead = 0;
+    ssize_t totalBytesRead = 0;
+    if (mFormatConversionBufferI16.get() != nullptr) {
+        // Convert I16 data to float using an intermediate buffer.
+        float *floatBuffer = (float *) buffer;
+        int32_t framesLeft = numFrames;
+        // Perform conversion using multiple read()s if necessary.
+        while (framesLeft > 0) {
+            // Read into short internal buffer.
+            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
+            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
+            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(), bytesToRead, blocking);
+            if (bytesActuallyRead <= 0) {
+                break;
+            }
+            totalBytesRead += bytesActuallyRead;
+            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
+            // Convert into app float buffer.
+            size_t numSamples = framesToConvert * getSamplesPerFrame();
+            memcpy_to_float_from_i16(
+                    floatBuffer,
+                    mFormatConversionBufferI16.get(),
+                    numSamples);
+            floatBuffer += numSamples;
+            framesLeft -= framesToConvert;
+        }
+    } else {
+        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
+        totalBytesRead = bytesActuallyRead;
+    }
+    if (bytesActuallyRead == WOULD_BLOCK) {
         return 0;
-    } else if (bytesRead < 0) {
-        // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
-        // AudioRecord invalidation
-        if (bytesRead == DEAD_OBJECT) {
+    } else if (bytesActuallyRead < 0) {
+        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
+        // AudioRecord invalidation.
+        if (bytesActuallyRead == DEAD_OBJECT) {
             setState(AAUDIO_STREAM_STATE_DISCONNECTED);
             return AAUDIO_ERROR_DISCONNECTED;
         }
-        return AAudioConvert_androidToAAudioResult(bytesRead);
+        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
     }
-    int32_t framesRead = (int32_t)(bytesRead / bytesPerFrame);
+    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
     incrementFramesRead(framesRead);
 
     result = updateStateMachine();
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index c1723ba..2f41d34 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -76,6 +76,8 @@
         return incrementFramesRead(frames);
     }
 
+    const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) override;
+
 private:
     android::sp<android::AudioRecord> mAudioRecord;
     // adapts between variable sized blocks and fixed size blocks
@@ -83,6 +85,11 @@
 
     // TODO add 64-bit position reporting to AudioRecord and use it.
     android::String16                mOpPackageName;
+
+    // Only one type of conversion buffer is used.
+    std::unique_ptr<float[]>         mFormatConversionBufferFloat;
+    std::unique_ptr<int16_t[]>       mFormatConversionBufferI16;
+    int32_t                          mFormatConversionBufferSizeInFrames = 0;
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 023e8af..505cd77 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -181,6 +181,7 @@
     aaudio_format_t aaudioFormat =
             AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
     setFormat(aaudioFormat);
+    setDeviceFormat(aaudioFormat);
 
     int32_t actualSampleRate = mAudioTrack->getSampleRate();
     ALOGW_IF(actualSampleRate != getSampleRate(),
@@ -309,7 +310,7 @@
     setState(AAUDIO_STREAM_STATE_FLUSHING);
     incrementFramesRead(getFramesWritten() - getFramesRead());
     mAudioTrack->flush();
-    mFramesWritten.reset32();
+    mFramesRead.reset32(); // service reads frames, service position reset on flush
     mTimestampPosition.reset32();
     return AAUDIO_OK;
 }
@@ -323,7 +324,7 @@
     setState(AAUDIO_STREAM_STATE_STOPPING);
     incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
     mTimestampPosition.set(getFramesWritten());
-    mFramesWritten.reset32();
+    mFramesRead.reset32(); // service reads frames, service position reset on stop
     mTimestampPosition.reset32();
     mAudioTrack->stop();
     mCallbackEnabled.store(false);
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 4a2a0a8..40ebb76 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -59,10 +59,15 @@
     return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
 }
 
+// Clip to valid range of a float sample to prevent excessive volume.
+// By using fmin and fmax we also protect against NaN.
+static float clipToMinMaxHeadroom(float input) {
+    return fmin(MAX_HEADROOM, fmax(MIN_HEADROOM, input));
+}
+
 static float clipAndClampFloatToPcm16(float sample, float scaler) {
     // Clip to valid range of a float sample to prevent excessive volume.
-    if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
-    else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+    sample = clipToMinMaxHeadroom(sample);
 
     // Scale and convert to a short.
     float fval = sample * scaler;
@@ -127,6 +132,7 @@
     }
 }
 
+
 // This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
 void AAudio_linearRamp(const float *source,
                        float *destination,
@@ -139,10 +145,8 @@
     for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
         for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
             float sample = *source++;
-
             // Clip to valid range of a float sample to prevent excessive volume.
-            if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
-            else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+            sample = clipToMinMaxHeadroom(sample);
 
             *destination++ = sample * scaler;
         }
@@ -240,8 +244,7 @@
         float sample = *source++;
 
         // Clip to valid range of a float sample to prevent excessive volume.
-        if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
-        else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+        sample = clipToMinMaxHeadroom(sample);
 
         const float scaler = amplitude1 + (frameIndex * delta);
         float sampleScaled = sample * scaler;
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index beec9e2..68194db 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -160,3 +160,10 @@
         "libutils",
     ],
 }
+
+cc_test {
+    name: "test_atomic_fifo",
+    defaults: ["libaaudio_tests_defaults"],
+    srcs: ["test_atomic_fifo.cpp"],
+    shared_libs: ["libaaudio"],
+}
diff --git a/media/libaaudio/tests/test_atomic_fifo.cpp b/media/libaaudio/tests/test_atomic_fifo.cpp
new file mode 100644
index 0000000..0085217
--- /dev/null
+++ b/media/libaaudio/tests/test_atomic_fifo.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+#include <stdlib.h>
+
+#include "fifo/FifoBuffer.h"
+#include "fifo/FifoController.h"
+
+using android::fifo_frames_t;
+using android::FifoController;
+using android::FifoBuffer;
+using android::WrappingBuffer;
+
+TEST(test_fifo_controller, fifo_indices) {
+    // Values are arbitrary primes designed to trigger edge cases.
+    constexpr int capacity = 83;
+    constexpr int threshold = 47;
+    FifoController   fifoController(capacity, threshold);
+    ASSERT_EQ(capacity, fifoController.getCapacity());
+    ASSERT_EQ(threshold, fifoController.getThreshold());
+
+    ASSERT_EQ(0, fifoController.getReadCounter());
+    ASSERT_EQ(0, fifoController.getWriteCounter());
+    ASSERT_EQ(0, fifoController.getFullFramesAvailable());
+    ASSERT_EQ(threshold, fifoController.getEmptyFramesAvailable());
+
+    // Pretend to write some data.
+    constexpr int advance1 = 23;
+    fifoController.advanceWriteIndex(advance1);
+    int advanced = advance1;
+    ASSERT_EQ(0, fifoController.getReadCounter());
+    ASSERT_EQ(0, fifoController.getReadIndex());
+    ASSERT_EQ(advanced, fifoController.getWriteCounter());
+    ASSERT_EQ(advanced, fifoController.getWriteIndex());
+    ASSERT_EQ(advanced, fifoController.getFullFramesAvailable());
+    ASSERT_EQ(threshold - advanced, fifoController.getEmptyFramesAvailable());
+
+    // Pretend to read the data.
+    fifoController.advanceReadIndex(advance1);
+    ASSERT_EQ(advanced, fifoController.getReadCounter());
+    ASSERT_EQ(advanced, fifoController.getReadIndex());
+    ASSERT_EQ(advanced, fifoController.getWriteCounter());
+    ASSERT_EQ(advanced, fifoController.getWriteIndex());
+    ASSERT_EQ(0, fifoController.getFullFramesAvailable());
+    ASSERT_EQ(threshold, fifoController.getEmptyFramesAvailable());
+
+    // Write past end of buffer.
+    constexpr int advance2 = 13 + capacity - advance1;
+    fifoController.advanceWriteIndex(advance2);
+    advanced += advance2;
+    ASSERT_EQ(advance1, fifoController.getReadCounter());
+    ASSERT_EQ(advance1, fifoController.getReadIndex());
+    ASSERT_EQ(advanced, fifoController.getWriteCounter());
+    ASSERT_EQ(advanced - capacity, fifoController.getWriteIndex());
+    ASSERT_EQ(advance2, fifoController.getFullFramesAvailable());
+    ASSERT_EQ(threshold - advance2, fifoController.getEmptyFramesAvailable());
+}
+
+// TODO consider using a template for other data types.
+class TestFifoBuffer {
+public:
+    explicit TestFifoBuffer(fifo_frames_t capacity, fifo_frames_t threshold = 0)
+        : mFifoBuffer(sizeof(int16_t), capacity) {
+        // For reading and writing.
+        mData = new int16_t[capacity];
+        if (threshold <= 0) {
+            threshold = capacity;
+        }
+        mFifoBuffer.setThreshold(threshold);
+        mThreshold = threshold;
+    }
+
+    void checkMisc() {
+        ASSERT_EQ((int32_t)(2 * sizeof(int16_t)), mFifoBuffer.convertFramesToBytes(2));
+        ASSERT_EQ(mThreshold, mFifoBuffer.getThreshold());
+    }
+
+    // Verify that the available frames in each part add up correctly.
+    void checkWrappingBuffer() {
+        WrappingBuffer wrappingBuffer;
+        fifo_frames_t framesAvailable =
+                mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+        fifo_frames_t wrapAvailable = mFifoBuffer.getEmptyRoomAvailable(&wrappingBuffer);
+        EXPECT_EQ(framesAvailable, wrapAvailable);
+        fifo_frames_t bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
+        EXPECT_EQ(framesAvailable, bothAvailable);
+
+        framesAvailable =
+                mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+        wrapAvailable = mFifoBuffer.getFullDataAvailable(&wrappingBuffer);
+        EXPECT_EQ(framesAvailable, wrapAvailable);
+        bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
+        EXPECT_EQ(framesAvailable, bothAvailable);
+    }
+
+    // Write data but do not overflow.
+    void writeData(fifo_frames_t numFrames) {
+        fifo_frames_t framesAvailable =
+                mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+        fifo_frames_t framesToWrite = std::min(framesAvailable, numFrames);
+        for (int i = 0; i < framesToWrite; i++) {
+            mData[i] = mNextWriteIndex++;
+        }
+        fifo_frames_t actual = mFifoBuffer.write(mData, framesToWrite);
+        ASSERT_EQ(framesToWrite, actual);
+    }
+
+    // Read data but do not underflow.
+    void verifyData(fifo_frames_t numFrames) {
+        fifo_frames_t framesAvailable =
+                mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+        fifo_frames_t framesToRead = std::min(framesAvailable, numFrames);
+        fifo_frames_t actual = mFifoBuffer.read(mData, framesToRead);
+        ASSERT_EQ(framesToRead, actual);
+        for (int i = 0; i < framesToRead; i++) {
+            ASSERT_EQ(mNextVerifyIndex++, mData[i]);
+        }
+    }
+
+    // Wrap around the end of the buffer.
+    void checkWrappingWriteRead() {
+        constexpr int frames1 = 43;
+        constexpr int frames2 = 15;
+
+        writeData(frames1);
+        checkWrappingBuffer();
+        verifyData(frames1);
+        checkWrappingBuffer();
+
+        writeData(frames2);
+        checkWrappingBuffer();
+        verifyData(frames2);
+        checkWrappingBuffer();
+    }
+
+    // Write and Read a specific amount of data.
+    void checkWriteRead() {
+        const fifo_frames_t capacity = mFifoBuffer.getBufferCapacityInFrames();
+        // Wrap around with the smaller region in the second half.
+        const int frames1 = capacity - 4;
+        const int frames2 = 7; // arbitrary, small
+        writeData(frames1);
+        verifyData(frames1);
+        writeData(frames2);
+        verifyData(frames2);
+    }
+
+    // Write and Read a specific amount of data.
+    void checkWriteReadSmallLarge() {
+        const fifo_frames_t capacity = mFifoBuffer.getBufferCapacityInFrames();
+        // Wrap around with the larger region in the second half.
+        const int frames1 = capacity - 4;
+        const int frames2 = capacity - 9; // arbitrary, large
+        writeData(frames1);
+        verifyData(frames1);
+        writeData(frames2);
+        verifyData(frames2);
+    }
+
+    // Randomly read or write up to the maximum amount of data.
+    void checkRandomWriteRead() {
+        for (int i = 0; i < 20; i++) {
+            fifo_frames_t framesEmpty =
+                    mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+            fifo_frames_t numFrames = (fifo_frames_t)(drand48() * framesEmpty);
+            writeData(numFrames);
+
+            fifo_frames_t framesFull =
+                    mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+            numFrames = (fifo_frames_t)(drand48() * framesFull);
+            verifyData(numFrames);
+        }
+    }
+
+    FifoBuffer     mFifoBuffer;
+    int16_t       *mData;
+    fifo_frames_t  mNextWriteIndex = 0;
+    fifo_frames_t  mNextVerifyIndex = 0;
+    fifo_frames_t  mThreshold;
+};
+
+TEST(test_fifo_buffer, fifo_read_write) {
+    constexpr int capacity = 51; // arbitrary
+    TestFifoBuffer tester(capacity);
+    tester.checkMisc();
+    tester.checkWriteRead();
+}
+
+TEST(test_fifo_buffer, fifo_wrapping_read_write) {
+    constexpr int capacity = 59; // arbitrary, a little bigger this time
+    TestFifoBuffer tester(capacity);
+    tester.checkWrappingWriteRead();
+}
+
+TEST(test_fifo_buffer, fifo_read_write_small_large) {
+    constexpr int capacity = 51; // arbitrary
+    TestFifoBuffer tester(capacity);
+    tester.checkWriteReadSmallLarge();
+}
+
+TEST(test_fifo_buffer, fifo_random_read_write) {
+    constexpr int capacity = 51; // arbitrary
+    TestFifoBuffer tester(capacity);
+    tester.checkRandomWriteRead();
+}
+
+TEST(test_fifo_buffer, fifo_random_threshold) {
+    constexpr int capacity = 67; // arbitrary
+    constexpr int threshold = 37; // arbitrary
+    TestFifoBuffer tester(capacity, threshold);
+    tester.checkRandomWriteRead();
+}
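
The fifo_indices test at the top of this file relies on the distinction between counters (monotonic) and indices (wrapped at capacity). A minimal standalone sketch of that arithmetic, using the same numbers as the test (illustrative only, not part of this patch; FifoControllerBase may compute these differently internally):

    #include <cassert>
    #include <cstdint>

    int main() {
        constexpr int64_t capacity = 83;
        constexpr int64_t threshold = 47;
        // Counter values after the second advanceWriteIndex() in the test above.
        constexpr int64_t readCounter = 23;   // advance1
        constexpr int64_t writeCounter = 96;  // advance1 + advance2
        // Indices are the counters wrapped at capacity; the counters never wrap here.
        const int64_t writeIndex = writeCounter % capacity;     // 13 == advanced - capacity
        const int64_t fullFrames = writeCounter - readCounter;  // 73 == advance2
        const int64_t emptyFrames = threshold - fullFrames;     // -26, past the threshold
        assert(writeIndex == 13 && fullFrames == 73 && emptyFrames == -26);
        return 0;
    }
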
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 2df37a8..6146c0e 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -49,6 +49,7 @@
         "libaudiomanager",
         "libmedia_helper",
         "libmediametrics",
+        "libmediautils",
     ],
     export_shared_lib_headers: ["libbinder"],
 
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 21d3fa6..f9df5b1 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -99,6 +99,11 @@
     static constexpr char kAudioRecordLatency[] = "android.media.audiorecord.latency";
     static constexpr char kAudioRecordSampleRate[] = "android.media.audiorecord.samplerate";
     static constexpr char kAudioRecordChannelCount[] = "android.media.audiorecord.channels";
+    static constexpr char kAudioRecordCreated[] = "android.media.audiorecord.createdMs";
+    static constexpr char kAudioRecordDuration[] = "android.media.audiorecord.durationMs";
+    static constexpr char kAudioRecordCount[] = "android.media.audiorecord.n";
+    static constexpr char kAudioRecordError[] = "android.media.audiorecord.errcode";
+    static constexpr char kAudioRecordErrorFunction[] = "android.media.audiorecord.errfunc";
 
     // constructor guarantees mAnalyticsItem is valid
 
@@ -109,6 +114,24 @@
                                audioFormatTypeString(record->mFormat).c_str());
     mAnalyticsItem->setCString(kAudioRecordSource,
                                audioSourceString(record->mAttributes.source).c_str());
+
+    // log the total recording duration, including any recording still in progress, and the start count.
+    nsecs_t active = 0;
+    if (mStartedNs != 0) {
+        active = systemTime() - mStartedNs;
+    }
+    mAnalyticsItem->setInt64(kAudioRecordDuration, (mDurationNs + active) / (1000 * 1000));
+    mAnalyticsItem->setInt32(kAudioRecordCount, mCount);
+
+    // XXX I don't know that this adds a lot of value, long term
+    if (mCreatedNs != 0) {
+        mAnalyticsItem->setInt64(kAudioRecordCreated, mCreatedNs / (1000 * 1000));
+    }
+
+    if (mLastError != NO_ERROR) {
+        mAnalyticsItem->setInt32(kAudioRecordError, mLastError);
+        mAnalyticsItem->setCString(kAudioRecordErrorFunction, mLastErrorFunc.c_str());
+    }
 }
 
 // hand the user a snapshot of the metrics.
@@ -354,6 +377,9 @@
 
 exit:
     mStatus = status;
+    if (status != NO_ERROR) {
+        mMediaMetrics.markError(status, __FUNCTION__);
+    }
     return status;
 }
 
@@ -412,8 +438,14 @@
             get_sched_policy(0, &mPreviousSchedulingGroup);
             androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
         }
+
+        // we've successfully started, log that time
+        mMediaMetrics.logStart(systemTime());
     }
 
+    if (status != NO_ERROR) {
+        mMediaMetrics.markError(status, __FUNCTION__);
+    }
     return status;
 }
 
@@ -438,6 +470,9 @@
         setpriority(PRIO_PROCESS, 0, mPreviousPriority);
         set_sched_policy(0, mPreviousSchedulingGroup);
     }
+
+    // we've successfully stopped, log that time
+    mMediaMetrics.logStop(systemTime());
 }
 
 bool AudioRecord::stopped() const
@@ -1231,6 +1266,14 @@
     ALOGW("dead IAudioRecord, creating a new one from %s()", from);
     ++mSequence;
 
+    const int INITIAL_RETRIES = 3;
+    int retries = INITIAL_RETRIES;
+retry:
+    if (retries < INITIAL_RETRIES) {
+        // refresh the audio configuration cache in this process to make sure we get new
+        // input parameters and new IAudioRecord in createRecord_l()
+        AudioSystem::clearAudioConfigCache();
+    }
     mFlags = mOrigFlags;
 
     // if the new IAudioRecord is created, createRecord_l() will modify the
@@ -1239,7 +1282,11 @@
     Modulo<uint32_t> position(mProxy->getPosition());
     mNewPosition = position + mUpdatePeriod;
     status_t result = createRecord_l(position, mOpPackageName);
-    if (result == NO_ERROR) {
+
+    if (result != NO_ERROR) {
+        ALOGW("%s(): createRecord_l failed, do not retry", __func__);
+        retries = 0;
+    } else {
         if (mActive) {
             // callback thread or sync event hasn't changed
             // FIXME this fails if we have a new AudioFlinger instance
@@ -1248,6 +1295,14 @@
         }
         mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
     }
+
+    if (result != NO_ERROR) {
+        ALOGW("%s() failed status %d, retries %d", __func__, result, retries);
+        if (--retries > 0) {
+            goto retry;
+        }
+    }
+
     if (result != NO_ERROR) {
         ALOGW("restoreRecord_l() failed status %d", result);
         mActive = false;
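
The retry added above uses a goto label; restated as a loop, the same control flow looks roughly like the sketch below, with hypothetical callables standing in for createRecord_l() and the restart path (an illustration, not the patch itself):

    #include <functional>

    // Sketch of the retry shape used in restoreRecord_l() above (hypothetical helper):
    // - a createRecord_l() failure aborts immediately ("do not retry")
    // - a restart failure is retried, clearing the audio config cache before each retry
    int restoreWithRetries(const std::function<int()>& create,
                           const std::function<int()>& restart,
                           const std::function<void()>& clearConfigCache) {
        constexpr int INITIAL_RETRIES = 3;
        int result = 0;
        for (int retries = INITIAL_RETRIES; ; ) {
            if (retries < INITIAL_RETRIES) {
                clearConfigCache();      // refresh cached input parameters before retrying
            }
            result = create();           // stands in for createRecord_l()
            if (result != 0) break;      // creation failed: give up right away
            result = restart();          // stands in for restarting the callback/sync path
            if (result == 0) break;      // success
            if (--retries <= 0) break;   // out of retries
        }
        return result;
    }
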
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 1c4a80e..c072901 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -20,6 +20,7 @@
 #include <utils/Log.h>
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
+#include <binder/IPCThreadState.h>
 #include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
 #include <media/IAudioFlinger.h>
@@ -75,7 +76,9 @@
         af = gAudioFlinger;
     }
     if (afc != 0) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
         af->registerClient(afc);
+        IPCThreadState::self()->restoreCallingIdentity(token);
     }
     return af;
 }
@@ -767,7 +770,10 @@
         ap = gAudioPolicyService;
     }
     if (apc != 0) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
         ap->registerClient(apc);
+        ap->setAudioPortCallbacksEnabled(apc->isAudioPortCbEnabled());
+        IPCThreadState::self()->restoreCallingIdentity(token);
     }
 
     return ap;
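
Both registration paths above now drop the caller's binder identity around registerClient(), so the registration is attributed to the process making the call rather than to whatever app happened to trigger the lazy connection. A generic sketch of the pattern (hypothetical helper, not AOSP code):

    #include <functional>
    #include <binder/IPCThreadState.h>

    // Run a binder call with our own identity instead of the incoming caller's.
    void callAsSelf(const std::function<void()>& binderCall) {
        const int64_t token = android::IPCThreadState::self()->clearCallingIdentity();
        binderCall();
        android::IPCThreadState::self()->restoreCallingIdentity(token);
    }
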
@@ -1280,6 +1286,24 @@
     return af->getMicrophones(microphones);
 }
 
+status_t AudioSystem::getSurroundFormats(unsigned int *numSurroundFormats,
+                                         audio_format_t *surroundFormats,
+                                         bool *surroundFormatsEnabled,
+                                         bool reported)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->getSurroundFormats(
+            numSurroundFormats, surroundFormats, surroundFormatsEnabled, reported);
+}
+
+status_t AudioSystem::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->setSurroundFormatEnabled(audioFormat, enabled);
+}
+
 // ---------------------------------------------------------------------------
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 86791c2..ab9efe8 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -806,7 +806,7 @@
         return;
     }
     AutoMutex lock(mLock);
-    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
+    if (mState == STATE_ACTIVE) {
         return;
     }
     flush_l();
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index b4c179d..a018b22 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -725,12 +725,13 @@
         const size_t mask = overflowBit - 1;
         int32_t newRear = (rear & ~mask) | (stop & mask);
         ssize_t filled = newRear - front;
-        if (filled < 0) {
+        // overflowBit is unsigned, so cast to signed for comparison.
+        if (filled >= (ssize_t)overflowBit) {
             // front and rear offsets span the overflow bit of the p2 mask
-            // so rebasing newrear.
+            // so rebasing newRear on the rear offset is off by the overflow bit.
             ALOGV("stop wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
-            newRear += overflowBit;
-            filled += overflowBit;
+            newRear -= overflowBit;
+            filled -= overflowBit;
         }
         if (0 <= filled && (size_t) filled <= mFrameCount) {
             // we're stopped, return the stop level as newRear
@@ -1230,6 +1231,21 @@
     return 0;
 }
 
+__attribute__((no_sanitize("integer")))
+size_t AudioRecordServerProxy::framesReadySafe() const
+{
+    if (mIsShutdown) {
+        return 0;
+    }
+    const int32_t front = android_atomic_acquire_load(&mCblk->u.mStreaming.mFront);
+    const int32_t rear = mCblk->u.mStreaming.mRear;
+    const ssize_t filled = rear - front;
+    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+        return 0; // error condition, silently return 0.
+    }
+    return filled;
+}
+
 // ---------------------------------------------------------------------------
 
 }   // namespace android
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 77cfe4d..37c62a8 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -25,8 +25,7 @@
 #include <binder/IPCThreadState.h>
 #include <binder/Parcel.h>
 #include <media/TimeCheck.h>
-#include <private/android_filesystem_config.h>
-
+#include <mediautils/ServiceUtilities.h>
 #include "IAudioFlinger.h"
 
 namespace android {
@@ -870,7 +869,6 @@
     switch (code) {
         case SET_STREAM_VOLUME:
         case SET_STREAM_MUTE:
-        case SET_MODE:
         case OPEN_OUTPUT:
         case OPEN_DUPLICATE_OUTPUT:
         case CLOSE_OUTPUT:
@@ -891,7 +889,15 @@
         case SET_RECORD_SILENCED:
             ALOGW("%s: transaction %d received from PID %d",
                   __func__, code, IPCThreadState::self()->getCallingPid());
-            return INVALID_OPERATION;
+            // return status only for non-void methods
+            switch (code) {
+                case SET_RECORD_SILENCED:
+                    break;
+                default:
+                    reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                    break;
+            }
+            return OK;
         default:
             break;
     }
@@ -904,12 +910,19 @@
         case SET_MIC_MUTE:
         case SET_LOW_RAM_DEVICE:
         case SYSTEM_READY: {
-            uid_t multiUserClientUid = IPCThreadState::self()->getCallingUid() % AID_USER_OFFSET;
-            if (multiUserClientUid >= AID_APP_START) {
+            if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
                       IPCThreadState::self()->getCallingUid());
-                return INVALID_OPERATION;
+                // return status only for non-void methods
+                switch (code) {
+                    case SYSTEM_READY:
+                        break;
+                    default:
+                        reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                        break;
+                }
+                return OK;
             }
         } break;
         default:
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index a49b2cb..316105c 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -24,11 +24,10 @@
 
 #include <binder/IPCThreadState.h>
 #include <binder/Parcel.h>
-
 #include <media/AudioEffect.h>
 #include <media/IAudioPolicyService.h>
 #include <media/TimeCheck.h>
-#include <private/android_filesystem_config.h>
+#include <mediautils/ServiceUtilities.h>
 #include <system/audio.h>
 
 namespace android {
@@ -80,7 +79,9 @@
     SET_AUDIO_PORT_CALLBACK_ENABLED,
     SET_MASTER_MONO,
     GET_MASTER_MONO,
-    GET_STREAM_VOLUME_DB
+    GET_STREAM_VOLUME_DB,
+    GET_SURROUND_FORMATS,
+    SET_SURROUND_FORMAT_ENABLED
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -829,6 +830,54 @@
         }
         return reply.readFloat();
     }
+
+    virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+                                        audio_format_t *surroundFormats,
+                                        bool *surroundFormatsEnabled,
+                                        bool reported)
+    {
+        if (numSurroundFormats == NULL || (*numSurroundFormats != 0 &&
+                (surroundFormats == NULL || surroundFormatsEnabled == NULL))) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        unsigned int numSurroundFormatsReq = *numSurroundFormats;
+        data.writeUint32(numSurroundFormatsReq);
+        data.writeBool(reported);
+        status_t status = remote()->transact(GET_SURROUND_FORMATS, data, &reply);
+        if (status == NO_ERROR && (status = (status_t)reply.readInt32()) == NO_ERROR) {
+            *numSurroundFormats = reply.readUint32();
+        }
+        if (status == NO_ERROR) {
+            if (numSurroundFormatsReq > *numSurroundFormats) {
+                numSurroundFormatsReq = *numSurroundFormats;
+            }
+            if (numSurroundFormatsReq > 0) {
+                status = reply.read(surroundFormats,
+                                    numSurroundFormatsReq * sizeof(audio_format_t));
+                if (status != NO_ERROR) {
+                    return status;
+                }
+                status = reply.read(surroundFormatsEnabled,
+                                    numSurroundFormatsReq * sizeof(bool));
+            }
+        }
+        return status;
+    }
+
+    virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(audioFormat);
+        data.writeBool(enabled);
+        status_t status = remote()->transact(SET_SURROUND_FORMAT_ENABLED, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -857,7 +906,16 @@
         case RELEASE_SOUNDTRIGGER_SESSION:
             ALOGW("%s: transaction %d received from PID %d",
                   __func__, code, IPCThreadState::self()->getCallingPid());
-            return INVALID_OPERATION;
+            // return status only for non-void methods
+            switch (code) {
+                case RELEASE_OUTPUT:
+                case RELEASE_INPUT:
+                    break;
+                default:
+                    reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                    break;
+            }
+            return OK;
         default:
             break;
     }
@@ -867,38 +925,29 @@
         case SET_DEVICE_CONNECTION_STATE:
         case HANDLE_DEVICE_CONFIG_CHANGE:
         case SET_PHONE_STATE:
-        case SET_RINGER_MODE:
-        case SET_FORCE_USE:
+//FIXME: Allow SET_FORCE_USE calls from system apps until a better use case routing API is available
+//      case SET_FORCE_USE:
         case INIT_STREAM_VOLUME:
         case SET_STREAM_VOLUME:
         case REGISTER_POLICY_MIXES:
-        case SET_MASTER_MONO: {
-            uid_t multiUserClientUid = IPCThreadState::self()->getCallingUid() % AID_USER_OFFSET;
-            if (multiUserClientUid >= AID_APP_START) {
+        case SET_MASTER_MONO:
+        case START_AUDIO_SOURCE:
+        case STOP_AUDIO_SOURCE:
+        case GET_SURROUND_FORMATS:
+        case SET_SURROUND_FORMAT_ENABLED: {
+            if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
                       IPCThreadState::self()->getCallingUid());
-                return INVALID_OPERATION;
+                reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                return OK;
             }
         } break;
         default:
             break;
     }
 
-    // FIXME: extend timeout for SET_DEVICE_CONNECTION_STATE and HANDLE_DEVICE_CONFIG_CHANGE
-    // while we investigate why BT A2DP device connection/disconnection can sometimes
-    // take more than 5 seconds
-    uint32_t timeoutMs = TimeCheck::kDefaultTimeOutMs;
-    switch (code) {
-        case SET_DEVICE_CONNECTION_STATE:
-        case HANDLE_DEVICE_CONFIG_CHANGE:
-            timeoutMs *= 2;
-            break;
-        default:
-            break;
-    }
-
-    TimeCheck check("IAudioPolicyService", timeoutMs);
+    TimeCheck check("IAudioPolicyService");
 
     switch (code) {
         case SET_DEVICE_CONNECTION_STATE: {
@@ -989,7 +1038,7 @@
 
         case GET_OUTPUT_FOR_ATTR: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_attributes_t attr;
+            audio_attributes_t attr = {};
             bool hasAttributes = data.readInt32() != 0;
             if (hasAttributes) {
                 data.read(&attr, sizeof(audio_attributes_t));
@@ -1058,7 +1107,7 @@
 
         case GET_INPUT_FOR_ATTR: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_attributes_t attr;
+            audio_attributes_t attr = {};
             data.read(&attr, sizeof(audio_attributes_t));
             sanetizeAudioAttributes(&attr);
             audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
@@ -1160,8 +1209,11 @@
 
         case GET_OUTPUT_FOR_EFFECT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            effect_descriptor_t desc;
-            data.read(&desc, sizeof(effect_descriptor_t));
+            effect_descriptor_t desc = {};
+            if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+                android_errorWriteLog(0x534e4554, "73126106");
+            }
+            (void)sanitizeEffectDescriptor(&desc);
             audio_io_handle_t output = getOutputForEffect(&desc);
             reply->writeInt32(static_cast <int>(output));
             return NO_ERROR;
@@ -1169,8 +1221,11 @@
 
         case REGISTER_EFFECT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            effect_descriptor_t desc;
-            data.read(&desc, sizeof(effect_descriptor_t));
+            effect_descriptor_t desc = {};
+            if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+                android_errorWriteLog(0x534e4554, "73126106");
+            }
+            (void)sanitizeEffectDescriptor(&desc);
             audio_io_handle_t io = data.readInt32();
             uint32_t strategy = data.readInt32();
             audio_session_t session = (audio_session_t) data.readInt32();
@@ -1229,7 +1284,7 @@
                 count = AudioEffect::kMaxPreProcessing;
             }
             uint32_t retCount = count;
-            effect_descriptor_t *descriptors = new effect_descriptor_t[count];
+            effect_descriptor_t *descriptors = new effect_descriptor_t[count]{};
             status_t status = queryDefaultPreProcessing(audioSession, descriptors, &retCount);
             reply->writeInt32(status);
             if (status != NO_ERROR && status != NO_MEMORY) {
@@ -1248,7 +1303,7 @@
 
         case IS_OFFLOAD_SUPPORTED: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_offload_info_t info;
+            audio_offload_info_t info = {};
             data.read(&info, sizeof(audio_offload_info_t));
             bool isSupported = isOffloadSupported(info);
             reply->writeInt32(isSupported);
@@ -1303,7 +1358,7 @@
 
         case CREATE_AUDIO_PATCH: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            struct audio_patch patch;
+            struct audio_patch patch = {};
             data.read(&patch, sizeof(struct audio_patch));
             audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
             if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
@@ -1319,7 +1374,7 @@
 
         case RELEASE_AUDIO_PATCH: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_patch_handle_t handle;
+            audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
             data.read(&handle, sizeof(audio_patch_handle_t));
             status_t status = releaseAudioPatch(handle);
             reply->writeInt32(status);
@@ -1358,8 +1413,9 @@
 
         case SET_AUDIO_PORT_CONFIG: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            struct audio_port_config config;
+            struct audio_port_config config = {};
             data.read(&config, sizeof(struct audio_port_config));
+            (void)sanitizeAudioPortConfig(&config);
             status_t status = setAudioPortConfig(&config);
             reply->writeInt32(status);
             return NO_ERROR;
@@ -1433,9 +1489,10 @@
 
         case START_AUDIO_SOURCE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            struct audio_port_config source;
+            struct audio_port_config source = {};
             data.read(&source, sizeof(struct audio_port_config));
-            audio_attributes_t attributes;
+            (void)sanitizeAudioPortConfig(&source);
+            audio_attributes_t attributes = {};
             data.read(&attributes, sizeof(audio_attributes_t));
             sanetizeAudioAttributes(&attributes);
             audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
@@ -1483,11 +1540,63 @@
             return NO_ERROR;
         }
 
+        case GET_SURROUND_FORMATS: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            unsigned int numSurroundFormatsReq = data.readUint32();
+            if (numSurroundFormatsReq > MAX_ITEMS_PER_LIST) {
+                numSurroundFormatsReq = MAX_ITEMS_PER_LIST;
+            }
+            bool reported = data.readBool();
+            unsigned int numSurroundFormats = numSurroundFormatsReq;
+            audio_format_t *surroundFormats = (audio_format_t *)calloc(
+                    numSurroundFormats, sizeof(audio_format_t));
+            bool *surroundFormatsEnabled = (bool *)calloc(numSurroundFormats, sizeof(bool));
+            if (numSurroundFormatsReq > 0 &&
+                    (surroundFormats == NULL || surroundFormatsEnabled == NULL)) {
+                free(surroundFormats);
+                free(surroundFormatsEnabled);
+                reply->writeInt32(NO_MEMORY);
+                return NO_ERROR;
+            }
+            status_t status = getSurroundFormats(
+                    &numSurroundFormats, surroundFormats, surroundFormatsEnabled, reported);
+            reply->writeInt32(status);
+
+            if (status == NO_ERROR) {
+                reply->writeUint32(numSurroundFormats);
+                if (numSurroundFormatsReq > numSurroundFormats) {
+                    numSurroundFormatsReq = numSurroundFormats;
+                }
+                reply->write(surroundFormats, numSurroundFormatsReq * sizeof(audio_format_t));
+                reply->write(surroundFormatsEnabled, numSurroundFormatsReq * sizeof(bool));
+            }
+            free(surroundFormats);
+            free(surroundFormatsEnabled);
+            return NO_ERROR;
+        }
+
+        case SET_SURROUND_FORMAT_ENABLED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_format_t audioFormat = (audio_format_t) data.readInt32();
+            bool enabled = data.readBool();
+            status_t status = setSurroundFormatEnabled(audioFormat, enabled);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
 }
 
+/** returns true if string overflow was prevented by zero termination */
+template <size_t size>
+static bool preventStringOverflow(char (&s)[size]) {
+    if (strnlen(s, size) < size) return false;
+    s[size - 1] = '\0';
+    return true;
+}
+
 void BnAudioPolicyService::sanetizeAudioAttributes(audio_attributes_t* attr)
 {
     const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
@@ -1497,6 +1606,27 @@
     attr->tags[tagsMaxSize - 1] = '\0';
 }
 
+/** returns BAD_VALUE if sanitization was required. */
+status_t BnAudioPolicyService::sanitizeEffectDescriptor(effect_descriptor_t* desc)
+{
+    if (preventStringOverflow(desc->name)
+        | /* always */ preventStringOverflow(desc->implementor)) {
+        android_errorWriteLog(0x534e4554, "73126106"); // SafetyNet logging
+        return BAD_VALUE;
+    }
+    return NO_ERROR;
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t BnAudioPolicyService::sanitizeAudioPortConfig(struct audio_port_config* config)
+{
+    if (config->type == AUDIO_PORT_TYPE_DEVICE &&
+        preventStringOverflow(config->ext.device.address)) {
+        return BAD_VALUE;
+    }
+    return NO_ERROR;
+}
+
 // ----------------------------------------------------------------------------
 
 } // namespace android
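
preventStringOverflow() above only terminates a string that fills its entire array without a NUL; the sanitizers turn that case into BAD_VALUE (plus a SafetyNet log for effect descriptors). A standalone illustration of the helper's behaviour, using the same template driven with made-up buffers:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    template <std::size_t size>
    static bool preventStringOverflow(char (&s)[size]) {
        if (strnlen(s, size) < size) return false;  // already NUL-terminated within bounds
        s[size - 1] = '\0';                         // force termination
        return true;                                // caller reports BAD_VALUE / logs SafetyNet
    }

    int main() {
        char ok[8] = "short";                       // terminated: left untouched
        char bad[4] = {'a', 'b', 'c', 'd'};         // no terminator anywhere in the array
        assert(!preventStringOverflow(ok));
        assert(preventStringOverflow(bad) && bad[3] == '\0');
        return 0;
    }
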
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
index 59ac1db..24837e3 100644
--- a/media/libaudioclient/include/media/AudioParameter.h
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -64,6 +64,9 @@
     static const char * const keyPresentationId;
     static const char * const keyProgramId;
 
+    //  keyAudioLanguagePreferred: Preferred audio language
+    static const char * const keyAudioLanguagePreferred;
+
     //  keyStreamConnect / Disconnect: value is an int in audio_devices_t
     static const char * const keyStreamConnect;
     static const char * const keyStreamDisconnect;
@@ -81,6 +84,11 @@
 
     static const char * const valueListSeparator;
 
+    // keyReconfigA2dp: Ask HwModule to reconfigure A2DP offloaded codec
+    // keyReconfigA2dpSupported: Query if HwModule supports A2DP offload codec config
+    static const char * const keyReconfigA2dp;
+    static const char * const keyReconfigA2dpSupported;
+
     String8 toString() const { return toStringImpl(true); }
     String8 keysToString() const { return toStringImpl(false); }
 
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index c07c397..cf446a5 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -704,7 +704,10 @@
 private:
     class MediaMetrics {
       public:
-        MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiorecord")) {
+        MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiorecord")),
+                         mCreatedNs(systemTime(SYSTEM_TIME_REALTIME)),
+                         mStartedNs(0), mDurationNs(0), mCount(0),
+                         mLastError(NO_ERROR) {
         }
         ~MediaMetrics() {
             // mAnalyticsItem alloc failure will be flagged in the constructor
@@ -715,8 +718,20 @@
         }
         void gather(const AudioRecord *record);
         MediaAnalyticsItem *dup() { return mAnalyticsItem->dup(); }
+
+        void logStart(nsecs_t when) { mStartedNs = when; mCount++; }
+        void logStop(nsecs_t when) { mDurationNs += (when - mStartedNs); mStartedNs = 0; }
+        void markError(status_t errcode, const char *func)
+                { mLastError = errcode; mLastErrorFunc = func; }
       private:
         std::unique_ptr<MediaAnalyticsItem> mAnalyticsItem;
+        nsecs_t mCreatedNs;     // XXX: perhaps not worth it in production
+        nsecs_t mStartedNs;
+        nsecs_t mDurationNs;
+        int32_t mCount;
+
+        status_t mLastError;
+        std::string mLastErrorFunc;
     };
     MediaMetrics mMediaMetrics;
 };
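
The duration bookkeeping above is a simple accumulator; with illustrative timestamps (not from any real trace) it behaves like this:

    // t = 100 ms  start()  -> logStart(100 ms): mStartedNs = 100 ms, mCount = 1
    // t = 350 ms  stop()   -> logStop(350 ms):  mDurationNs += 250 ms, mStartedNs = 0
    // t = 400 ms  start()  -> logStart(400 ms): mStartedNs = 400 ms, mCount = 2
    // t = 460 ms  gather() -> durationMs = 250 ms + 60 ms still running = 310, n = 2
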
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 22b700d..4c0f796 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -340,6 +340,15 @@
 
     static status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
+    // On input, numSurroundFormats is the maximum number of entries the surroundFormats and
+    // surroundFormatsEnabled arrays can hold. When it is 0, neither array is populated.
+    // On return, numSurroundFormats holds the actual number of surround formats.
+    static status_t getSurroundFormats(unsigned int *numSurroundFormats,
+                                       audio_format_t *surroundFormats,
+                                       bool *surroundFormatsEnabled,
+                                       bool reported);
+    static status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
@@ -431,6 +440,7 @@
 
         int addAudioPortCallback(const sp<AudioPortCallback>& callback);
         int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+        bool isAudioPortCbEnabled() const { return (mAudioPortCallbacks.size() != 0); }
 
         // DeathRecipient
         virtual void binderDied(const wp<IBinder>& who);
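
The two-step query described by the comment above would look roughly like the sketch below (a hypothetical caller; everything outside AudioSystem is illustrative):

    #include <memory>
    #include <vector>
    #include <media/AudioSystem.h>

    // Hypothetical caller of the new API, following the size-query-then-fill protocol above.
    void dumpSurroundFormats() {
        unsigned int count = 0;
        // Pass 0 first: the arrays are not touched, only the total count comes back.
        if (android::AudioSystem::getSurroundFormats(&count, nullptr, nullptr,
                                                     false /*reported*/) != android::NO_ERROR
                || count == 0) {
            return;
        }
        std::vector<audio_format_t> formats(count);
        std::unique_ptr<bool[]> enabled(new bool[count]);  // a raw bool* is required here
        if (android::AudioSystem::getSurroundFormats(&count, formats.data(), enabled.get(),
                                                     false /*reported*/) == android::NO_ERROR) {
            // count now holds the number of entries that were actually filled in.
        }
    }
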
diff --git a/media/libaudioclient/include/media/AudioTimestamp.h b/media/libaudioclient/include/media/AudioTimestamp.h
index 498de8e..e5925dd 100644
--- a/media/libaudioclient/include/media/AudioTimestamp.h
+++ b/media/libaudioclient/include/media/AudioTimestamp.h
@@ -135,8 +135,23 @@
         return INVALID_OPERATION;
     }
 
+    // Latency of the server-to-kernel leg of the output pipeline, in milliseconds.
+    double getOutputServerLatencyMs(uint32_t sampleRate) const {
+        return getLatencyMs(sampleRate, LOCATION_SERVER, LOCATION_KERNEL);
+    }
+
+    // Latency between two timestamp locations in milliseconds: the frame position
+    // difference converted to time at sampleRate, minus the timestamp difference.
+    // Returns 0. if either timestamp is unavailable or sampleRate is 0.
+    double getLatencyMs(uint32_t sampleRate, Location location1, Location location2) const {
+        if (sampleRate > 0 && mTimeNs[location1] > 0 && mTimeNs[location2] > 0) {
+            const int64_t frameDifference =
+                    mPosition[location1] - mPosition[location2];
+            const int64_t timeDifferenceNs =
+                    mTimeNs[location1] - mTimeNs[location2];
+            return ((double)frameDifference * 1e9 / sampleRate - timeDifferenceNs) * 1e-6;
+        }
+        return 0.;
+    }
+
     // convert fields to a printable string
-    std::string toString() {
+    std::string toString() const {
         std::stringstream ss;
 
         ss << "BOOTTIME offset " << mTimebaseOffset[TIMEBASE_BOOTTIME] << "\n";
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 949d593..c3876af 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -166,6 +166,12 @@
     virtual status_t getMasterMono(bool *mono) = 0;
     virtual float    getStreamVolumeDB(
             audio_stream_type_t stream, int index, audio_devices_t device) = 0;
+
+    virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+                                        audio_format_t *surroundFormats,
+                                        bool *surroundFormatsEnabled,
+                                        bool reported) = 0;
+    virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
 };
 
 
@@ -180,6 +186,8 @@
                                     uint32_t flags = 0);
 private:
     void sanetizeAudioAttributes(audio_attributes_t* attr);
+    status_t sanitizeEffectDescriptor(effect_descriptor_t* desc);
+    status_t sanitizeAudioPortConfig(struct audio_port_config* config);
 };
 
 // ----------------------------------------------------------------------------
diff --git a/media/libaudiohal/2.0/Android.bp b/media/libaudiohal/2.0/Android.bp
deleted file mode 100644
index 574b435..0000000
--- a/media/libaudiohal/2.0/Android.bp
+++ /dev/null
@@ -1,54 +0,0 @@
-cc_library_shared {
-    name: "libaudiohal@2.0",
-
-    srcs: [
-        "DeviceHalLocal.cpp",
-        "DevicesFactoryHalHybrid.cpp",
-        "DevicesFactoryHalLocal.cpp",
-        "StreamHalLocal.cpp",
-
-        "ConversionHelperHidl.cpp",
-        "DeviceHalHidl.cpp",
-        "DevicesFactoryHalHidl.cpp",
-        "EffectBufferHalHidl.cpp",
-        "EffectHalHidl.cpp",
-        "EffectsFactoryHalHidl.cpp",
-        "StreamHalHidl.cpp",
-    ],
-
-    export_include_dirs: ["."],
-
-    cflags: [
-        "-Wall",
-        "-Werror",
-    ],
-    shared_libs: [
-        "libaudiohal_deathhandler",
-        "libaudioutils",
-        "libcutils",
-        "liblog",
-        "libutils",
-        "libhardware",
-        "libbase",
-        "libfmq",
-        "libhwbinder",
-        "libhidlbase",
-        "libhidlmemory",
-        "libhidltransport",
-        "android.hardware.audio@2.0",
-        "android.hardware.audio.common@2.0",
-        "android.hardware.audio.common@2.0-util",
-        "android.hardware.audio.effect@2.0",
-        "android.hidl.allocator@1.0",
-        "android.hidl.memory@1.0",
-        "libmedia_helper",
-        "libmediautils",
-    ],
-    header_libs: [
-        "libaudiohal_headers"
-    ],
-
-    export_shared_lib_headers: [
-        "libfmq",
-    ],
-}
diff --git a/media/libaudiohal/2.0/ConversionHelperHidl.cpp b/media/libaudiohal/2.0/ConversionHelperHidl.cpp
deleted file mode 100644
index f60bf8b..0000000
--- a/media/libaudiohal/2.0/ConversionHelperHidl.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "HalHidl"
-#include <media/AudioParameter.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-
-using ::android::hardware::audio::V2_0::Result;
-
-namespace android {
-
-// static
-status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
-    AudioParameter halKeys(keys);
-    if (halKeys.size() == 0) return BAD_VALUE;
-    hidlKeys->resize(halKeys.size());
-    //FIXME:  keyStreamSupportedChannels and keyStreamSupportedSamplingRates come with a
-    // "keyFormat=<value>" pair. We need to transform it into a single key string so that it is
-    // carried over to the legacy HAL via HIDL.
-    String8 value;
-    bool keepFormatValue = halKeys.size() == 2 &&
-         (halKeys.get(String8(AudioParameter::keyStreamSupportedChannels), value) == NO_ERROR ||
-         halKeys.get(String8(AudioParameter::keyStreamSupportedSamplingRates), value) == NO_ERROR);
-
-    for (size_t i = 0; i < halKeys.size(); ++i) {
-        String8 key;
-        status_t status = halKeys.getAt(i, key);
-        if (status != OK) return status;
-        if (keepFormatValue && key == AudioParameter::keyFormat) {
-            AudioParameter formatParam;
-            halKeys.getAt(i, key, value);
-            formatParam.add(key, value);
-            key = formatParam.toString();
-        }
-        (*hidlKeys)[i] = key.string();
-    }
-    return OK;
-}
-
-// static
-status_t ConversionHelperHidl::parametersFromHal(
-        const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams) {
-    AudioParameter params(kvPairs);
-    if (params.size() == 0) return BAD_VALUE;
-    hidlParams->resize(params.size());
-    for (size_t i = 0; i < params.size(); ++i) {
-        String8 key, value;
-        status_t status = params.getAt(i, key, value);
-        if (status != OK) return status;
-        (*hidlParams)[i].key = key.string();
-        (*hidlParams)[i].value = value.string();
-    }
-    return OK;
-}
-
-// static
-void ConversionHelperHidl::parametersToHal(
-        const hidl_vec<ParameterValue>& parameters, String8 *values) {
-    AudioParameter params;
-    for (size_t i = 0; i < parameters.size(); ++i) {
-        params.add(String8(parameters[i].key.c_str()), String8(parameters[i].value.c_str()));
-    }
-    values->setTo(params.toString());
-}
-
-ConversionHelperHidl::ConversionHelperHidl(const char* className)
-        : mClassName(className) {
-}
-
-// static
-status_t ConversionHelperHidl::analyzeResult(const Result& result) {
-    switch (result) {
-        case Result::OK: return OK;
-        case Result::INVALID_ARGUMENTS: return BAD_VALUE;
-        case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
-        case Result::NOT_INITIALIZED: return NO_INIT;
-        case Result::NOT_SUPPORTED: return INVALID_OPERATION;
-        default: return NO_INIT;
-    }
-}
-
-void ConversionHelperHidl::emitError(const char* funcName, const char* description) {
-    ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
-}
-
-}  // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.cpp b/media/libaudiohal/2.0/DeviceHalHidl.cpp
deleted file mode 100644
index 5b99d70..0000000
--- a/media/libaudiohal/2.0/DeviceHalHidl.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-
-#define LOG_TAG "DeviceHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IPrimaryDevice.h>
-#include <cutils/native_handle.h>
-#include <hwbinder/IPCThreadState.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "HidlUtils.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioConfig;
-using ::android::hardware::audio::common::V2_0::AudioDevice;
-using ::android::hardware::audio::common::V2_0::AudioInputFlag;
-using ::android::hardware::audio::common::V2_0::AudioOutputFlag;
-using ::android::hardware::audio::common::V2_0::AudioPatchHandle;
-using ::android::hardware::audio::common::V2_0::AudioPort;
-using ::android::hardware::audio::common::V2_0::AudioPortConfig;
-using ::android::hardware::audio::common::V2_0::AudioMode;
-using ::android::hardware::audio::common::V2_0::AudioSource;
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::V2_0::DeviceAddress;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-
-namespace android {
-
-namespace {
-
-status_t deviceAddressFromHal(
-        audio_devices_t device, const char* halAddress, DeviceAddress* address) {
-    address->device = AudioDevice(device);
-
-    if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
-        return OK;
-    }
-    const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
-    if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
-    if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
-            || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
-        int status = sscanf(halAddress,
-                "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
-                &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
-                &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
-        return status == 6 ? OK : BAD_VALUE;
-    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
-            || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
-        int status = sscanf(halAddress,
-                "%hhu.%hhu.%hhu.%hhu",
-                &address->address.ipv4[0], &address->address.ipv4[1],
-                &address->address.ipv4[2], &address->address.ipv4[3]);
-        return status == 4 ? OK : BAD_VALUE;
-    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB)) != 0
-            || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB)) != 0) {
-        int status = sscanf(halAddress,
-                "card=%d;device=%d",
-                &address->address.alsa.card, &address->address.alsa.device);
-        return status == 2 ? OK : BAD_VALUE;
-    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
-            || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
-        if (halAddress != NULL) {
-            address->busAddress = halAddress;
-            return OK;
-        }
-        return BAD_VALUE;
-    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) != 0
-            || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
-        if (halAddress != NULL) {
-            address->rSubmixAddress = halAddress;
-            return OK;
-        }
-        return BAD_VALUE;
-    }
-    return OK;
-}
-
-}  // namespace
-
-DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
-        : ConversionHelperHidl("Device"), mDevice(device),
-          mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
-}
-
-DeviceHalHidl::~DeviceHalHidl() {
-    if (mDevice != 0) {
-        mDevice.clear();
-        hardware::IPCThreadState::self()->flushCommands();
-    }
-}
-
-status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
-    // Obsolete.
-    return INVALID_OPERATION;
-}
-
-status_t DeviceHalHidl::initCheck() {
-    if (mDevice == 0) return NO_INIT;
-    return processReturn("initCheck", mDevice->initCheck());
-}
-
-status_t DeviceHalHidl::setVoiceVolume(float volume) {
-    if (mDevice == 0) return NO_INIT;
-    if (mPrimaryDevice == 0) return INVALID_OPERATION;
-    return processReturn("setVoiceVolume", mPrimaryDevice->setVoiceVolume(volume));
-}
-
-status_t DeviceHalHidl::setMasterVolume(float volume) {
-    if (mDevice == 0) return NO_INIT;
-    if (mPrimaryDevice == 0) return INVALID_OPERATION;
-    return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
-}
-
-status_t DeviceHalHidl::getMasterVolume(float *volume) {
-    if (mDevice == 0) return NO_INIT;
-    if (mPrimaryDevice == 0) return INVALID_OPERATION;
-    Result retval;
-    Return<void> ret = mPrimaryDevice->getMasterVolume(
-            [&](Result r, float v) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *volume = v;
-                }
-            });
-    return processReturn("getMasterVolume", ret, retval);
-}
-
-status_t DeviceHalHidl::setMode(audio_mode_t mode) {
-    if (mDevice == 0) return NO_INIT;
-    if (mPrimaryDevice == 0) return INVALID_OPERATION;
-    return processReturn("setMode", mPrimaryDevice->setMode(AudioMode(mode)));
-}
-
-status_t DeviceHalHidl::setMicMute(bool state) {
-    if (mDevice == 0) return NO_INIT;
-    return processReturn("setMicMute", mDevice->setMicMute(state));
-}
-
-status_t DeviceHalHidl::getMicMute(bool *state) {
-    if (mDevice == 0) return NO_INIT;
-    Result retval;
-    Return<void> ret = mDevice->getMicMute(
-            [&](Result r, bool mute) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *state = mute;
-                }
-            });
-    return processReturn("getMicMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setMasterMute(bool state) {
-    if (mDevice == 0) return NO_INIT;
-    return processReturn("setMasterMute", mDevice->setMasterMute(state));
-}
-
-status_t DeviceHalHidl::getMasterMute(bool *state) {
-    if (mDevice == 0) return NO_INIT;
-    Result retval;
-    Return<void> ret = mDevice->getMasterMute(
-            [&](Result r, bool mute) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *state = mute;
-                }
-            });
-    return processReturn("getMasterMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
-    if (mDevice == 0) return NO_INIT;
-    hidl_vec<ParameterValue> hidlParams;
-    status_t status = parametersFromHal(kvPairs, &hidlParams);
-    if (status != OK) return status;
-    return processReturn("setParameters", mDevice->setParameters(hidlParams));
-}
-
-status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
-    values->clear();
-    if (mDevice == 0) return NO_INIT;
-    hidl_vec<hidl_string> hidlKeys;
-    status_t status = keysFromHal(keys, &hidlKeys);
-    if (status != OK) return status;
-    Result retval;
-    Return<void> ret = mDevice->getParameters(
-            hidlKeys,
-            [&](Result r, const hidl_vec<ParameterValue>& parameters) {
-                retval = r;
-                if (retval == Result::OK) {
-                    parametersToHal(parameters, values);
-                }
-            });
-    return processReturn("getParameters", ret, retval);
-}
-
-status_t DeviceHalHidl::getInputBufferSize(
-        const struct audio_config *config, size_t *size) {
-    if (mDevice == 0) return NO_INIT;
-    AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
-    Result retval;
-    Return<void> ret = mDevice->getInputBufferSize(
-            hidlConfig,
-            [&](Result r, uint64_t bufferSize) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *size = static_cast<size_t>(bufferSize);
-                }
-            });
-    return processReturn("getInputBufferSize", ret, retval);
-}
-
-status_t DeviceHalHidl::openOutputStream(
-        audio_io_handle_t handle,
-        audio_devices_t devices,
-        audio_output_flags_t flags,
-        struct audio_config *config,
-        const char *address,
-        sp<StreamOutHalInterface> *outStream) {
-    if (mDevice == 0) return NO_INIT;
-    DeviceAddress hidlDevice;
-    status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
-    if (status != OK) return status;
-    AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mDevice->openOutputStream(
-            handle,
-            hidlDevice,
-            hidlConfig,
-            AudioOutputFlag(flags),
-            [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *outStream = new StreamOutHalHidl(result);
-                }
-                HidlUtils::audioConfigToHal(suggestedConfig, config);
-            });
-    return processReturn("openOutputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::openInputStream(
-        audio_io_handle_t handle,
-        audio_devices_t devices,
-        struct audio_config *config,
-        audio_input_flags_t flags,
-        const char *address,
-        audio_source_t source,
-        sp<StreamInHalInterface> *inStream) {
-    if (mDevice == 0) return NO_INIT;
-    DeviceAddress hidlDevice;
-    status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
-    if (status != OK) return status;
-    AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mDevice->openInputStream(
-            handle,
-            hidlDevice,
-            hidlConfig,
-            AudioInputFlag(flags),
-            AudioSource(source),
-            [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *inStream = new StreamInHalHidl(result);
-                }
-                HidlUtils::audioConfigToHal(suggestedConfig, config);
-            });
-    return processReturn("openInputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
-    if (mDevice == 0) return NO_INIT;
-    return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
-}
-
-status_t DeviceHalHidl::createAudioPatch(
-        unsigned int num_sources,
-        const struct audio_port_config *sources,
-        unsigned int num_sinks,
-        const struct audio_port_config *sinks,
-        audio_patch_handle_t *patch) {
-    if (mDevice == 0) return NO_INIT;
-    hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
-    HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
-    HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
-    Result retval;
-    Return<void> ret = mDevice->createAudioPatch(
-            hidlSources, hidlSinks,
-            [&](Result r, AudioPatchHandle hidlPatch) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *patch = static_cast<audio_patch_handle_t>(hidlPatch);
-                }
-            });
-    return processReturn("createAudioPatch", ret, retval);
-}
-
-status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
-    if (mDevice == 0) return NO_INIT;
-    return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
-}
-
-status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
-    if (mDevice == 0) return NO_INIT;
-    AudioPort hidlPort;
-    HidlUtils::audioPortFromHal(*port, &hidlPort);
-    Result retval;
-    Return<void> ret = mDevice->getAudioPort(
-            hidlPort,
-            [&](Result r, const AudioPort& p) {
-                retval = r;
-                if (retval == Result::OK) {
-                    HidlUtils::audioPortToHal(p, port);
-                }
-            });
-    return processReturn("getAudioPort", ret, retval);
-}
-
-status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
-    if (mDevice == 0) return NO_INIT;
-    AudioPortConfig hidlConfig;
-    HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
-    return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
-}
-
-status_t DeviceHalHidl::getMicrophones(
-        std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
-    if (mDevice == 0) return NO_INIT;
-    return INVALID_OPERATION;
-}
-
-status_t DeviceHalHidl::dump(int fd) {
-    if (mDevice == 0) return NO_INIT;
-    native_handle_t* hidlHandle = native_handle_create(1, 0);
-    hidlHandle->data[0] = fd;
-    Return<void> ret = mDevice->debugDump(hidlHandle);
-    native_handle_delete(hidlHandle);
-    return processReturn("dump", ret);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.cpp b/media/libaudiohal/2.0/DeviceHalLocal.cpp
deleted file mode 100644
index ec3bf78..0000000
--- a/media/libaudiohal/2.0/DeviceHalLocal.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DeviceHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
-        : mDev(dev) {
-}
-
-DeviceHalLocal::~DeviceHalLocal() {
-    int status = audio_hw_device_close(mDev);
-    ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
-    mDev = 0;
-}
-
-status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
-    if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
-    *devices = mDev->get_supported_devices(mDev);
-    return OK;
-}
-
-status_t DeviceHalLocal::initCheck() {
-    return mDev->init_check(mDev);
-}
-
-status_t DeviceHalLocal::setVoiceVolume(float volume) {
-    return mDev->set_voice_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMasterVolume(float volume) {
-    if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
-    return mDev->set_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::getMasterVolume(float *volume) {
-    if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
-    return mDev->get_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMode(audio_mode_t mode) {
-    return mDev->set_mode(mDev, mode);
-}
-
-status_t DeviceHalLocal::setMicMute(bool state) {
-    return mDev->set_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMicMute(bool *state) {
-    return mDev->get_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setMasterMute(bool state) {
-    if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
-    return mDev->set_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMasterMute(bool *state) {
-    if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
-    return mDev->get_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
-    return mDev->set_parameters(mDev, kvPairs.string());
-}
-
-status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
-    char *halValues = mDev->get_parameters(mDev, keys.string());
-    if (halValues != NULL) {
-        values->setTo(halValues);
-        free(halValues);
-    } else {
-        values->clear();
-    }
-    return OK;
-}
-
-status_t DeviceHalLocal::getInputBufferSize(
-        const struct audio_config *config, size_t *size) {
-    *size = mDev->get_input_buffer_size(mDev, config);
-    return OK;
-}
-
-status_t DeviceHalLocal::openOutputStream(
-        audio_io_handle_t handle,
-        audio_devices_t devices,
-        audio_output_flags_t flags,
-        struct audio_config *config,
-        const char *address,
-        sp<StreamOutHalInterface> *outStream) {
-    audio_stream_out_t *halStream;
-    ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
-            "srate: %d format %#x channels %x address %s",
-            handle, devices, flags,
-            config->sample_rate, config->format, config->channel_mask,
-            address);
-    int openResut = mDev->open_output_stream(
-            mDev, handle, devices, flags, config, &halStream, address);
-    if (openResut == OK) {
-        *outStream = new StreamOutHalLocal(halStream, this);
-    }
-    ALOGV("open_output_stream status %d stream %p", openResut, halStream);
-    return openResut;
-}
-
-status_t DeviceHalLocal::openInputStream(
-        audio_io_handle_t handle,
-        audio_devices_t devices,
-        struct audio_config *config,
-        audio_input_flags_t flags,
-        const char *address,
-        audio_source_t source,
-        sp<StreamInHalInterface> *inStream) {
-    audio_stream_in_t *halStream;
-    ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
-            "srate: %d format %#x channels %x address %s source %d",
-            handle, devices, flags,
-            config->sample_rate, config->format, config->channel_mask,
-            address, source);
-    int openResult = mDev->open_input_stream(
-            mDev, handle, devices, config, &halStream, flags, address, source);
-    if (openResult == OK) {
-        *inStream = new StreamInHalLocal(halStream, this);
-    }
-    ALOGV("open_input_stream status %d stream %p", openResult, inStream);
-    return openResult;
-}
-
-status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
-    *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
-    return OK;
-}
-
-status_t DeviceHalLocal::createAudioPatch(
-        unsigned int num_sources,
-        const struct audio_port_config *sources,
-        unsigned int num_sinks,
-        const struct audio_port_config *sinks,
-        audio_patch_handle_t *patch) {
-    if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        return mDev->create_audio_patch(
-                mDev, num_sources, sources, num_sinks, sinks, patch);
-    } else {
-        return INVALID_OPERATION;
-    }
-}
-
-status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
-    if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        return mDev->release_audio_patch(mDev, patch);
-    } else {
-        return INVALID_OPERATION;
-    }
-}
-
-status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
-    return mDev->get_audio_port(mDev, port);
-}
-
-status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
-    if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
-        return mDev->set_audio_port_config(mDev, config);
-    else
-        return INVALID_OPERATION;
-}
-
-status_t DeviceHalLocal::getMicrophones(
-        std::vector<media::MicrophoneInfo> *microphones __unused) {
-    return INVALID_OPERATION;
-}
-
-status_t DeviceHalLocal::dump(int fd) {
-    return mDev->dump(mDev, fd);
-}
-
-void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
-    mDev->close_output_stream(mDev, stream_out);
-}
-
-void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
-    mDev->close_input_stream(mDev, stream_in);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
deleted file mode 100644
index 5b33592..0000000
--- a/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "DevicesFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IDevice.h>
-#include <media/audiohal/hidl/HalDeathHandler.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "DeviceHalHidl.h"
-#include "DevicesFactoryHalHidl.h"
-
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
-    mDevicesFactory = IDevicesFactory::getService();
-    if (mDevicesFactory != 0) {
-        // It is assumed that DevicesFactory is owned by AudioFlinger
-        // and thus has the same lifespan.
-        mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
-    } else {
-        ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
-        exit(1);
-    }
-    // The MSD factory is optional
-    mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
-    // TODO: Register death handler, and add 'restart' directive to audioserver.rc
-}
-
-DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
-}
-
-// static
-status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
-    if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
-        *device = IDevicesFactory::Device::PRIMARY;
-        return OK;
-    } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
-        *device = IDevicesFactory::Device::A2DP;
-        return OK;
-    } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
-        *device = IDevicesFactory::Device::USB;
-        return OK;
-    } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
-        *device = IDevicesFactory::Device::R_SUBMIX;
-        return OK;
-    } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
-        *device = IDevicesFactory::Device::STUB;
-        return OK;
-    }
-    ALOGE("Invalid device name %s", name);
-    return BAD_VALUE;
-}
-
-status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
-    if (mDevicesFactory == 0) return NO_INIT;
-    IDevicesFactory::Device hidlDevice;
-    status_t status = nameFromHal(name, &hidlDevice);
-    if (status != OK) return status;
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mDevicesFactory->openDevice(
-            hidlDevice,
-            [&](Result r, const sp<IDevice>& result) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *device = new DeviceHalHidl(result);
-                }
-            });
-    if (ret.isOk()) {
-        if (retval == Result::OK) return OK;
-        else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
-        else return NO_INIT;
-    }
-    return FAILED_TRANSACTION;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp b/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
deleted file mode 100644
index 13a9acd..0000000
--- a/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DevicesFactoryHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <string.h>
-
-#include <hardware/audio.h>
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "DevicesFactoryHalLocal.h"
-
-namespace android {
-
-static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
-{
-    const hw_module_t *mod;
-    int rc;
-
-    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
-    if (rc) {
-        ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
-                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
-        goto out;
-    }
-    rc = audio_hw_device_open(mod, dev);
-    if (rc) {
-        ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
-                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
-        goto out;
-    }
-    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
-        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
-        rc = BAD_VALUE;
-        audio_hw_device_close(*dev);
-        goto out;
-    }
-    return OK;
-
-out:
-    *dev = NULL;
-    return rc;
-}
-
-status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
-    audio_hw_device_t *dev;
-    status_t rc = load_audio_interface(name, &dev);
-    if (rc == OK) {
-        *device = new DeviceHalLocal(dev);
-    }
-    return rc;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.cpp b/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
deleted file mode 100644
index 226a500..0000000
--- a/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <atomic>
-
-#define LOG_TAG "EffectBufferHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <hidlmemory/mapping.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-
-using ::android::hardware::Return;
-using ::android::hidl::allocator::V1_0::IAllocator;
-
-namespace android {
-
-// static
-uint64_t EffectBufferHalHidl::makeUniqueId() {
-    static std::atomic<uint64_t> counter{1};
-    return counter++;
-}
-
-status_t EffectBufferHalHidl::allocate(
-        size_t size, sp<EffectBufferHalInterface>* buffer) {
-    return mirror(nullptr, size, buffer);
-}
-
-status_t EffectBufferHalHidl::mirror(
-        void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
-    sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
-    status_t result = static_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
-    if (result == OK) {
-        tempBuffer->setExternalData(external);
-        *buffer = tempBuffer;
-    }
-    return result;
-}
-
-EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
-        : mBufferSize(size), mFrameCountChanged(false),
-          mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
-    mHidlBuffer.id = makeUniqueId();
-    mHidlBuffer.frameCount = 0;
-}
-
-EffectBufferHalHidl::~EffectBufferHalHidl() {
-}
-
-status_t EffectBufferHalHidl::init() {
-    sp<IAllocator> ashmem = IAllocator::getService("ashmem");
-    if (ashmem == 0) {
-        ALOGE("Failed to retrieve ashmem allocator service");
-        return NO_INIT;
-    }
-    status_t retval = NO_MEMORY;
-    Return<void> result = ashmem->allocate(
-            mBufferSize,
-            [&](bool success, const hidl_memory& memory) {
-                if (success) {
-                    mHidlBuffer.data = memory;
-                    retval = OK;
-                }
-            });
-    if (result.isOk() && retval == OK) {
-        mMemory = hardware::mapMemory(mHidlBuffer.data);
-        if (mMemory != 0) {
-            mMemory->update();
-            mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
-            memset(mAudioBuffer.raw, 0, mMemory->getSize());
-            mMemory->commit();
-        } else {
-            ALOGE("Failed to map allocated ashmem");
-            retval = NO_MEMORY;
-        }
-    } else {
-        ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
-    }
-    return result.isOk() ? retval : FAILED_TRANSACTION;
-}
-
-audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
-    return &mAudioBuffer;
-}
-
-void* EffectBufferHalHidl::externalData() const {
-    return mExternalData;
-}
-
-void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
-    mHidlBuffer.frameCount = frameCount;
-    mAudioBuffer.frameCount = frameCount;
-    mFrameCountChanged = true;
-}
-
-bool EffectBufferHalHidl::checkFrameCountChange() {
-    bool result = mFrameCountChanged;
-    mFrameCountChanged = false;
-    return result;
-}
-
-void EffectBufferHalHidl::setExternalData(void* external) {
-    mExternalData = external;
-}
-
-void EffectBufferHalHidl::update() {
-    update(mBufferSize);
-}
-
-void EffectBufferHalHidl::commit() {
-    commit(mBufferSize);
-}
-
-void EffectBufferHalHidl::update(size_t size) {
-    if (mExternalData == nullptr) return;
-    mMemory->update();
-    if (size > mBufferSize) size = mBufferSize;
-    memcpy(mAudioBuffer.raw, mExternalData, size);
-    mMemory->commit();
-}
-
-void EffectBufferHalHidl::commit(size_t size) {
-    if (mExternalData == nullptr) return;
-    if (size > mBufferSize) size = mBufferSize;
-    memcpy(mExternalData, mAudioBuffer.raw, size);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/EffectHalHidl.cpp b/media/libaudiohal/2.0/EffectHalHidl.cpp
deleted file mode 100644
index 4fb032c..0000000
--- a/media/libaudiohal/2.0/EffectHalHidl.cpp
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <hwbinder/IPCThreadState.h>
-#include <media/EffectsFactoryApi.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-#include "EffectHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::effect::V2_0::AudioBuffer;
-using ::android::hardware::audio::effect::V2_0::EffectBufferAccess;
-using ::android::hardware::audio::effect::V2_0::EffectConfigParameters;
-using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-
-namespace android {
-
-EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
-        : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
-}
-
-EffectHalHidl::~EffectHalHidl() {
-    if (mEffect != 0) {
-        close();
-        mEffect.clear();
-        hardware::IPCThreadState::self()->flushCommands();
-    }
-    if (mEfGroup) {
-        EventFlag::deleteEventFlag(&mEfGroup);
-    }
-}
-
-// static
-void EffectHalHidl::effectDescriptorToHal(
-        const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
-    HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
-    HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
-    halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
-    halDescriptor->cpuLoad = descriptor.cpuLoad;
-    halDescriptor->memoryUsage = descriptor.memoryUsage;
-    memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
-    memcpy(halDescriptor->implementor,
-            descriptor.implementor.data(), descriptor.implementor.size());
-}
-
-// TODO(mnaganov): These buffer conversion functions should be shared with the Effect wrapper
-// via HidlUtils. Move them there when hardware/interfaces gets un-frozen again.
-
-// static
-void EffectHalHidl::effectBufferConfigFromHal(
-        const buffer_config_t& halConfig, EffectBufferConfig* config) {
-    config->samplingRateHz = halConfig.samplingRate;
-    config->channels = AudioChannelMask(halConfig.channels);
-    config->format = AudioFormat(halConfig.format);
-    config->accessMode = EffectBufferAccess(halConfig.accessMode);
-    config->mask = EffectConfigParameters(halConfig.mask);
-}
-
-// static
-void EffectHalHidl::effectBufferConfigToHal(
-        const EffectBufferConfig& config, buffer_config_t* halConfig) {
-    halConfig->buffer.frameCount = 0;
-    halConfig->buffer.raw = NULL;
-    halConfig->samplingRate = config.samplingRateHz;
-    halConfig->channels = static_cast<uint32_t>(config.channels);
-    halConfig->bufferProvider.cookie = NULL;
-    halConfig->bufferProvider.getBuffer = NULL;
-    halConfig->bufferProvider.releaseBuffer = NULL;
-    halConfig->format = static_cast<uint8_t>(config.format);
-    halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
-    halConfig->mask = static_cast<uint8_t>(config.mask);
-}
-
-// static
-void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
-    effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
-    effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
-}
-
-// static
-void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
-    effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
-    effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
-}
-
-// static
-status_t EffectHalHidl::analyzeResult(const Result& result) {
-    switch (result) {
-        case Result::OK: return OK;
-        case Result::INVALID_ARGUMENTS: return BAD_VALUE;
-        case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
-        case Result::NOT_INITIALIZED: return NO_INIT;
-        case Result::NOT_SUPPORTED: return INVALID_OPERATION;
-        case Result::RESULT_TOO_BIG: return NO_MEMORY;
-        default: return NO_INIT;
-    }
-}
-
-status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
-    if (!mBuffersChanged) {
-        if (buffer.get() == nullptr || mInBuffer.get() == nullptr) {
-            mBuffersChanged = buffer.get() != mInBuffer.get();
-        } else {
-            mBuffersChanged = buffer->audioBuffer() != mInBuffer->audioBuffer();
-        }
-    }
-    mInBuffer = buffer;
-    return OK;
-}
-
-status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
-    if (!mBuffersChanged) {
-        if (buffer.get() == nullptr || mOutBuffer.get() == nullptr) {
-            mBuffersChanged = buffer.get() != mOutBuffer.get();
-        } else {
-            mBuffersChanged = buffer->audioBuffer() != mOutBuffer->audioBuffer();
-        }
-    }
-    mOutBuffer = buffer;
-    return OK;
-}
-
-status_t EffectHalHidl::process() {
-    return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
-}
-
-status_t EffectHalHidl::processReverse() {
-    return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
-}
-
-status_t EffectHalHidl::prepareForProcessing() {
-    std::unique_ptr<StatusMQ> tempStatusMQ;
-    Result retval;
-    Return<void> ret = mEffect->prepareForProcessing(
-            [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
-                retval = r;
-                if (retval == Result::OK) {
-                    tempStatusMQ.reset(new StatusMQ(statusMQ));
-                    if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
-                        EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
-                    }
-                }
-            });
-    if (!ret.isOk() || retval != Result::OK) {
-        return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
-    }
-    if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
-        ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
-        ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
-                "Status message queue for effects is invalid");
-        ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
-        return NO_INIT;
-    }
-    mStatusMQ = std::move(tempStatusMQ);
-    return OK;
-}
-
-bool EffectHalHidl::needToResetBuffers() {
-    if (mBuffersChanged) return true;
-    bool inBufferFrameCountUpdated = mInBuffer->checkFrameCountChange();
-    bool outBufferFrameCountUpdated = mOutBuffer->checkFrameCountChange();
-    return inBufferFrameCountUpdated || outBufferFrameCountUpdated;
-}
-
-status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
-    if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
-    status_t status;
-    if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
-        return status;
-    }
-    if (needToResetBuffers() && (status = setProcessBuffers()) != OK) {
-        return status;
-    }
-    // The data is already in the buffers, just need to flush it and wake up the server side.
-    std::atomic_thread_fence(std::memory_order_release);
-    mEfGroup->wake(mqFlag);
-    uint32_t efState = 0;
-retry:
-    status_t ret = mEfGroup->wait(
-            static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
-    if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
-        Result retval = Result::NOT_INITIALIZED;
-        mStatusMQ->read(&retval);
-        if (retval == Result::OK || retval == Result::INVALID_STATE) {
-            // Sync back the changed contents of the buffer.
-            std::atomic_thread_fence(std::memory_order_acquire);
-        }
-        return analyzeResult(retval);
-    }
-    if (ret == -EAGAIN || ret == -EINTR) {
-        // Spurious wakeup. This normally retries no more than once.
-        goto retry;
-    }
-    return ret;
-}
-
-status_t EffectHalHidl::setProcessBuffers() {
-    Return<Result> ret = mEffect->setProcessBuffers(
-            static_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
-            static_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
-    if (ret.isOk() && ret == Result::OK) {
-        mBuffersChanged = false;
-        return OK;
-    }
-    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
-        uint32_t *replySize, void *pReplyData) {
-    if (mEffect == 0) return NO_INIT;
-
-    // Special cases.
-    if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
-        return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
-    } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
-        return getConfigImpl(cmdCode, replySize, pReplyData);
-    }
-
-    // Common case.
-    hidl_vec<uint8_t> hidlData;
-    if (pCmdData != nullptr && cmdSize > 0) {
-        hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
-    }
-    status_t status;
-    uint32_t replySizeStub = 0;
-    if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
-    Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
-            [&](int32_t s, const hidl_vec<uint8_t>& result) {
-                status = s;
-                if (status == 0) {
-                    if (*replySize > result.size()) *replySize = result.size();
-                    if (pReplyData != nullptr && *replySize > 0) {
-                        memcpy(pReplyData, &result[0], *replySize);
-                    }
-                }
-            });
-    return ret.isOk() ? status : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
-    if (mEffect == 0) return NO_INIT;
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mEffect->getDescriptor(
-            [&](Result r, const EffectDescriptor& result) {
-                retval = r;
-                if (retval == Result::OK) {
-                    effectDescriptorToHal(result, pDescriptor);
-                }
-            });
-    return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::close() {
-    if (mEffect == 0) return NO_INIT;
-    Return<Result> ret = mEffect->close();
-    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getConfigImpl(
-        uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
-    if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
-        return BAD_VALUE;
-    }
-    status_t result = FAILED_TRANSACTION;
-    Return<void> ret;
-    if (cmdCode == EFFECT_CMD_GET_CONFIG) {
-        ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
-            result = analyzeResult(r);
-            if (r == Result::OK) {
-                effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
-            }
-        });
-    } else {
-        ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
-            result = analyzeResult(r);
-            if (r == Result::OK) {
-                effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
-            }
-        });
-    }
-    if (!ret.isOk()) {
-        result = FAILED_TRANSACTION;
-    }
-    return result;
-}
-
-status_t EffectHalHidl::setConfigImpl(
-        uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
-    if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
-            replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
-        return BAD_VALUE;
-    }
-    const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
-    if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
-            halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
-            halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
-            halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
-        ALOGE("Buffer provider callbacks are not supported");
-    }
-    EffectConfig hidlConfig;
-    effectConfigFromHal(*halConfig, &hidlConfig);
-    Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
-            mEffect->setConfig(hidlConfig, nullptr, nullptr) :
-            mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
-    status_t result = FAILED_TRANSACTION;
-    if (ret.isOk()) {
-        result = analyzeResult(ret);
-        *static_cast<int32_t*>(pReplyData) = result;
-    }
-    return result;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
deleted file mode 100644
index 0d40e6d..0000000
--- a/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectsFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <cutils/native_handle.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-#include "EffectHalHidl.h"
-#include "EffectsFactoryHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::common::V2_0::Uuid;
-using ::android::hardware::audio::effect::V2_0::IEffect;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
-    mEffectsFactory = IEffectsFactory::getService();
-    if (mEffectsFactory == 0) {
-        ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
-        exit(1);
-    }
-}
-
-EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
-}
-
-status_t EffectsFactoryHalHidl::queryAllDescriptors() {
-    if (mEffectsFactory == 0) return NO_INIT;
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mEffectsFactory->getAllDescriptors(
-            [&](Result r, const hidl_vec<EffectDescriptor>& result) {
-                retval = r;
-                if (retval == Result::OK) {
-                    mLastDescriptors = result;
-                }
-            });
-    if (ret.isOk()) {
-        return retval == Result::OK ? OK : NO_INIT;
-    }
-    mLastDescriptors.resize(0);
-    return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
-    status_t queryResult = queryAllDescriptors();
-    if (queryResult == OK) {
-        *pNumEffects = mLastDescriptors.size();
-    }
-    return queryResult;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
-        uint32_t index, effect_descriptor_t *pDescriptor) {
-    // TODO: We need to somehow track the changes on the server side,
-    // or figure out how to convert everybody to query all the descriptors at once.
-    // TODO: check for nullptr
-    if (mLastDescriptors.size() == 0) {
-        status_t queryResult = queryAllDescriptors();
-        if (queryResult != OK) return queryResult;
-    }
-    if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
-    EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
-    return OK;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
-        const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
-    // TODO: check for nullptr
-    if (mEffectsFactory == 0) return NO_INIT;
-    Uuid hidlUuid;
-    HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
-            [&](Result r, const EffectDescriptor& result) {
-                retval = r;
-                if (retval == Result::OK) {
-                    EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
-                }
-            });
-    if (ret.isOk()) {
-        if (retval == Result::OK) return OK;
-        else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
-        else return NO_INIT;
-    }
-    return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::createEffect(
-        const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
-        sp<EffectHalInterface> *effect) {
-    if (mEffectsFactory == 0) return NO_INIT;
-    Uuid hidlUuid;
-    HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mEffectsFactory->createEffect(
-            hidlUuid, sessionId, ioId,
-            [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *effect = new EffectHalHidl(result, effectId);
-                }
-            });
-    if (ret.isOk()) {
-        if (retval == Result::OK) return OK;
-        else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
-        else return NO_INIT;
-    }
-    return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
-    if (mEffectsFactory == 0) return NO_INIT;
-    native_handle_t* hidlHandle = native_handle_create(1, 0);
-    hidlHandle->data[0] = fd;
-    Return<void> ret = mEffectsFactory->debugDump(hidlHandle);
-    native_handle_delete(hidlHandle);
-    return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) {
-    return EffectBufferHalHidl::allocate(size, buffer);
-}
-
-status_t EffectsFactoryHalHidl::mirrorBuffer(void* external, size_t size,
-                          sp<EffectBufferHalInterface>* buffer) {
-    return EffectBufferHalHidl::mirror(external, size, buffer);
-}
-
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalHidl.cpp b/media/libaudiohal/2.0/StreamHalHidl.cpp
deleted file mode 100644
index 9869cd2..0000000
--- a/media/libaudiohal/2.0/StreamHalHidl.cpp
+++ /dev/null
@@ -1,768 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IStreamOutCallback.h>
-#include <hwbinder/IPCThreadState.h>
-#include <mediautils/SchedulingPolicyService.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::audio::common::V2_0::ThreadInfo;
-using ::android::hardware::audio::V2_0::AudioDrain;
-using ::android::hardware::audio::V2_0::IStreamOutCallback;
-using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::V2_0::MmapBufferInfo;
-using ::android::hardware::audio::V2_0::MmapPosition;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::audio::V2_0::TimeSpec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::V2_0::IStreamIn::ReadCommand;
-
-namespace android {
-
-StreamHalHidl::StreamHalHidl(IStream *stream)
-        : ConversionHelperHidl("Stream"),
-          mStream(stream),
-          mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT),
-          mCachedBufferSize(0) {
-
-    // Instrument audio signal power logging.
-    // Note: This assumes channel mask, format, and sample rate do not change after creation.
-    if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
-        // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
-        Return<void> ret = mStream->getAudioProperties(
-                [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
-                mStreamPowerLog.init(sr,
-                        static_cast<audio_channel_mask_t>(m),
-                        static_cast<audio_format_t>(f));
-            });
-    }
-}
-
-StreamHalHidl::~StreamHalHidl() {
-    mStream = nullptr;
-}
-
-status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
-    if (!mStream) return NO_INIT;
-    return processReturn("getSampleRate", mStream->getSampleRate(), rate);
-}
-
-status_t StreamHalHidl::getBufferSize(size_t *size) {
-    if (!mStream) return NO_INIT;
-    status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
-    if (status == OK) {
-        mCachedBufferSize = *size;
-    }
-    return status;
-}
-
-status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
-    if (!mStream) return NO_INIT;
-    return processReturn("getChannelMask", mStream->getChannelMask(), mask);
-}
-
-status_t StreamHalHidl::getFormat(audio_format_t *format) {
-    if (!mStream) return NO_INIT;
-    return processReturn("getFormat", mStream->getFormat(), format);
-}
-
-status_t StreamHalHidl::getAudioProperties(
-        uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
-    if (!mStream) return NO_INIT;
-    Return<void> ret = mStream->getAudioProperties(
-            [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
-                *sampleRate = sr;
-                *mask = static_cast<audio_channel_mask_t>(m);
-                *format = static_cast<audio_format_t>(f);
-            });
-    return processReturn("getAudioProperties", ret);
-}
-
-status_t StreamHalHidl::setParameters(const String8& kvPairs) {
-    if (!mStream) return NO_INIT;
-    hidl_vec<ParameterValue> hidlParams;
-    status_t status = parametersFromHal(kvPairs, &hidlParams);
-    if (status != OK) return status;
-    return processReturn("setParameters", mStream->setParameters(hidlParams));
-}
-
-status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
-    values->clear();
-    if (!mStream) return NO_INIT;
-    hidl_vec<hidl_string> hidlKeys;
-    status_t status = keysFromHal(keys, &hidlKeys);
-    if (status != OK) return status;
-    Result retval;
-    Return<void> ret = mStream->getParameters(
-            hidlKeys,
-            [&](Result r, const hidl_vec<ParameterValue>& parameters) {
-                retval = r;
-                if (retval == Result::OK) {
-                    parametersToHal(parameters, values);
-                }
-            });
-    return processReturn("getParameters", ret, retval);
-}
-
-status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
-    if (!mStream) return NO_INIT;
-    return processReturn("addEffect", mStream->addEffect(
-                    static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
-    if (!mStream) return NO_INIT;
-    return processReturn("removeEffect", mStream->removeEffect(
-                    static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::standby() {
-    if (!mStream) return NO_INIT;
-    return processReturn("standby", mStream->standby());
-}
-
-status_t StreamHalHidl::dump(int fd) {
-    if (!mStream) return NO_INIT;
-    native_handle_t* hidlHandle = native_handle_create(1, 0);
-    hidlHandle->data[0] = fd;
-    Return<void> ret = mStream->debugDump(hidlHandle);
-    native_handle_delete(hidlHandle);
-    mStreamPowerLog.dump(fd);
-    return processReturn("dump", ret);
-}
-
-status_t StreamHalHidl::start() {
-    if (!mStream) return NO_INIT;
-    return processReturn("start", mStream->start());
-}
-
-status_t StreamHalHidl::stop() {
-    if (!mStream) return NO_INIT;
-    return processReturn("stop", mStream->stop());
-}
-
-status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
-                                  struct audio_mmap_buffer_info *info) {
-    Result retval;
-    Return<void> ret = mStream->createMmapBuffer(
-            minSizeFrames,
-            [&](Result r, const MmapBufferInfo& hidlInfo) {
-                retval = r;
-                if (retval == Result::OK) {
-                    const native_handle *handle = hidlInfo.sharedMemory.handle();
-                    if (handle->numFds > 0) {
-                        info->shared_memory_fd = handle->data[0];
-                        info->buffer_size_frames = hidlInfo.bufferSizeFrames;
-                        info->burst_size_frames = hidlInfo.burstSizeFrames;
-                        // info->shared_memory_address is not needed in HIDL context
-                        info->shared_memory_address = NULL;
-                    } else {
-                        retval = Result::NOT_INITIALIZED;
-                    }
-                }
-            });
-    return processReturn("createMmapBuffer", ret, retval);
-}
-
-status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
-    Result retval;
-    Return<void> ret = mStream->getMmapPosition(
-            [&](Result r, const MmapPosition& hidlPosition) {
-                retval = r;
-                if (retval == Result::OK) {
-                    position->time_nanoseconds = hidlPosition.timeNanoseconds;
-                    position->position_frames = hidlPosition.positionFrames;
-                }
-            });
-    return processReturn("getMmapPosition", ret, retval);
-}
-
-status_t StreamHalHidl::setHalThreadPriority(int priority) {
-    mHalThreadPriority = priority;
-    return OK;
-}
-
-status_t StreamHalHidl::getCachedBufferSize(size_t *size) {
-    if (mCachedBufferSize != 0) {
-        *size = mCachedBufferSize;
-        return OK;
-    }
-    return getBufferSize(size);
-}
-
-bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
-    if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
-        return true;
-    }
-    int err = requestPriority(
-            threadPid, threadId,
-            mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
-    ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
-            mHalThreadPriority, threadPid, threadId, err);
-    // Audio will still work, but latency will be higher and sometimes unacceptable.
-    return err == 0;
-}
-
-namespace {
-
-/* Notes on callback ownership.
-
-This is how the (Hw)Binder ownership model works: the server implementation
-is owned by the Binder framework (via sp<>), while proxies are owned by clients.
-When the last proxy disappears, the Binder framework releases the server impl.
-
-Thus, there is no need to keep any references to StreamOutCallback (the
-server impl) -- it lives as long as the HAL server holds a strong ref to
-the IStreamOutCallback proxy. We clear that reference by calling 'clearCallback'
-from the destructor of StreamOutHalHidl.
-
-The callback only keeps a weak reference to the stream. The stream is owned
-by AudioFlinger.
-
-*/
-
-struct StreamOutCallback : public IStreamOutCallback {
-    StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
-
-    // IStreamOutCallback implementation
-    Return<void> onWriteReady()  override {
-        sp<StreamOutHalHidl> stream = mStream.promote();
-        if (stream != 0) {
-            stream->onWriteReady();
-        }
-        return Void();
-    }
-
-    Return<void> onDrainReady()  override {
-        sp<StreamOutHalHidl> stream = mStream.promote();
-        if (stream != 0) {
-            stream->onDrainReady();
-        }
-        return Void();
-    }
-
-    Return<void> onError()  override {
-        sp<StreamOutHalHidl> stream = mStream.promote();
-        if (stream != 0) {
-            stream->onError();
-        }
-        return Void();
-    }
-
-  private:
-    wp<StreamOutHalHidl> mStream;
-};
-
-}  // namespace
-
-StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
-        : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
-}
-
-StreamOutHalHidl::~StreamOutHalHidl() {
-    if (mStream != 0) {
-        if (mCallback.unsafe_get()) {
-            processReturn("clearCallback", mStream->clearCallback());
-        }
-        processReturn("close", mStream->close());
-        mStream.clear();
-    }
-    mCallback.clear();
-    hardware::IPCThreadState::self()->flushCommands();
-    if (mEfGroup) {
-        EventFlag::deleteEventFlag(&mEfGroup);
-    }
-}
-
-status_t StreamOutHalHidl::getFrameSize(size_t *size) {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
-    if (mStream == 0) return NO_INIT;
-    if (mWriterClient == gettid() && mCommandMQ) {
-        return callWriterThread(
-                WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
-                [&](const WriteStatus& writeStatus) {
-                    *latency = writeStatus.reply.latencyMs;
-                });
-    } else {
-        return processReturn("getLatency", mStream->getLatency(), latency);
-    }
-}
-
-status_t StreamOutHalHidl::setVolume(float left, float right) {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("setVolume", mStream->setVolume(left, right));
-}
-
-status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
-    if (mStream == 0) return NO_INIT;
-    *written = 0;
-
-    if (bytes == 0 && !mDataMQ) {
-        // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
-        ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
-        return OK;
-    }
-
-    status_t status;
-    if (!mDataMQ) {
-        // If playback starts close to the end of a compressed track, the number of
-        // bytes that need to be written is less than the actual buffer size. Still use
-        // the full buffer size for the MQ, since otherwise data will be truncated
-        // after seeking back to the middle of the track.
-        size_t bufferSize;
-        if ((status = getCachedBufferSize(&bufferSize)) != OK) {
-            return status;
-        }
-        if (bytes > bufferSize) bufferSize = bytes;
-        if ((status = prepareForWriting(bufferSize)) != OK) {
-            return status;
-        }
-    }
-
-    status = callWriterThread(
-            WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
-            [&] (const WriteStatus& writeStatus) {
-                *written = writeStatus.reply.written;
-                // Diagnostics of the cause of b/35813113.
-                ALOGE_IF(*written > bytes,
-                        "hal reports more bytes written than asked for: %lld > %lld",
-                        (long long)*written, (long long)bytes);
-            });
-    mStreamPowerLog.log(buffer, *written);
-    return status;
-}
-
-status_t StreamOutHalHidl::callWriterThread(
-        WriteCommand cmd, const char* cmdName,
-        const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
-    if (!mCommandMQ->write(&cmd)) {
-        ALOGE("command message queue write failed for \"%s\"", cmdName);
-        return -EAGAIN;
-    }
-    if (data != nullptr) {
-        size_t availableToWrite = mDataMQ->availableToWrite();
-        if (dataSize > availableToWrite) {
-            ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
-                    (long long)dataSize, (long long)availableToWrite);
-            dataSize = availableToWrite;
-        }
-        if (!mDataMQ->write(data, dataSize)) {
-            ALOGE("data message queue write failed for \"%s\"", cmdName);
-        }
-    }
-    mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
-
-    // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
-    uint32_t efState = 0;
-retry:
-    status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState);
-    if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
-        WriteStatus writeStatus;
-        writeStatus.retval = Result::NOT_INITIALIZED;
-        if (!mStatusMQ->read(&writeStatus)) {
-            ALOGE("status message read failed for \"%s\"", cmdName);
-        }
-        if (writeStatus.retval == Result::OK) {
-            ret = OK;
-            callback(writeStatus);
-        } else {
-            ret = processReturn(cmdName, writeStatus.retval);
-        }
-        return ret;
-    }
-    if (ret == -EAGAIN || ret == -EINTR) {
-        // Spurious wakeup. This normally retries no more than once.
-        goto retry;
-    }
-    return ret;
-}
-
-status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
-    std::unique_ptr<CommandMQ> tempCommandMQ;
-    std::unique_ptr<DataMQ> tempDataMQ;
-    std::unique_ptr<StatusMQ> tempStatusMQ;
-    Result retval;
-    pid_t halThreadPid, halThreadTid;
-    Return<void> ret = mStream->prepareForWriting(
-            1, bufferSize,
-            [&](Result r,
-                    const CommandMQ::Descriptor& commandMQ,
-                    const DataMQ::Descriptor& dataMQ,
-                    const StatusMQ::Descriptor& statusMQ,
-                    const ThreadInfo& halThreadInfo) {
-                retval = r;
-                if (retval == Result::OK) {
-                    tempCommandMQ.reset(new CommandMQ(commandMQ));
-                    tempDataMQ.reset(new DataMQ(dataMQ));
-                    tempStatusMQ.reset(new StatusMQ(statusMQ));
-                    if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
-                        EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
-                    }
-                    halThreadPid = halThreadInfo.pid;
-                    halThreadTid = halThreadInfo.tid;
-                }
-            });
-    if (!ret.isOk() || retval != Result::OK) {
-        return processReturn("prepareForWriting", ret, retval);
-    }
-    if (!tempCommandMQ || !tempCommandMQ->isValid() ||
-            !tempDataMQ || !tempDataMQ->isValid() ||
-            !tempStatusMQ || !tempStatusMQ->isValid() ||
-            !mEfGroup) {
-        ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
-        ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
-                "Command message queue for writing is invalid");
-        ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
-        ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
-        ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
-        ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
-                "Status message queue for writing is invalid");
-        ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
-        return NO_INIT;
-    }
-    requestHalThreadPriority(halThreadPid, halThreadTid);
-
-    mCommandMQ = std::move(tempCommandMQ);
-    mDataMQ = std::move(tempDataMQ);
-    mStatusMQ = std::move(tempStatusMQ);
-    mWriterClient = gettid();
-    return OK;
-}
-
-status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
-    if (mStream == 0) return NO_INIT;
-    Result retval;
-    Return<void> ret = mStream->getRenderPosition(
-            [&](Result r, uint32_t d) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *dspFrames = d;
-                }
-            });
-    return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
-    if (mStream == 0) return NO_INIT;
-    Result retval;
-    Return<void> ret = mStream->getNextWriteTimestamp(
-            [&](Result r, int64_t t) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *timestamp = t;
-                }
-            });
-    return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
-    if (mStream == 0) return NO_INIT;
-    status_t status = processReturn(
-            "setCallback", mStream->setCallback(new StreamOutCallback(this)));
-    if (status == OK) {
-        mCallback = callback;
-    }
-    return status;
-}
-
-status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
-    if (mStream == 0) return NO_INIT;
-    Return<void> ret = mStream->supportsPauseAndResume(
-            [&](bool p, bool r) {
-                *supportsPause = p;
-                *supportsResume = r;
-            });
-    return processReturn("supportsPauseAndResume", ret);
-}
-
-status_t StreamOutHalHidl::pause() {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("pause", mStream->pause());
-}
-
-status_t StreamOutHalHidl::resume() {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("pause", mStream->resume());
-}
-
-status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
-}
-
-status_t StreamOutHalHidl::drain(bool earlyNotify) {
-    if (mStream == 0) return NO_INIT;
-    return processReturn(
-            "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
-}
-
-status_t StreamOutHalHidl::flush() {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("pause", mStream->flush());
-}
-
-status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
-    if (mStream == 0) return NO_INIT;
-    if (mWriterClient == gettid() && mCommandMQ) {
-        return callWriterThread(
-                WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
-                [&](const WriteStatus& writeStatus) {
-                    *frames = writeStatus.reply.presentationPosition.frames;
-                    timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
-                    timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
-                });
-    } else {
-        Result retval;
-        Return<void> ret = mStream->getPresentationPosition(
-                [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
-                    retval = r;
-                    if (retval == Result::OK) {
-                        *frames = hidlFrames;
-                        timestamp->tv_sec = hidlTimeStamp.tvSec;
-                        timestamp->tv_nsec = hidlTimeStamp.tvNSec;
-                    }
-                });
-        return processReturn("getPresentationPosition", ret, retval);
-    }
-}
-
-status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
-    // Audio HAL V2.0 does not support propagating source metadata
-    return INVALID_OPERATION;
-}
-
-void StreamOutHalHidl::onWriteReady() {
-    sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
-    if (callback == 0) return;
-    ALOGV("asyncCallback onWriteReady");
-    callback->onWriteReady();
-}
-
-void StreamOutHalHidl::onDrainReady() {
-    sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
-    if (callback == 0) return;
-    ALOGV("asyncCallback onDrainReady");
-    callback->onDrainReady();
-}
-
-void StreamOutHalHidl::onError() {
-    sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
-    if (callback == 0) return;
-    ALOGV("asyncCallback onError");
-    callback->onError();
-}
-
-
-StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
-        : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
-}
-
-StreamInHalHidl::~StreamInHalHidl() {
-    if (mStream != 0) {
-        processReturn("close", mStream->close());
-        mStream.clear();
-        hardware::IPCThreadState::self()->flushCommands();
-    }
-    if (mEfGroup) {
-        EventFlag::deleteEventFlag(&mEfGroup);
-    }
-}
-
-status_t StreamInHalHidl::getFrameSize(size_t *size) {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamInHalHidl::setGain(float gain) {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("setGain", mStream->setGain(gain));
-}
-
-status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
-    if (mStream == 0) return NO_INIT;
-    *read = 0;
-
-    if (bytes == 0 && !mDataMQ) {
-        // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
-        return OK;
-    }
-
-    status_t status;
-    if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
-        return status;
-    }
-
-    ReadParameters params;
-    params.command = ReadCommand::READ;
-    params.params.read = bytes;
-    status = callReaderThread(params, "read",
-            [&](const ReadStatus& readStatus) {
-                const size_t availToRead = mDataMQ->availableToRead();
-                if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
-                    ALOGE("data message queue read failed for \"read\"");
-                }
-                ALOGW_IF(availToRead != readStatus.reply.read,
-                        "HAL read report inconsistent: mq = %d, status = %d",
-                        (int32_t)availToRead, (int32_t)readStatus.reply.read);
-                *read = readStatus.reply.read;
-            });
-    mStreamPowerLog.log(buffer, *read);
-    return status;
-}
-
-status_t StreamInHalHidl::callReaderThread(
-        const ReadParameters& params, const char* cmdName,
-        StreamInHalHidl::ReaderCallback callback) {
-    if (!mCommandMQ->write(&params)) {
-        ALOGW("command message queue write failed");
-        return -EAGAIN;
-    }
-    mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
-
-    // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
-    uint32_t efState = 0;
-retry:
-    status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
-    if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
-        ReadStatus readStatus;
-        readStatus.retval = Result::NOT_INITIALIZED;
-        if (!mStatusMQ->read(&readStatus)) {
-            ALOGE("status message read failed for \"%s\"", cmdName);
-        }
-        if (readStatus.retval == Result::OK) {
-            ret = OK;
-            callback(readStatus);
-        } else {
-            ret = processReturn(cmdName, readStatus.retval);
-        }
-        return ret;
-    }
-    if (ret == -EAGAIN || ret == -EINTR) {
-        // Spurious wakeup. This normally retries no more than once.
-        goto retry;
-    }
-    return ret;
-}
-
-status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
-    std::unique_ptr<CommandMQ> tempCommandMQ;
-    std::unique_ptr<DataMQ> tempDataMQ;
-    std::unique_ptr<StatusMQ> tempStatusMQ;
-    Result retval;
-    pid_t halThreadPid, halThreadTid;
-    Return<void> ret = mStream->prepareForReading(
-            1, bufferSize,
-            [&](Result r,
-                    const CommandMQ::Descriptor& commandMQ,
-                    const DataMQ::Descriptor& dataMQ,
-                    const StatusMQ::Descriptor& statusMQ,
-                    const ThreadInfo& halThreadInfo) {
-                retval = r;
-                if (retval == Result::OK) {
-                    tempCommandMQ.reset(new CommandMQ(commandMQ));
-                    tempDataMQ.reset(new DataMQ(dataMQ));
-                    tempStatusMQ.reset(new StatusMQ(statusMQ));
-                    if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
-                        EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
-                    }
-                    halThreadPid = halThreadInfo.pid;
-                    halThreadTid = halThreadInfo.tid;
-                }
-            });
-    if (!ret.isOk() || retval != Result::OK) {
-        return processReturn("prepareForReading", ret, retval);
-    }
-    if (!tempCommandMQ || !tempCommandMQ->isValid() ||
-            !tempDataMQ || !tempDataMQ->isValid() ||
-            !tempStatusMQ || !tempStatusMQ->isValid() ||
-            !mEfGroup) {
-        ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
-        ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
-                "Command message queue for writing is invalid");
-        ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
-        ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
-        ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
-        ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
-                "Status message queue for reading is invalid");
-        ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
-        return NO_INIT;
-    }
-    requestHalThreadPriority(halThreadPid, halThreadTid);
-
-    mCommandMQ = std::move(tempCommandMQ);
-    mDataMQ = std::move(tempDataMQ);
-    mStatusMQ = std::move(tempStatusMQ);
-    mReaderClient = gettid();
-    return OK;
-}
-
-status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
-    if (mStream == 0) return NO_INIT;
-    return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
-}
-
-status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
-    if (mStream == 0) return NO_INIT;
-    if (mReaderClient == gettid() && mCommandMQ) {
-        ReadParameters params;
-        params.command = ReadCommand::GET_CAPTURE_POSITION;
-        return callReaderThread(params, "getCapturePosition",
-                [&](const ReadStatus& readStatus) {
-                    *frames = readStatus.reply.capturePosition.frames;
-                    *time = readStatus.reply.capturePosition.time;
-                });
-    } else {
-        Result retval;
-        Return<void> ret = mStream->getCapturePosition(
-                [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
-                    retval = r;
-                    if (retval == Result::OK) {
-                        *frames = hidlFrames;
-                        *time = hidlTime;
-                    }
-                });
-        return processReturn("getCapturePosition", ret, retval);
-    }
-}
-
-status_t StreamInHalHidl::getActiveMicrophones(
-        std::vector<media::MicrophoneInfo> *microphones __unused) {
-    if (mStream == 0) return NO_INIT;
-    return INVALID_OPERATION;
-}
-
-status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
-    // Audio HAL V2.0 does not support propagating sink metadata
-    return INVALID_OPERATION;
-}
-
-} // namespace android
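
For context on the removed write/read path above: callWriterThread() and callReaderThread() push a command onto a fast message queue, wake the HAL worker through an EventFlag, then block until a status record appears, retrying on spurious wakeups. The standalone C++ sketch below models that round trip with standard-library primitives only (std::condition_variable in place of the EventFlag, std::deque in place of the synchronized FMQs); every name in it is illustrative and none of it comes from the removed files.

// Standalone model of the command/status round trip used by callWriterThread()/
// callReaderThread() above. std::condition_variable replaces the FMQ EventFlag;
// std::deque replaces the synchronized message queues. Illustrative only.
#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

struct Command { int opcode; };
struct Status  { int retval; };

struct Channel {
    std::mutex lock;
    std::condition_variable notEmpty;   // plays the role of MessageQueueFlagBits::NOT_EMPTY
    std::deque<Command> commandMQ;
    std::deque<Status>  statusMQ;
};

// Client side: enqueue a command, wake the worker, wait for a status reply.
Status callWorker(Channel& ch, Command cmd) {
    std::unique_lock<std::mutex> ul(ch.lock);
    ch.commandMQ.push_back(cmd);
    ch.notEmpty.notify_all();
    // wait() re-checks the predicate, which is what the retry-on-spurious-wakeup
    // loop in the original code achieves manually.
    ch.notEmpty.wait(ul, [&] { return !ch.statusMQ.empty(); });
    Status st = ch.statusMQ.front();
    ch.statusMQ.pop_front();
    return st;
}

// Worker side: consume one command and publish a status (stands in for the HAL thread).
void workerOnce(Channel& ch) {
    std::unique_lock<std::mutex> ul(ch.lock);
    ch.notEmpty.wait(ul, [&] { return !ch.commandMQ.empty(); });
    Command cmd = ch.commandMQ.front();
    ch.commandMQ.pop_front();
    ch.statusMQ.push_back(Status{cmd.opcode == 0 ? 0 : -1});
    ch.notEmpty.notify_all();
}

int main() {
    Channel ch;
    std::thread hal(workerOnce, std::ref(ch));
    Status st = callWorker(ch, Command{0});
    hal.join();
    std::cout << "status " << st.retval << "\n";
    return 0;
}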
diff --git a/media/libaudiohal/2.0/StreamHalLocal.cpp b/media/libaudiohal/2.0/StreamHalLocal.cpp
deleted file mode 100644
index 98107e5..0000000
--- a/media/libaudiohal/2.0/StreamHalLocal.cpp
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <hardware/audio.h>
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
-        : mDevice(device),
-          mStream(stream) {
-    // Instrument audio signal power logging.
-    // Note: This assumes channel mask, format, and sample rate do not change after creation.
-    if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
-        mStreamPowerLog.init(mStream->get_sample_rate(mStream),
-                mStream->get_channels(mStream),
-                mStream->get_format(mStream));
-    }
-}
-
-StreamHalLocal::~StreamHalLocal() {
-    mStream = 0;
-    mDevice.clear();
-}
-
-status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
-    *rate = mStream->get_sample_rate(mStream);
-    return OK;
-}
-
-status_t StreamHalLocal::getBufferSize(size_t *size) {
-    *size = mStream->get_buffer_size(mStream);
-    return OK;
-}
-
-status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
-    *mask = mStream->get_channels(mStream);
-    return OK;
-}
-
-status_t StreamHalLocal::getFormat(audio_format_t *format) {
-    *format = mStream->get_format(mStream);
-    return OK;
-}
-
-status_t StreamHalLocal::getAudioProperties(
-        uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
-    *sampleRate = mStream->get_sample_rate(mStream);
-    *mask = mStream->get_channels(mStream);
-    *format = mStream->get_format(mStream);
-    return OK;
-}
-
-status_t StreamHalLocal::setParameters(const String8& kvPairs) {
-    return mStream->set_parameters(mStream, kvPairs.string());
-}
-
-status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
-    char *halValues = mStream->get_parameters(mStream, keys.string());
-    if (halValues != NULL) {
-        values->setTo(halValues);
-        free(halValues);
-    } else {
-        values->clear();
-    }
-    return OK;
-}
-
-status_t StreamHalLocal::addEffect(sp<EffectHalInterface>) {
-    LOG_ALWAYS_FATAL("Local streams can not have effects");
-    return INVALID_OPERATION;
-}
-
-status_t StreamHalLocal::removeEffect(sp<EffectHalInterface>) {
-    LOG_ALWAYS_FATAL("Local streams can not have effects");
-    return INVALID_OPERATION;
-}
-
-status_t StreamHalLocal::standby() {
-    return mStream->standby(mStream);
-}
-
-status_t StreamHalLocal::dump(int fd) {
-    status_t status = mStream->dump(mStream, fd);
-    mStreamPowerLog.dump(fd);
-    return status;
-}
-
-status_t StreamHalLocal::setHalThreadPriority(int) {
-    // Nothing to do here: the local HAL is executed by AudioFlinger directly
-    // on the same thread.
-    return OK;
-}
-
-StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
-        : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamOutHalLocal::~StreamOutHalLocal() {
-    mCallback.clear();
-    mDevice->closeOutputStream(mStream);
-    mStream = 0;
-}
-
-status_t StreamOutHalLocal::getFrameSize(size_t *size) {
-    *size = audio_stream_out_frame_size(mStream);
-    return OK;
-}
-
-status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
-    *latency = mStream->get_latency(mStream);
-    return OK;
-}
-
-status_t StreamOutHalLocal::setVolume(float left, float right) {
-    if (mStream->set_volume == NULL) return INVALID_OPERATION;
-    return mStream->set_volume(mStream, left, right);
-}
-
-status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
-    ssize_t writeResult = mStream->write(mStream, buffer, bytes);
-    if (writeResult > 0) {
-        *written = writeResult;
-        mStreamPowerLog.log(buffer, *written);
-        return OK;
-    } else {
-        *written = 0;
-        return writeResult;
-    }
-}
-
-status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
-    return mStream->get_render_position(mStream, dspFrames);
-}
-
-status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
-    if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
-    return mStream->get_next_write_timestamp(mStream, timestamp);
-}
-
-status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
-    if (mStream->set_callback == NULL) return INVALID_OPERATION;
-    status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
-    if (result == OK) {
-        mCallback = callback;
-    }
-    return result;
-}
-
-// static
-int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
-    // We act as if we had given a wp<StreamOutHalLocal> to the HAL. This correctly handles
-    // the case when the callback is invoked while StreamOutHalLocal's destructor is already
-    // running, because the destructor runs only after the refcount has been atomically
-    // decremented.
-    wp<StreamOutHalLocal> weakSelf(static_cast<StreamOutHalLocal*>(cookie));
-    sp<StreamOutHalLocal> self = weakSelf.promote();
-    if (self == 0) return 0;
-    sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
-    if (callback == 0) return 0;
-    ALOGV("asyncCallback() event %d", event);
-    switch (event) {
-        case STREAM_CBK_EVENT_WRITE_READY:
-            callback->onWriteReady();
-            break;
-        case STREAM_CBK_EVENT_DRAIN_READY:
-            callback->onDrainReady();
-            break;
-        case STREAM_CBK_EVENT_ERROR:
-            callback->onError();
-            break;
-        default:
-            ALOGW("asyncCallback() unknown event %d", event);
-            break;
-    }
-    return 0;
-}
-
-status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
-    *supportsPause = mStream->pause != NULL;
-    *supportsResume = mStream->resume != NULL;
-    return OK;
-}
-
-status_t StreamOutHalLocal::pause() {
-    if (mStream->pause == NULL) return INVALID_OPERATION;
-    return mStream->pause(mStream);
-}
-
-status_t StreamOutHalLocal::resume() {
-    if (mStream->resume == NULL) return INVALID_OPERATION;
-    return mStream->resume(mStream);
-}
-
-status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
-    *supportsDrain = mStream->drain != NULL;
-    return OK;
-}
-
-status_t StreamOutHalLocal::drain(bool earlyNotify) {
-    if (mStream->drain == NULL) return INVALID_OPERATION;
-    return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
-}
-
-status_t StreamOutHalLocal::flush() {
-    if (mStream->flush == NULL) return INVALID_OPERATION;
-    return mStream->flush(mStream);
-}
-
-status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
-    if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
-    return mStream->get_presentation_position(mStream, frames, timestamp);
-}
-
-status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
-    if (mStream->update_source_metadata == nullptr) {
-        return INVALID_OPERATION;
-    }
-    const source_metadata_t metadata {
-        .track_count = sourceMetadata.tracks.size(),
-        // const cast is fine as it is in a const structure
-        .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
-    };
-    mStream->update_source_metadata(mStream, &metadata);
-    return OK;
-}
-
-status_t StreamOutHalLocal::start() {
-    if (mStream->start == NULL) return INVALID_OPERATION;
-    return mStream->start(mStream);
-}
-
-status_t StreamOutHalLocal::stop() {
-    if (mStream->stop == NULL) return INVALID_OPERATION;
-    return mStream->stop(mStream);
-}
-
-status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
-                                  struct audio_mmap_buffer_info *info) {
-    if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
-    return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
-    if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
-    return mStream->get_mmap_position(mStream, position);
-}
-
-StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
-        : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamInHalLocal::~StreamInHalLocal() {
-    mDevice->closeInputStream(mStream);
-    mStream = 0;
-}
-
-status_t StreamInHalLocal::getFrameSize(size_t *size) {
-    *size = audio_stream_in_frame_size(mStream);
-    return OK;
-}
-
-status_t StreamInHalLocal::setGain(float gain) {
-    return mStream->set_gain(mStream, gain);
-}
-
-status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
-    ssize_t readResult = mStream->read(mStream, buffer, bytes);
-    if (readResult > 0) {
-        *read = readResult;
-        mStreamPowerLog.log(buffer, *read);
-        return OK;
-    } else {
-        *read = 0;
-        return readResult;
-    }
-}
-
-status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
-    *framesLost = mStream->get_input_frames_lost(mStream);
-    return OK;
-}
-
-status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
-    if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
-    return mStream->get_capture_position(mStream, frames, time);
-}
-
-status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
-    if (mStream->update_sink_metadata == nullptr) {
-        return INVALID_OPERATION;
-    }
-    const sink_metadata_t metadata {
-        .track_count = sinkMetadata.tracks.size(),
-        // const cast is fine as it is in a const structure
-        .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
-    };
-    mStream->update_sink_metadata(mStream, &metadata);
-    return OK;
-}
-
-status_t StreamInHalLocal::start() {
-    if (mStream->start == NULL) return INVALID_OPERATION;
-    return mStream->start(mStream);
-}
-
-status_t StreamInHalLocal::stop() {
-    if (mStream->stop == NULL) return INVALID_OPERATION;
-    return mStream->stop(mStream);
-}
-
-status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
-                                  struct audio_mmap_buffer_info *info) {
-    if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
-    return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
-    if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
-    return mStream->get_mmap_position(mStream, position);
-}
-
-status_t StreamInHalLocal::getActiveMicrophones(
-        std::vector<media::MicrophoneInfo> *microphones __unused) {
-    return INVALID_OPERATION;
-}
-
-} // namespace android
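
The removed StreamOutHalLocal::asyncCallback() above treats the raw cookie as a weak reference and promotes it before use, so a callback that races with the stream's destruction is dropped instead of touching a dead object. A minimal standalone analogue using std::weak_ptr is sketched below; note that android::wp can be built from a raw pointer to a RefBase object while std::weak_ptr cannot, so the sketch registers the weak reference up front. All names here are illustrative.

// Minimal analogue of StreamOutHalLocal::asyncCallback()'s cookie handling,
// using std::shared_ptr/std::weak_ptr instead of android::sp/wp. Illustrative only.
#include <iostream>
#include <memory>

struct Stream {
    void onWriteReady() { std::cout << "write ready\n"; }
};

// The "HAL" only ever sees an opaque registration; the owner keeps the strong reference.
struct Registration {
    std::weak_ptr<Stream> cookie;
};

// Callback path: promote the weak reference; if the stream is already gone, do nothing.
void asyncCallback(const Registration& reg) {
    if (std::shared_ptr<Stream> self = reg.cookie.lock()) {
        self->onWriteReady();
    } else {
        std::cout << "stream already destroyed, callback dropped\n";
    }
}

int main() {
    auto stream = std::make_shared<Stream>();
    Registration reg{stream};
    asyncCallback(reg);   // delivered
    stream.reset();       // owner drops the stream
    asyncCallback(reg);   // safely ignored
    return 0;
}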
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.h b/media/libaudiohal/4.0/ConversionHelperHidl.h
deleted file mode 100644
index 8823a8d..0000000
--- a/media/libaudiohal/4.0/ConversionHelperHidl.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
-#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/types.h>
-#include <hidl/HidlSupport.h>
-#include <system/audio.h>
-#include <utils/String8.h>
-
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::Return;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-
-namespace android {
-namespace V4_0 {
-
-class ConversionHelperHidl {
-  protected:
-    static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
-    static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
-    static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
-    static void microphoneInfoToHal(const MicrophoneInfo& src,
-                                    audio_microphone_characteristic_t *pDst);
-
-    ConversionHelperHidl(const char* className);
-
-    template<typename R, typename T>
-    status_t processReturn(const char* funcName, const Return<R>& ret, T *retval) {
-        if (ret.isOk()) {
-            // This way it also works for enum class to unscoped enum conversion.
-            *retval = static_cast<T>(static_cast<R>(ret));
-            return OK;
-        }
-        return processReturn(funcName, ret);
-    }
-
-    template<typename T>
-    status_t processReturn(const char* funcName, const Return<T>& ret) {
-        if (!ret.isOk()) {
-            emitError(funcName, ret.description().c_str());
-        }
-        return ret.isOk() ? OK : FAILED_TRANSACTION;
-    }
-
-    status_t processReturn(const char* funcName, const Return<hardware::audio::V4_0::Result>& ret) {
-        if (!ret.isOk()) {
-            emitError(funcName, ret.description().c_str());
-        }
-        return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-    }
-
-    template<typename T>
-    status_t processReturn(
-            const char* funcName, const Return<T>& ret, hardware::audio::V4_0::Result retval) {
-        if (!ret.isOk()) {
-            emitError(funcName, ret.description().c_str());
-        }
-        return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
-    }
-
-  private:
-    const char* mClassName;
-
-    static status_t analyzeResult(const hardware::audio::V4_0::Result& result);
-
-    void emitError(const char* funcName, const char* description);
-};
-
-}  // namespace V4_0
-}  // namespace android
-
-#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.h b/media/libaudiohal/4.0/DeviceHalHidl.h
deleted file mode 100644
index 0bd2175..0000000
--- a/media/libaudiohal/4.0/DeviceHalHidl.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/IDevice.h>
-#include <android/hardware/audio/4.0/IPrimaryDevice.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevice;
-using ::android::hardware::audio::V4_0::IPrimaryDevice;
-using ::android::hardware::Return;
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
-{
-  public:
-    // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
-    virtual status_t getSupportedDevices(uint32_t *devices);
-
-    // Check to see if the audio hardware interface has been initialized.
-    virtual status_t initCheck();
-
-    // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
-    virtual status_t setVoiceVolume(float volume);
-
-    // Set the audio volume for all audio activities other than voice call.
-    virtual status_t setMasterVolume(float volume);
-
-    // Get the current master volume value for the HAL.
-    virtual status_t getMasterVolume(float *volume);
-
-    // Called when the audio mode changes.
-    virtual status_t setMode(audio_mode_t mode);
-
-    // Muting control.
-    virtual status_t setMicMute(bool state);
-    virtual status_t getMicMute(bool *state);
-    virtual status_t setMasterMute(bool state);
-    virtual status_t getMasterMute(bool *state);
-
-    // Set global audio parameters.
-    virtual status_t setParameters(const String8& kvPairs);
-
-    // Get global audio parameters.
-    virtual status_t getParameters(const String8& keys, String8 *values);
-
-    // Returns audio input buffer size according to parameters passed.
-    virtual status_t getInputBufferSize(const struct audio_config *config,
-            size_t *size);
-
-    // Creates and opens the audio hardware output stream. The stream is closed
-    // by releasing all references to the returned object.
-    virtual status_t openOutputStream(
-            audio_io_handle_t handle,
-            audio_devices_t devices,
-            audio_output_flags_t flags,
-            struct audio_config *config,
-            const char *address,
-            sp<StreamOutHalInterface> *outStream);
-
-    // Creates and opens the audio hardware input stream. The stream is closed
-    // by releasing all references to the returned object.
-    virtual status_t openInputStream(
-            audio_io_handle_t handle,
-            audio_devices_t devices,
-            struct audio_config *config,
-            audio_input_flags_t flags,
-            const char *address,
-            audio_source_t source,
-            sp<StreamInHalInterface> *inStream);
-
-    // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
-    virtual status_t supportsAudioPatches(bool *supportsPatches);
-
-    // Creates an audio patch between several source and sink ports.
-    virtual status_t createAudioPatch(
-            unsigned int num_sources,
-            const struct audio_port_config *sources,
-            unsigned int num_sinks,
-            const struct audio_port_config *sinks,
-            audio_patch_handle_t *patch);
-
-    // Releases an audio patch.
-    virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
-    // Fills the list of supported attributes for a given audio port.
-    virtual status_t getAudioPort(struct audio_port *port);
-
-    // Set audio port configuration.
-    virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
-    // List microphones
-    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
-    virtual status_t dump(int fd);
-
-  private:
-    friend class DevicesFactoryHalHidl;
-    sp<IDevice> mDevice;
-    sp<IPrimaryDevice> mPrimaryDevice;  // Null if it's not a primary device.
-
-    // Can not be constructed directly by clients.
-    explicit DeviceHalHidl(const sp<IDevice>& device);
-
-    // The destructor automatically closes the device.
-    virtual ~DeviceHalHidl();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.h b/media/libaudiohal/4.0/DeviceHalLocal.h
deleted file mode 100644
index 08341a4..0000000
--- a/media/libaudiohal/4.0/DeviceHalLocal.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
-
-#include <hardware/audio.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalLocal : public DeviceHalInterface
-{
-  public:
-    // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
-    virtual status_t getSupportedDevices(uint32_t *devices);
-
-    // Check to see if the audio hardware interface has been initialized.
-    virtual status_t initCheck();
-
-    // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
-    virtual status_t setVoiceVolume(float volume);
-
-    // Set the audio volume for all audio activities other than voice call.
-    virtual status_t setMasterVolume(float volume);
-
-    // Get the current master volume value for the HAL.
-    virtual status_t getMasterVolume(float *volume);
-
-    // Called when the audio mode changes.
-    virtual status_t setMode(audio_mode_t mode);
-
-    // Muting control.
-    virtual status_t setMicMute(bool state);
-    virtual status_t getMicMute(bool *state);
-    virtual status_t setMasterMute(bool state);
-    virtual status_t getMasterMute(bool *state);
-
-    // Set global audio parameters.
-    virtual status_t setParameters(const String8& kvPairs);
-
-    // Get global audio parameters.
-    virtual status_t getParameters(const String8& keys, String8 *values);
-
-    // Returns audio input buffer size according to parameters passed.
-    virtual status_t getInputBufferSize(const struct audio_config *config,
-            size_t *size);
-
-    // Creates and opens the audio hardware output stream. The stream is closed
-    // by releasing all references to the returned object.
-    virtual status_t openOutputStream(
-            audio_io_handle_t handle,
-            audio_devices_t devices,
-            audio_output_flags_t flags,
-            struct audio_config *config,
-            const char *address,
-            sp<StreamOutHalInterface> *outStream);
-
-    // Creates and opens the audio hardware input stream. The stream is closed
-    // by releasing all references to the returned object.
-    virtual status_t openInputStream(
-            audio_io_handle_t handle,
-            audio_devices_t devices,
-            struct audio_config *config,
-            audio_input_flags_t flags,
-            const char *address,
-            audio_source_t source,
-            sp<StreamInHalInterface> *inStream);
-
-    // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
-    virtual status_t supportsAudioPatches(bool *supportsPatches);
-
-    // Creates an audio patch between several source and sink ports.
-    virtual status_t createAudioPatch(
-            unsigned int num_sources,
-            const struct audio_port_config *sources,
-            unsigned int num_sinks,
-            const struct audio_port_config *sinks,
-            audio_patch_handle_t *patch);
-
-    // Releases an audio patch.
-    virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
-    // Fills the list of supported attributes for a given audio port.
-    virtual status_t getAudioPort(struct audio_port *port);
-
-    // Set audio port configuration.
-    virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
-    // List microphones
-    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
-    virtual status_t dump(int fd);
-
-    void closeOutputStream(struct audio_stream_out *stream_out);
-    void closeInputStream(struct audio_stream_in *stream_in);
-
-  private:
-    audio_hw_device_t *mDev;
-
-    friend class DevicesFactoryHalLocal;
-
-    // Can not be constructed directly by clients.
-    explicit DeviceHalLocal(audio_hw_device_t *dev);
-
-    // The destructor automatically closes the device.
-    virtual ~DeviceHalLocal();
-
-    uint32_t version() const { return mDev->common.version; }
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
deleted file mode 100644
index c83194e..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "DevicesFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/4.0/IDevice.h>
-#include <media/audiohal/hidl/HalDeathHandler.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "DeviceHalHidl.h"
-#include "DevicesFactoryHalHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevice;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-namespace V4_0 {
-
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
-    mDevicesFactory = IDevicesFactory::getService();
-    if (mDevicesFactory != 0) {
-        // It is assumed that DevicesFactory is owned by AudioFlinger
-        // and thus has the same lifespan.
-        mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
-    } else {
-        ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
-        exit(1);
-    }
-    // The MSD factory is optional
-    mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
-    // TODO: Register death handler, and add 'restart' directive to audioserver.rc
-}
-
-DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
-}
-
-status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
-    if (mDevicesFactory == 0) return NO_INIT;
-    Result retval = Result::NOT_INITIALIZED;
-    Return<void> ret = mDevicesFactory->openDevice(
-            name,
-            [&](Result r, const sp<IDevice>& result) {
-                retval = r;
-                if (retval == Result::OK) {
-                    *device = new DeviceHalHidl(result);
-                }
-            });
-    if (ret.isOk()) {
-        if (retval == Result::OK) return OK;
-        else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
-        else return NO_INIT;
-    }
-    return FAILED_TRANSACTION;
-}
-
-} // namespace V4_0
-} // namespace android
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.h b/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
deleted file mode 100644
index 114889b..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include "DeviceHalHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevicesFactory;
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
-{
-  public:
-    // Opens a device with the specified name. To close the device, it is
-    // necessary to release references to the returned object.
-    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
-  private:
-    friend class DevicesFactoryHalHybrid;
-
-    sp<IDevicesFactory> mDevicesFactory;
-    sp<IDevicesFactory> mDevicesFactoryMsd;
-
-    // Can not be constructed directly by clients.
-    DevicesFactoryHalHidl();
-
-    virtual ~DevicesFactoryHalHidl();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
deleted file mode 100644
index 7ff1ec7d..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DevicesFactoryHalHybrid"
-//#define LOG_NDEBUG 0
-
-#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
-#include "DevicesFactoryHalLocal.h"
-#include "DevicesFactoryHalHidl.h"
-
-namespace android {
-namespace V4_0 {
-
-DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
-        : mLocalFactory(new DevicesFactoryHalLocal()),
-          mHidlFactory(new DevicesFactoryHalHidl()) {
-}
-
-DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
-}
-
-status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
-    if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
-        strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
-        return mHidlFactory->openDevice(name, device);
-    }
-    return mLocalFactory->openDevice(name, device);
-}
-
-} // namespace V4_0
-} // namespace android
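
The removed hybrid factory above sends most modules through the HIDL factory but keeps the A2DP and hearing-aid modules on the legacy in-process path. A small self-contained sketch of that routing decision follows; the module-name constants are assumed placeholders for the AUDIO_HARDWARE_MODULE_ID_* macros from <hardware/audio.h>, not values taken from this change.

// Sketch of the routing decision in DevicesFactoryHalHybrid::openDevice().
// The constants below stand in for the AUDIO_HARDWARE_MODULE_ID_* macros;
// their values here are assumptions for illustration only.
#include <cstring>
#include <iostream>

namespace {
constexpr const char* kA2dpModule = "a2dp";               // assumed value
constexpr const char* kHearingAidModule = "hearing_aid";  // assumed value
}

enum class Factory { Hidl, Local };

// A2DP and hearing-aid HALs still load in-process (legacy path); everything
// else goes through the HIDL factory when it is available.
Factory pickFactory(const char* moduleName, bool hidlAvailable) {
    if (hidlAvailable &&
        std::strcmp(moduleName, kA2dpModule) != 0 &&
        std::strcmp(moduleName, kHearingAidModule) != 0) {
        return Factory::Hidl;
    }
    return Factory::Local;
}

int main() {
    std::cout << (pickFactory("primary", true) == Factory::Hidl ? "HIDL" : "local") << "\n";
    std::cout << (pickFactory("a2dp", true) == Factory::Hidl ? "HIDL" : "local") << "\n";
    return 0;
}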
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.h b/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
deleted file mode 100644
index bc1c521..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
-
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include "DeviceHalLocal.h"
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
-{
-  public:
-    // Opens a device with the specified name. To close the device, it is
-    // necessary to release references to the returned object.
-    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
-  private:
-    friend class DevicesFactoryHalHybrid;
-
-    // Can not be constructed directly by clients.
-    DevicesFactoryHalLocal() {}
-
-    virtual ~DevicesFactoryHalLocal() {}
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.h b/media/libaudiohal/4.0/EffectBufferHalHidl.h
deleted file mode 100644
index 6d578c6..0000000
--- a/media/libaudiohal/4.0/EffectBufferHalHidl.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/types.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidl/HidlSupport.h>
-#include <media/audiohal/EffectBufferHalInterface.h>
-#include <system/audio_effect.h>
-
-using android::hardware::audio::effect::V4_0::AudioBuffer;
-using android::hardware::hidl_memory;
-using android::hidl::memory::V1_0::IMemory;
-
-namespace android {
-namespace V4_0 {
-
-class EffectBufferHalHidl : public EffectBufferHalInterface
-{
-  public:
-    static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
-    static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
-
-    virtual audio_buffer_t* audioBuffer();
-    virtual void* externalData() const;
-
-    virtual size_t getSize() const override { return mBufferSize; }
-
-    virtual void setExternalData(void* external);
-    virtual void setFrameCount(size_t frameCount);
-    virtual bool checkFrameCountChange();
-
-    virtual void update();
-    virtual void commit();
-    virtual void update(size_t size);
-    virtual void commit(size_t size);
-
-    const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
-
-  private:
-    friend class EffectBufferHalInterface;
-
-    static uint64_t makeUniqueId();
-
-    const size_t mBufferSize;
-    bool mFrameCountChanged;
-    void* mExternalData;
-    AudioBuffer mHidlBuffer;
-    sp<IMemory> mMemory;
-    audio_buffer_t mAudioBuffer;
-
-    // Can not be constructed directly by clients.
-    explicit EffectBufferHalHidl(size_t size);
-
-    virtual ~EffectBufferHalHidl();
-
-    status_t init();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectHalHidl.h b/media/libaudiohal/4.0/EffectHalHidl.h
deleted file mode 100644
index 5a4dab1..0000000
--- a/media/libaudiohal/4.0/EffectHalHidl.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/IEffect.h>
-#include <media/audiohal/EffectHalInterface.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
-#include <system/audio_effect.h>
-
-using ::android::hardware::audio::effect::V4_0::EffectBufferConfig;
-using ::android::hardware::audio::effect::V4_0::EffectConfig;
-using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V4_0::IEffect;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-
-namespace android {
-namespace V4_0 {
-
-class EffectHalHidl : public EffectHalInterface
-{
-  public:
-    // Set the input buffer.
-    virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
-
-    // Set the output buffer.
-    virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
-
-    // Effect process function.
-    virtual status_t process();
-
-    // Process reverse stream function. This function is used to pass
-    // a reference stream to the effect engine.
-    virtual status_t processReverse();
-
-    // Send a command and receive a response to/from effect engine.
-    virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
-            uint32_t *replySize, void *pReplyData);
-
-    // Returns the effect descriptor.
-    virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
-
-    // Free resources on the remote side.
-    virtual status_t close();
-
-    // Whether it's a local implementation.
-    virtual bool isLocal() const { return false; }
-
-    uint64_t effectId() const { return mEffectId; }
-
-    static void effectDescriptorToHal(
-            const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor);
-
-  private:
-    friend class EffectsFactoryHalHidl;
-    typedef MessageQueue<
-        hardware::audio::effect::V4_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
-
-    sp<IEffect> mEffect;
-    const uint64_t mEffectId;
-    sp<EffectBufferHalInterface> mInBuffer;
-    sp<EffectBufferHalInterface> mOutBuffer;
-    bool mBuffersChanged;
-    std::unique_ptr<StatusMQ> mStatusMQ;
-    EventFlag* mEfGroup;
-
-    static status_t analyzeResult(const hardware::audio::effect::V4_0::Result& result);
-    static void effectBufferConfigFromHal(
-            const buffer_config_t& halConfig, EffectBufferConfig* config);
-    static void effectBufferConfigToHal(
-            const EffectBufferConfig& config, buffer_config_t* halConfig);
-    static void effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config);
-    static void effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig);
-
-    // Can not be constructed directly by clients.
-    EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
-
-    // The destructor automatically releases the effect.
-    virtual ~EffectHalHidl();
-
-    status_t getConfigImpl(uint32_t cmdCode, uint32_t *replySize, void *pReplyData);
-    status_t prepareForProcessing();
-    bool needToResetBuffers();
-    status_t processImpl(uint32_t mqFlag);
-    status_t setConfigImpl(
-            uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
-            uint32_t *replySize, void *pReplyData);
-    status_t setProcessBuffers();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamHalHidl.h b/media/libaudiohal/4.0/StreamHalHidl.h
deleted file mode 100644
index 2dda0f8..0000000
--- a/media/libaudiohal/4.0/StreamHalHidl.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
-
-#include <atomic>
-
-#include <android/hardware/audio/4.0/IStream.h>
-#include <android/hardware/audio/4.0/IStreamIn.h>
-#include <android/hardware/audio/4.0/IStreamOut.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
-#include <media/audiohal/StreamHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-#include "StreamPowerLog.h"
-
-using ::android::hardware::audio::V4_0::IStream;
-using ::android::hardware::audio::V4_0::IStreamIn;
-using ::android::hardware::audio::V4_0::IStreamOut;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::V4_0::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::V4_0::IStreamIn::ReadStatus;
-using WriteCommand = ::android::hardware::audio::V4_0::IStreamOut::WriteCommand;
-using WriteStatus = ::android::hardware::audio::V4_0::IStreamOut::WriteStatus;
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalHidl;
-
-class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
-{
-  public:
-    // Return the sampling rate in Hz - eg. 44100.
-    virtual status_t getSampleRate(uint32_t *rate);
-
-    // Return size of input/output buffer in bytes for this stream - eg. 4800.
-    virtual status_t getBufferSize(size_t *size);
-
-    // Return the channel mask.
-    virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
-    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
-    virtual status_t getFormat(audio_format_t *format);
-
-    // Convenience method.
-    virtual status_t getAudioProperties(
-            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
-    // Set audio stream parameters.
-    virtual status_t setParameters(const String8& kvPairs);
-
-    // Get audio stream parameters.
-    virtual status_t getParameters(const String8& keys, String8 *values);
-
-    // Add or remove the effect on the stream.
-    virtual status_t addEffect(sp<EffectHalInterface> effect);
-    virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
-    // Put the audio hardware input/output into standby mode.
-    virtual status_t standby();
-
-    virtual status_t dump(int fd);
-
-    // Start a stream operating in mmap mode.
-    virtual status_t start();
-
-    // Stop a stream operating in mmap mode.
-    virtual status_t stop();
-
-    // Retrieve information on the data buffer in mmap mode.
-    virtual status_t createMmapBuffer(int32_t minSizeFrames,
-                                      struct audio_mmap_buffer_info *info);
-
-    // Get current read/write position in the mmap buffer
-    virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
-    // Set the priority of the thread that interacts with the HAL
-    // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
-    virtual status_t setHalThreadPriority(int priority);
-
-  protected:
-    // Subclasses can not be constructed directly by clients.
-    explicit StreamHalHidl(IStream *stream);
-
-    // The destructor automatically closes the stream.
-    virtual ~StreamHalHidl();
-
-    status_t getCachedBufferSize(size_t *size);
-
-    bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
-
-    // mStreamPowerLog is used for audio signal power logging.
-    StreamPowerLog mStreamPowerLog;
-
-  private:
-    const int HAL_THREAD_PRIORITY_DEFAULT = -1;
-    IStream *mStream;
-    int mHalThreadPriority;
-    size_t mCachedBufferSize;
-};
-
-class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
-  public:
-    // Return the frame size (number of bytes in one audio frame) of a stream.
-    virtual status_t getFrameSize(size_t *size);
-
-    // Return the audio hardware driver estimated latency in milliseconds.
-    virtual status_t getLatency(uint32_t *latency);
-
-    // Use this method in situations where audio mixing is done in the hardware.
-    virtual status_t setVolume(float left, float right);
-
-    // Write audio buffer to driver.
-    virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
-    // Return the number of audio frames written by the audio dsp to DAC since
-    // the output has exited standby.
-    virtual status_t getRenderPosition(uint32_t *dspFrames);
-
-    // Get the local time at which the next write to the audio driver will be presented.
-    virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
-    // Set the callback for notifying completion of non-blocking write and drain.
-    virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
-    // Returns whether pause and resume operations are supported.
-    virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
-    // Notifies to the audio driver to pause playback.
-    virtual status_t pause();
-
-    // Notifies to the audio driver to resume playback following a pause.
-    virtual status_t resume();
-
-    // Returns whether drain operation is supported.
-    virtual status_t supportsDrain(bool *supportsDrain);
-
-    // Requests notification when data buffered by the driver/hardware has been played.
-    virtual status_t drain(bool earlyNotify);
-
-    // Notifies to the audio driver to flush the queued data.
-    virtual status_t flush();
-
-    // Return a recent count of the number of audio frames presented to an external observer.
-    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
-    // Called when the metadata of the stream's source has been changed.
-    status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
-
-    // Methods used by StreamOutCallback (HIDL).
-    void onWriteReady();
-    void onDrainReady();
-    void onError();
-
-  private:
-    friend class DeviceHalHidl;
-    typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
-    typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
-    typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
-    wp<StreamOutHalInterfaceCallback> mCallback;
-    sp<IStreamOut> mStream;
-    std::unique_ptr<CommandMQ> mCommandMQ;
-    std::unique_ptr<DataMQ> mDataMQ;
-    std::unique_ptr<StatusMQ> mStatusMQ;
-    std::atomic<pid_t> mWriterClient;
-    EventFlag* mEfGroup;
-
-    // Can not be constructed directly by clients.
-    StreamOutHalHidl(const sp<IStreamOut>& stream);
-
-    virtual ~StreamOutHalHidl();
-
-    using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
-    status_t callWriterThread(
-            WriteCommand cmd, const char* cmdName,
-            const uint8_t* data, size_t dataSize, WriterCallback callback);
-    status_t prepareForWriting(size_t bufferSize);
-};
-
-class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
-  public:
-    // Return the frame size (number of bytes per sample) of a stream.
-    virtual status_t getFrameSize(size_t *size);
-
-    // Set the input gain for the audio driver.
-    virtual status_t setGain(float gain);
-
-    // Read audio buffer in from driver.
-    virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
-    // Return the amount of input frames lost in the audio driver.
-    virtual status_t getInputFramesLost(uint32_t *framesLost);
-
-    // Return a recent count of the number of audio frames received and
-    // the clock time associated with that frame count.
-    virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
-    // Get active microphones
-    virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
-    // Called when the metadata of the stream's sink has been changed.
-    status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
-
-  private:
-    friend class DeviceHalHidl;
-    typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
-    typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
-    typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
-    sp<IStreamIn> mStream;
-    std::unique_ptr<CommandMQ> mCommandMQ;
-    std::unique_ptr<DataMQ> mDataMQ;
-    std::unique_ptr<StatusMQ> mStatusMQ;
-    std::atomic<pid_t> mReaderClient;
-    EventFlag* mEfGroup;
-
-    // Can not be constructed directly by clients.
-    StreamInHalHidl(const sp<IStreamIn>& stream);
-
-    virtual ~StreamInHalHidl();
-
-    using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
-    status_t callReaderThread(
-            const ReadParameters& params, const char* cmdName, ReaderCallback callback);
-    status_t prepareForReading(size_t bufferSize);
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
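The fast-message-queue plumbing declared above (CommandMQ, DataMQ, StatusMQ and the EventFlag) carries over unchanged into the renamed impl/StreamHalHidl.h; only the namespace becomes CPP_VERSION. As a rough sketch of what callWriterThread() does with these members (error handling and the blocking wait are omitted; MessageQueueFlagBits and the reply field layout are assumptions, not part of this hunk):

    // Sketch only; the real logic lives in StreamHalHidl.cpp.
    WriteCommand cmd = WriteCommand::WRITE;
    mCommandMQ->write(&cmd);                                      // 1. enqueue the command
    mDataMQ->write(static_cast<const uint8_t*>(buffer), bytes);   // 2. enqueue the payload
    mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)); // 3. wake the HAL side
    // 4. wait on mEfGroup, pop a WriteStatus from mStatusMQ, and report
    //    the written byte count from its reply back to the caller.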
diff --git a/media/libaudiohal/4.0/StreamHalLocal.h b/media/libaudiohal/4.0/StreamHalLocal.h
deleted file mode 100644
index 7237509..0000000
--- a/media/libaudiohal/4.0/StreamHalLocal.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
-
-#include <media/audiohal/StreamHalInterface.h>
-#include "StreamPowerLog.h"
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalLocal;
-
-class StreamHalLocal : public virtual StreamHalInterface
-{
-  public:
-    // Return the sampling rate in Hz - eg. 44100.
-    virtual status_t getSampleRate(uint32_t *rate);
-
-    // Return size of input/output buffer in bytes for this stream - eg. 4800.
-    virtual status_t getBufferSize(size_t *size);
-
-    // Return the channel mask.
-    virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
-    // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
-    virtual status_t getFormat(audio_format_t *format);
-
-    // Convenience method.
-    virtual status_t getAudioProperties(
-            uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
-    // Set audio stream parameters.
-    virtual status_t setParameters(const String8& kvPairs);
-
-    // Get audio stream parameters.
-    virtual status_t getParameters(const String8& keys, String8 *values);
-
-    // Add or remove the effect on the stream.
-    virtual status_t addEffect(sp<EffectHalInterface> effect);
-    virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
-    // Put the audio hardware input/output into standby mode.
-    virtual status_t standby();
-
-    virtual status_t dump(int fd);
-
-    // Start a stream operating in mmap mode.
-    virtual status_t start() = 0;
-
-    // Stop a stream operating in mmap mode.
-    virtual status_t stop() = 0;
-
-    // Retrieve information on the data buffer in mmap mode.
-    virtual status_t createMmapBuffer(int32_t minSizeFrames,
-                                      struct audio_mmap_buffer_info *info) = 0;
-
-    // Get current read/write position in the mmap buffer
-    virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
-
-    // Set the priority of the thread that interacts with the HAL
-    // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
-    virtual status_t setHalThreadPriority(int priority);
-
-  protected:
-    // Subclasses can not be constructed directly by clients.
-    StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
-
-    // The destructor automatically closes the stream.
-    virtual ~StreamHalLocal();
-
-    sp<DeviceHalLocal> mDevice;
-
-    // mStreamPowerLog is used for audio signal power logging.
-    StreamPowerLog mStreamPowerLog;
-
-  private:
-    audio_stream_t *mStream;
-};
-
-class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
-  public:
-    // Return the frame size (number of bytes per sample) of a stream.
-    virtual status_t getFrameSize(size_t *size);
-
-    // Return the audio hardware driver estimated latency in milliseconds.
-    virtual status_t getLatency(uint32_t *latency);
-
-    // Use this method in situations where audio mixing is done in the hardware.
-    virtual status_t setVolume(float left, float right);
-
-    // Write audio buffer to driver.
-    virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
-    // Return the number of audio frames written by the audio dsp to DAC since
-    // the output has exited standby.
-    virtual status_t getRenderPosition(uint32_t *dspFrames);
-
-    // Get the local time at which the next write to the audio driver will be presented.
-    virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
-    // Set the callback for notifying completion of non-blocking write and drain.
-    virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
-    // Returns whether pause and resume operations are supported.
-    virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
-    // Notifies to the audio driver to pause playback.
-    virtual status_t pause();
-
-    // Notifies to the audio driver to resume playback following a pause.
-    virtual status_t resume();
-
-    // Returns whether drain operation is supported.
-    virtual status_t supportsDrain(bool *supportsDrain);
-
-    // Requests notification when data buffered by the driver/hardware has been played.
-    virtual status_t drain(bool earlyNotify);
-
-    // Notifies to the audio driver to flush the queued data.
-    virtual status_t flush();
-
-    // Return a recent count of the number of audio frames presented to an external observer.
-    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
-    // Start a stream operating in mmap mode.
-    virtual status_t start();
-
-    // Stop a stream operating in mmap mode.
-    virtual status_t stop();
-
-    // Retrieve information on the data buffer in mmap mode.
-    virtual status_t createMmapBuffer(int32_t minSizeFrames,
-                                      struct audio_mmap_buffer_info *info);
-
-    // Get current read/write position in the mmap buffer
-    virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
-    // Called when the metadata of the stream's source has been changed.
-    status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
-
-  private:
-    audio_stream_out_t *mStream;
-    wp<StreamOutHalInterfaceCallback> mCallback;
-
-    friend class DeviceHalLocal;
-
-    // Can not be constructed directly by clients.
-    StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
-
-    virtual ~StreamOutHalLocal();
-
-    static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
-};
-
-class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
-  public:
-    // Return the frame size (number of bytes per sample) of a stream.
-    virtual status_t getFrameSize(size_t *size);
-
-    // Set the input gain for the audio driver.
-    virtual status_t setGain(float gain);
-
-    // Read audio buffer in from driver.
-    virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
-    // Return the amount of input frames lost in the audio driver.
-    virtual status_t getInputFramesLost(uint32_t *framesLost);
-
-    // Return a recent count of the number of audio frames received and
-    // the clock time associated with that frame count.
-    virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
-    // Start a stream operating in mmap mode.
-    virtual status_t start();
-
-    // Stop a stream operating in mmap mode.
-    virtual status_t stop();
-
-    // Retrieve information on the data buffer in mmap mode.
-    virtual status_t createMmapBuffer(int32_t minSizeFrames,
-                                      struct audio_mmap_buffer_info *info);
-
-    // Get current read/write position in the mmap buffer
-    virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
-    // Get active microphones
-    virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
-    // Called when the metadata of the stream's sink has been changed.
-    status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
-
-  private:
-    audio_stream_in_t *mStream;
-
-    friend class DeviceHalLocal;
-
-    // Can not be constructed directly by clients.
-    StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
-
-    virtual ~StreamInHalLocal();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamPowerLog.h b/media/libaudiohal/4.0/StreamPowerLog.h
deleted file mode 100644
index 57b7201..0000000
--- a/media/libaudiohal/4.0/StreamPowerLog.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
-#define ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
-
-#include <audio_utils/clock.h>
-#include <audio_utils/PowerLog.h>
-#include <cutils/properties.h>
-#include <system/audio.h>
-
-namespace android {
-namespace V4_0 {
-
-class StreamPowerLog {
-public:
-    StreamPowerLog() :
-        mIsUserDebugOrEngBuild(is_userdebug_or_eng_build()),
-        mPowerLog(nullptr),
-        mFrameSize(0) {
-        // use init() to set up the power log.
-    }
-
-    ~StreamPowerLog() {
-        power_log_destroy(mPowerLog); // OK for null mPowerLog
-        mPowerLog = nullptr;
-    }
-
-    // A one-time initialization (do not call twice) before using StreamPowerLog.
-    void init(uint32_t sampleRate, audio_channel_mask_t channelMask, audio_format_t format) {
-        if (mPowerLog == nullptr) {
-            // Note: A way to get channel count for both input and output channel masks
-            // but does not check validity of the channel mask.
-            const uint32_t channelCount = popcount(audio_channel_mask_get_bits(channelMask));
-            mFrameSize = channelCount * audio_bytes_per_sample(format);
-            if (mFrameSize > 0) {
-                const size_t kPowerLogFramesPerEntry =
-                        (long long)sampleRate * kPowerLogSamplingIntervalMs / 1000;
-                mPowerLog = power_log_create(
-                        sampleRate,
-                        channelCount,
-                        format,
-                        kPowerLogEntries,
-                        kPowerLogFramesPerEntry);
-            }
-        }
-        // mPowerLog may be NULL (not the right build, format not accepted, etc.).
-    }
-
-    // Dump the power log to fd.
-    void dump(int fd) const {
-        // OK for null mPowerLog
-        (void)power_log_dump(
-                mPowerLog, fd, "      " /* prefix */, kPowerLogLines, 0 /* limit_ns */);
-    }
-
-    // Log the audio data contained in buffer.
-    void log(const void *buffer, size_t sizeInBytes) const {
-        if (mPowerLog != nullptr) { // mFrameSize is always nonzero if mPowerLog exists.
-            power_log_log(
-                    mPowerLog, buffer, sizeInBytes / mFrameSize, audio_utils_get_real_time_ns());
-        }
-    }
-
-    bool isUserDebugOrEngBuild() const {
-        return mIsUserDebugOrEngBuild;
-    }
-
-private:
-
-    static inline bool is_userdebug_or_eng_build() {
-        char value[PROPERTY_VALUE_MAX];
-        (void)property_get("ro.build.type", value, "unknown"); // ignore actual length
-        return strcmp(value, "userdebug") == 0 || strcmp(value, "eng") == 0;
-    }
-
-    // Audio signal power log configuration.
-    static const size_t kPowerLogLines = 40;
-    static const size_t kPowerLogSamplingIntervalMs = 50;
-    static const size_t kPowerLogEntries = (1 /* minutes */ * 60 /* seconds */ * 1000 /* msec */
-            / kPowerLogSamplingIntervalMs);
-
-    const bool mIsUserDebugOrEngBuild;
-    power_log_t *mPowerLog;
-    size_t mFrameSize;
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
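StreamPowerLog moves verbatim to impl/StreamPowerLog.h; it is driven by the stream wrappers, not by clients. A minimal usage sketch of the intended lifecycle, with placeholder names for the stream's configuration and buffers:

    StreamPowerLog powerLog;
    powerLog.init(sampleRate, channelMask, format);  // once, after the stream is configured
    if (powerLog.isUserDebugOrEngBuild()) {
        powerLog.log(buffer, bytesTransferred);      // on every completed write() or read()
    }
    powerLog.dump(fd);                               // from the stream's dump(fd)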
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
deleted file mode 100644
index abf6de0..0000000
--- a/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
-
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
-{
-  public:
-    // Opens a device with the specified name. To close the device, it is
-    // necessary to release references to the returned object.
-    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
-  private:
-    friend class DevicesFactoryHalInterface;
-
-    // Can not be constructed directly by clients.
-    DevicesFactoryHalHybrid();
-
-    virtual ~DevicesFactoryHalHybrid();
-
-    sp<DevicesFactoryHalInterface> mLocalFactory;
-    sp<DevicesFactoryHalInterface> mHidlFactory;
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
deleted file mode 100644
index 680b7a1..0000000
--- a/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/4.0/types.h>
-#include <media/audiohal/EffectsFactoryHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-
-namespace android {
-namespace V4_0 {
-
-using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V4_0::IEffectsFactory;
-using ::android::hardware::hidl_vec;
-
-class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
-{
-  public:
-    // Returns the number of different effects in all loaded libraries.
-    virtual status_t queryNumberEffects(uint32_t *pNumEffects);
-
-    // Returns a descriptor of the next available effect.
-    virtual status_t getDescriptor(uint32_t index,
-            effect_descriptor_t *pDescriptor);
-
-    virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
-            effect_descriptor_t *pDescriptor);
-
-    // Creates an effect engine of the specified type.
-    // To release the effect engine, it is necessary to release references
-    // to the returned effect object.
-    virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
-            int32_t sessionId, int32_t ioId,
-            sp<EffectHalInterface> *effect);
-
-    virtual status_t dumpEffects(int fd);
-
-    status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
-    status_t mirrorBuffer(void* external, size_t size,
-                          sp<EffectBufferHalInterface>* buffer) override;
-
-  private:
-    friend class EffectsFactoryHalInterface;
-
-    sp<IEffectsFactory> mEffectsFactory;
-    hidl_vec<EffectDescriptor> mLastDescriptors;
-
-    // Can not be constructed directly by clients.
-    EffectsFactoryHalHidl();
-    virtual ~EffectsFactoryHalHidl();
-
-    status_t queryAllDescriptors();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 3a5df27..0ff0d4a 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -8,6 +8,7 @@
 
     cflags: [
         "-Wall",
+        "-Wextra",
         "-Werror",
     ],
 
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index 4c8eaf6..e631ace 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -17,18 +17,17 @@
 #include <android/hardware/audio/2.0/IDevicesFactory.h>
 #include <android/hardware/audio/4.0/IDevicesFactory.h>
 
-#include <DevicesFactoryHalHybrid.h>
-#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
+#include <libaudiohal/FactoryHalHidl.h>
 
 namespace android {
 
 // static
 sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
     if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
-        return new V4_0::DevicesFactoryHalHybrid();
+        return V4_0::createDevicesFactoryHal();
     }
     if (hardware::audio::V2_0::IDevicesFactory::getService() != nullptr) {
-        return new DevicesFactoryHalHybrid();
+        return V2_0::createDevicesFactoryHal();
     }
     return nullptr;
 }
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index ead1fa2..f7734a8 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -17,19 +17,17 @@
 #include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
 #include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
 
-#include <EffectsFactoryHalHidl.h>
-#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
-
+#include <libaudiohal/FactoryHalHidl.h>
 
 namespace android {
 
 // static
 sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
     if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
-        return new V4_0::EffectsFactoryHalHidl();
+        return V4_0::createEffectsFactoryHal();
     }
     if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
-        return new EffectsFactoryHalHidl();
+        return V2_0::createEffectsFactoryHal();
     }
     return nullptr;
 }
diff --git a/media/libaudiohal/4.0/Android.bp b/media/libaudiohal/impl/Android.bp
similarity index 68%
rename from media/libaudiohal/4.0/Android.bp
rename to media/libaudiohal/impl/Android.bp
index 833defa..3827336 100644
--- a/media/libaudiohal/4.0/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -1,5 +1,5 @@
-cc_library_shared {
-    name: "libaudiohal@4.0",
+cc_defaults {
+    name: "libaudiohal_default",
 
     srcs: [
         "DeviceHalLocal.cpp",
@@ -24,28 +24,30 @@
         "-Werror",
     ],
     shared_libs: [
+        "android.hardware.audio.common-util",
+        "android.hardware.audio.common@2.0",
+        "android.hardware.audio.common@4.0",
+        "android.hardware.audio.effect@2.0",
+        "android.hardware.audio.effect@4.0",
+        "android.hardware.audio@2.0",
+        "android.hardware.audio@4.0",
+        "android.hidl.allocator@1.0",
+        "android.hidl.memory@1.0",
         "libaudiohal_deathhandler",
         "libaudioutils",
+        "libbase",
         "libbinder",
         "libcutils",
-        "liblog",
-        "libutils",
-        "libhardware",
-        "libbase",
         "libfmq",
-        "libhwbinder",
+        "libhardware",
         "libhidlbase",
         "libhidlmemory",
         "libhidltransport",
-        "android.hardware.audio@4.0",
-        "android.hardware.audio.common-util",
-        "android.hardware.audio.common@4.0",
-        "android.hardware.audio.common@4.0-util",
-        "android.hardware.audio.effect@4.0",
-        "android.hidl.allocator@1.0",
-        "android.hidl.memory@1.0",
+        "libhwbinder",
+        "liblog",
         "libmedia_helper",
         "libmediautils",
+        "libutils",
     ],
     header_libs: [
         "android.hardware.audio.common.util@all-versions",
@@ -56,3 +58,29 @@
         "libfmq",
     ],
 }
+
+cc_library_shared {
+    name: "libaudiohal@2.0",
+    defaults: ["libaudiohal_default"],
+    shared_libs: [
+        "android.hardware.audio.common@2.0-util",
+    ],
+    cflags: [
+        "-DMAJOR_VERSION=2",
+        "-DMINOR_VERSION=0",
+        "-include VersionMacro.h",
+    ]
+}
+
+cc_library_shared {
+    name: "libaudiohal@4.0",
+    defaults: ["libaudiohal_default"],
+    shared_libs: [
+        "android.hardware.audio.common@4.0-util",
+    ],
+    cflags: [
+        "-DMAJOR_VERSION=4",
+        "-DMINOR_VERSION=0",
+        "-include VersionMacro.h",
+    ]
+}
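Both libraries compile the same sources; the only difference is the MAJOR_VERSION/MINOR_VERSION definitions plus the force-included VersionMacro.h, which is what lets the renamed files use namespace CPP_VERSION instead of V2_0 or V4_0. VersionMacro.h itself is not shown in this change; a plausible sketch of what it needs to provide:

    // Sketch, not the actual header: paste the version numbers into a namespace token,
    // e.g. -DMAJOR_VERSION=4 -DMINOR_VERSION=0 yields CPP_VERSION == V4_0.
    #define CONCAT_4(a, b, c, d) a##b##c##d
    #define EXPAND_CONCAT_4(a, b, c, d) CONCAT_4(a, b, c, d)
    #define CPP_VERSION EXPAND_CONCAT_4(V, MAJOR_VERSION, _, MINOR_VERSION)

With that in place, each cc_library_shared instantiates android::V2_0 or android::V4_0 from a single copy of the code.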
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
similarity index 90%
rename from media/libaudiohal/4.0/ConversionHelperHidl.cpp
rename to media/libaudiohal/impl/ConversionHelperHidl.cpp
index fe27504..5d12fad 100644
--- a/media/libaudiohal/4.0/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -22,15 +22,18 @@
 
 #include "ConversionHelperHidl.h"
 
-using ::android::hardware::audio::V4_0::AudioMicrophoneChannelMapping;
-using ::android::hardware::audio::V4_0::AudioMicrophoneDirectionality;
-using ::android::hardware::audio::V4_0::AudioMicrophoneLocation;
-using ::android::hardware::audio::V4_0::DeviceAddress;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::CPP_VERSION::Result;
+
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneChannelMapping;
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneDirectionality;
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneLocation;
+using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+#endif
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 // static
 status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
@@ -106,8 +109,9 @@
     ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
 }
 
+#if MAJOR_VERSION == 4
 // TODO: Use the same implementation in the hal when it moves to a util library.
-std::string deviceAddressToHal(const DeviceAddress& address) {
+static std::string deviceAddressToHal(const DeviceAddress& address) {
     // HAL assumes that the address is NUL-terminated.
     char halAddress[AUDIO_DEVICE_MAX_ADDRESS_LEN];
     memset(halAddress, 0, sizeof(halAddress));
@@ -141,7 +145,7 @@
 
 //local conversion helpers
 
-audio_microphone_channel_mapping_t  channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
+static audio_microphone_channel_mapping_t  channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
     switch (mapping) {
         case AudioMicrophoneChannelMapping::UNUSED:
             return AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
@@ -154,7 +158,7 @@
     }
 }
 
-audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
+static audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
     switch (location) {
         case AudioMicrophoneLocation::UNKNOWN:
             return AUDIO_MICROPHONE_LOCATION_UNKNOWN;
@@ -168,7 +172,7 @@
             LOG_ALWAYS_FATAL("Unknown locationToHal conversion %d", location);
     }
 }
-audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
+static audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
     switch (dir) {
         case AudioMicrophoneDirectionality::UNKNOWN:
             return AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
@@ -187,9 +191,8 @@
     }
 }
 
-// static
-void ConversionHelperHidl::microphoneInfoToHal(const MicrophoneInfo& src,
-                                                     audio_microphone_characteristic_t *pDst) {
+void microphoneInfoToHal(const MicrophoneInfo& src,
+                         audio_microphone_characteristic_t *pDst) {
     if (pDst != NULL) {
         snprintf(pDst->device_id, sizeof(pDst->device_id),
                  "%s", src.deviceId.c_str());
@@ -232,6 +235,7 @@
         pDst->orientation.z = src.orientation.z;
     }
 }
+#endif
 
-}  // namespace V4_0
+}  // namespace CPP_VERSION
 }  // namespace android
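microphoneInfoToHal() is now a free function compiled only for the 4.0 flavor, and DeviceHalHidl::getMicrophones() later in this patch becomes its caller. The conversion it feeds looks roughly like this (a sketch; hidlMic is a placeholder for one element of the HIDL microphone array, and it assumes media::MicrophoneInfo still constructs from audio_microphone_characteristic_t):

    audio_microphone_characteristic_t characteristics;
    microphoneInfoToHal(hidlMic, &characteristics);        // HIDL struct -> legacy C struct
    microphonesInfo->push_back(media::MicrophoneInfo(characteristics)); // wrap for the framework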
diff --git a/media/libaudiohal/2.0/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
similarity index 78%
rename from media/libaudiohal/2.0/ConversionHelperHidl.h
rename to media/libaudiohal/impl/ConversionHelperHidl.h
index c356f37..1a9319f 100644
--- a/media/libaudiohal/2.0/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -18,15 +18,20 @@
 #define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
 
 #include <android/hardware/audio/2.0/types.h>
+#include <android/hardware/audio/4.0/types.h>
 #include <hidl/HidlSupport.h>
+#include <system/audio.h>
 #include <utils/String8.h>
 
-using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using CoreResult = ::android::hardware::audio::CPP_VERSION::Result;
+
 using ::android::hardware::Return;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 
 namespace android {
+namespace CPP_VERSION {
 
 class ConversionHelperHidl {
   protected:
@@ -54,7 +59,7 @@
         return ret.isOk() ? OK : FAILED_TRANSACTION;
     }
 
-    status_t processReturn(const char* funcName, const Return<hardware::audio::V2_0::Result>& ret) {
+    status_t processReturn(const char* funcName, const Return<CoreResult>& ret) {
         if (!ret.isOk()) {
             emitError(funcName, ret.description().c_str());
         }
@@ -63,7 +68,7 @@
 
     template<typename T>
     status_t processReturn(
-            const char* funcName, const Return<T>& ret, hardware::audio::V2_0::Result retval) {
+            const char* funcName, const Return<T>& ret, CoreResult retval) {
         if (!ret.isOk()) {
             emitError(funcName, ret.description().c_str());
         }
@@ -73,11 +78,18 @@
   private:
     const char* mClassName;
 
-    static status_t analyzeResult(const hardware::audio::V2_0::Result& result);
+    static status_t analyzeResult(const CoreResult& result);
 
     void emitError(const char* funcName, const char* description);
 };
 
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+void microphoneInfoToHal(const MicrophoneInfo& src,
+                         audio_microphone_characteristic_t *pDst);
+#endif
+
+}  // namespace CPP_VERSION
 }  // namespace android
 
 #endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
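Replacing the hard-coded V2_0::Result with the CoreResult alias is what allows the same processReturn() helpers to compile against both HAL versions. For illustration (not copied from this patch), a typical wrapper keeps the familiar shape:

    status_t DeviceHalHidl::setMasterVolume(float volume) {
        if (mDevice == 0) return NO_INIT;
        // The HIDL call returns Return<CoreResult>; processReturn() folds transport
        // failures and HAL error codes into a single status_t.
        return processReturn("setMasterVolume", mDevice->setMasterVolume(volume));
    }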
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
similarity index 88%
rename from media/libaudiohal/4.0/DeviceHalHidl.cpp
rename to media/libaudiohal/impl/DeviceHalHidl.cpp
index 6facca9..723e2eb 100644
--- a/media/libaudiohal/4.0/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -19,6 +19,7 @@
 #define LOG_TAG "DeviceHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <android/hardware/audio/2.0/IPrimaryDevice.h>
 #include <android/hardware/audio/4.0/IPrimaryDevice.h>
 #include <cutils/native_handle.h>
 #include <hwbinder/IPCThreadState.h>
@@ -31,27 +32,30 @@
 #include "StreamHalHidl.h"
 #include "VersionUtils.h"
 
-using ::android::hardware::audio::common::V4_0::AudioConfig;
-using ::android::hardware::audio::common::V4_0::AudioDevice;
-using ::android::hardware::audio::common::V4_0::AudioInputFlag;
-using ::android::hardware::audio::common::V4_0::AudioOutputFlag;
-using ::android::hardware::audio::common::V4_0::AudioPatchHandle;
-using ::android::hardware::audio::common::V4_0::AudioPort;
-using ::android::hardware::audio::common::V4_0::AudioPortConfig;
-using ::android::hardware::audio::common::V4_0::AudioMode;
-using ::android::hardware::audio::common::V4_0::AudioSource;
-using ::android::hardware::audio::common::V4_0::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::AudioConfig;
+using ::android::hardware::audio::common::CPP_VERSION::AudioDevice;
+using ::android::hardware::audio::common::CPP_VERSION::AudioInputFlag;
+using ::android::hardware::audio::common::CPP_VERSION::AudioOutputFlag;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPatchHandle;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPort;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPortConfig;
+using ::android::hardware::audio::common::CPP_VERSION::AudioMode;
+using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
 using ::android::hardware::audio::common::utils::mkEnumConverter;
-using ::android::hardware::audio::V4_0::DeviceAddress;
-using ::android::hardware::audio::V4_0::IPrimaryDevice;
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::audio::V4_0::SinkMetadata;
+using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
+using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
 
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::SinkMetadata;
+#endif
+
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 namespace {
 
@@ -259,7 +263,9 @@
             hidlDevice,
             hidlConfig,
             mkEnumConverter<AudioOutputFlag>(flags),
+#if MAJOR_VERSION == 4
             {} /* metadata */,
+#endif
             [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
                 retval = r;
                 if (retval == Result::OK) {
@@ -285,15 +291,19 @@
     AudioConfig hidlConfig;
     HidlUtils::audioConfigFromHal(*config, &hidlConfig);
     Result retval = Result::NOT_INITIALIZED;
+#if MAJOR_VERSION == 2
+    auto sourceMetadata = AudioSource(source);
+#elif MAJOR_VERSION == 4
     // TODO: correctly propagate the tracks sources and volume
     //       for now, only send the main source at 1dbfs
-    SinkMetadata metadata = {{{AudioSource(source), 1}}};
+    SinkMetadata sourceMetadata = {{{AudioSource(source), 1}}};
+#endif
     Return<void> ret = mDevice->openInputStream(
             handle,
             hidlDevice,
             hidlConfig,
-            flags,
-            metadata,
+            mkEnumConverter<AudioInputFlag>(flags),
+            sourceMetadata,
             [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
                 retval = r;
                 if (retval == Result::OK) {
@@ -359,6 +369,13 @@
     return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
 }
 
+#if MAJOR_VERSION == 2
+status_t DeviceHalHidl::getMicrophones(
+        std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
+    if (mDevice == 0) return NO_INIT;
+    return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
 status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
     if (mDevice == 0) return NO_INIT;
     Result retval;
@@ -375,6 +392,7 @@
     });
     return processReturn("getMicrophones", ret, retval);
 }
+#endif
 
 status_t DeviceHalHidl::dump(int fd) {
     if (mDevice == 0) return NO_INIT;
@@ -385,5 +403,5 @@
     return processReturn("dump", ret);
 }
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
similarity index 94%
rename from media/libaudiohal/2.0/DeviceHalHidl.h
rename to media/libaudiohal/impl/DeviceHalHidl.h
index 3c1cb59..fb5e7e7 100644
--- a/media/libaudiohal/2.0/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -18,16 +18,19 @@
 #define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
 
 #include <android/hardware/audio/2.0/IDevice.h>
+#include <android/hardware/audio/4.0/IDevice.h>
 #include <android/hardware/audio/2.0/IPrimaryDevice.h>
+#include <android/hardware/audio/4.0/IPrimaryDevice.h>
 #include <media/audiohal/DeviceHalInterface.h>
 
 #include "ConversionHelperHidl.h"
 
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
+using ::android::hardware::audio::CPP_VERSION::IDevice;
+using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
 using ::android::hardware::Return;
 
 namespace android {
+namespace CPP_VERSION {
 
 class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
 {
@@ -124,6 +127,7 @@
     virtual ~DeviceHalHidl();
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
similarity index 96%
rename from media/libaudiohal/4.0/DeviceHalLocal.cpp
rename to media/libaudiohal/impl/DeviceHalLocal.cpp
index a245dd9..14e26f5 100644
--- a/media/libaudiohal/4.0/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -23,7 +23,7 @@
 #include "StreamHalLocal.h"
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
         : mDev(dev) {
@@ -185,6 +185,12 @@
         return INVALID_OPERATION;
 }
 
+#if MAJOR_VERSION == 2
+status_t DeviceHalLocal::getMicrophones(
+        std::vector<media::MicrophoneInfo> *microphones __unused) {
+    return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
 status_t DeviceHalLocal::getMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
     if (mDev->get_microphones == NULL) return INVALID_OPERATION;
     size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
@@ -196,6 +202,7 @@
     }
     return status;
 }
+#endif
 
 status_t DeviceHalLocal::dump(int fd) {
     return mDev->dump(mDev, fd);
@@ -209,5 +216,5 @@
     mDev->close_input_stream(mDev, stream_in);
 }
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
similarity index 98%
rename from media/libaudiohal/2.0/DeviceHalLocal.h
rename to media/libaudiohal/impl/DeviceHalLocal.h
index aec201a..18bd879 100644
--- a/media/libaudiohal/2.0/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -21,6 +21,7 @@
 #include <media/audiohal/DeviceHalInterface.h>
 
 namespace android {
+namespace CPP_VERSION {
 
 class DeviceHalLocal : public DeviceHalInterface
 {
@@ -122,6 +123,7 @@
     uint32_t version() const { return mDev->common.version; }
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
new file mode 100644
index 0000000..28001da
--- /dev/null
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+#include <vector>
+
+#define LOG_TAG "DevicesFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <android/hardware/audio/4.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "DeviceHalHidl.h"
+#include "DevicesFactoryHalHidl.h"
+
+using ::android::hardware::audio::CPP_VERSION::IDevice;
+using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::Return;
+
+namespace android {
+namespace CPP_VERSION {
+
+DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
+    sp<IDevicesFactory> defaultFactory{IDevicesFactory::getService()};
+    if (!defaultFactory) {
+        ALOGE("Failed to obtain IDevicesFactory/default service, terminating process.");
+        exit(1);
+    }
+    mDeviceFactories.push_back(defaultFactory);
+    if (MAJOR_VERSION >= 4) {
+        // The MSD factory is optional and only available starting at HAL 4.0
+        sp<IDevicesFactory> msdFactory{IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD)};
+        if (msdFactory) {
+            mDeviceFactories.push_back(msdFactory);
+        }
+    }
+    for (const auto& factory : mDeviceFactories) {
+        // It is assumed that the DevicesFactoryHalInterface instance is owned
+        // by AudioFlinger and thus has the same lifespan.
+        factory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+    }
+}
+
+
+#if MAJOR_VERSION == 2
+static IDevicesFactory::Device idFromHal(const char *name, status_t* status) {
+    *status = OK;
+    if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+        return IDevicesFactory::Device::PRIMARY;
+    } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
+        return IDevicesFactory::Device::A2DP;
+    } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
+        return IDevicesFactory::Device::USB;
+    } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
+        return IDevicesFactory::Device::R_SUBMIX;
+    } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
+        return IDevicesFactory::Device::STUB;
+    }
+    ALOGE("Invalid device name %s", name);
+    *status = BAD_VALUE;
+    return {};
+}
+#elif MAJOR_VERSION == 4
+static const char* idFromHal(const char *name, status_t* status) {
+    *status = OK;
+    return name;
+}
+#endif
+
+status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+    if (mDeviceFactories.empty()) return NO_INIT;
+    status_t status;
+    auto hidlId = idFromHal(name, &status);
+    if (status != OK) return status;
+    Result retval = Result::NOT_INITIALIZED;
+    for (const auto& factory : mDeviceFactories) {
+        Return<void> ret = factory->openDevice(
+                hidlId,
+                [&](Result r, const sp<IDevice>& result) {
+                    retval = r;
+                    if (retval == Result::OK) {
+                        *device = new DeviceHalHidl(result);
+                    }
+                });
+        if (!ret.isOk()) return FAILED_TRANSACTION;
+        switch (retval) {
+            // Device was found and was initialized successfully.
+            case Result::OK: return OK;
+            // Device was found but failed to initialize.
+            case Result::NOT_INITIALIZED: return NO_INIT;
+            // Otherwise continue iterating.
+            default: ;
+        }
+    }
+    ALOGW("The specified device name is not recognized: \"%s\"", name);
+    return BAD_VALUE;
+}
+
+} // namespace CPP_VERSION
+} // namespace android
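The rewritten factory keeps a vector of IDevicesFactory services (the default one, plus the optional MSD factory on 4.0) instead of two named members, and tries each in turn. Callers are unaffected; a sketch of the client side, using only the interfaces touched by this patch ("primary" stands in for AUDIO_HARDWARE_MODULE_ID_PRIMARY):

    sp<DevicesFactoryHalInterface> factory = DevicesFactoryHalInterface::create();
    sp<DeviceHalInterface> device;
    // The name is translated by idFromHal(): mapped to an enum on 2.0, passed through on 4.0.
    status_t status = factory->openDevice("primary", &device);
    // status is OK on success, NO_INIT if a factory found the module but could not
    // initialize it, and BAD_VALUE if no factory recognized the name.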
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
similarity index 84%
rename from media/libaudiohal/2.0/DevicesFactoryHalHidl.h
rename to media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 0748849..a4282b0 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -18,15 +18,17 @@
 #define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
 
 #include <android/hardware/audio/2.0/IDevicesFactory.h>
+#include <android/hardware/audio/4.0/IDevicesFactory.h>
 #include <media/audiohal/DevicesFactoryHalInterface.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 
 #include "DeviceHalHidl.h"
 
-using ::android::hardware::audio::V2_0::IDevicesFactory;
+using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
 
 namespace android {
+namespace CPP_VERSION {
 
 class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
 {
@@ -38,17 +40,15 @@
   private:
     friend class DevicesFactoryHalHybrid;
 
-    sp<IDevicesFactory> mDevicesFactory;
-    sp<IDevicesFactory> mDevicesFactoryMsd;
-
-    static status_t nameFromHal(const char *name, IDevicesFactory::Device *device);
+    std::vector<sp<IDevicesFactory>> mDeviceFactories;
 
     // Can not be constructed directly by clients.
     DevicesFactoryHalHidl();
 
-    virtual ~DevicesFactoryHalHidl();
+    virtual ~DevicesFactoryHalHidl() = default;
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
similarity index 90%
rename from media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
rename to media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index 77df6b5..f337a8b 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -22,20 +22,20 @@
 #include "DevicesFactoryHalHidl.h"
 
 namespace android {
+namespace CPP_VERSION {
 
 DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
         : mLocalFactory(new DevicesFactoryHalLocal()),
           mHidlFactory(new DevicesFactoryHalHidl()) {
 }
 
-DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
-}
-
 status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
-    if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0) {
+    if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
+        strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
         return mHidlFactory->openDevice(name, device);
     }
     return mLocalFactory->openDevice(name, device);
 }
 
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
similarity index 89%
rename from media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
rename to media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index abd57d6..5ac0d0d 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -22,26 +22,27 @@
 #include <utils/RefBase.h>
 
 namespace android {
+namespace CPP_VERSION {
 
 class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
 {
   public:
+    DevicesFactoryHalHybrid();
+
     // Opens a device with the specified name. To close the device, it is
     // necessary to release references to the returned object.
     virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
 
   private:
-    friend class DevicesFactoryHalInterface;
-
-    // Can not be constructed directly by clients.
-    DevicesFactoryHalHybrid();
-
-    virtual ~DevicesFactoryHalHybrid();
-
     sp<DevicesFactoryHalInterface> mLocalFactory;
     sp<DevicesFactoryHalInterface> mHidlFactory;
 };
 
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal() {
+    return new DevicesFactoryHalHybrid();
+}
+
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
similarity index 97%
rename from media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
rename to media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
index e54edd4..af67ff5 100644
--- a/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
@@ -26,7 +26,7 @@
 #include "DevicesFactoryHalLocal.h"
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
 {
@@ -67,5 +67,5 @@
     return rc;
 }
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
similarity index 96%
rename from media/libaudiohal/2.0/DevicesFactoryHalLocal.h
rename to media/libaudiohal/impl/DevicesFactoryHalLocal.h
index b9d18ab..5d108dd 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -24,6 +24,7 @@
 #include "DeviceHalLocal.h"
 
 namespace android {
+namespace CPP_VERSION {
 
 class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
 {
@@ -41,6 +42,7 @@
     virtual ~DevicesFactoryHalLocal() {}
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.cpp b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
similarity index 98%
rename from media/libaudiohal/4.0/EffectBufferHalHidl.cpp
rename to media/libaudiohal/impl/EffectBufferHalHidl.cpp
index 957c89f..6ef4e8a 100644
--- a/media/libaudiohal/4.0/EffectBufferHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
@@ -30,7 +30,7 @@
 using ::android::hidl::allocator::V1_0::IAllocator;
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 // static
 uint64_t EffectBufferHalHidl::makeUniqueId() {
@@ -142,5 +142,5 @@
     memcpy(mExternalData, mAudioBuffer.raw, size);
 }
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.h b/media/libaudiohal/impl/EffectBufferHalHidl.h
similarity index 92%
rename from media/libaudiohal/2.0/EffectBufferHalHidl.h
rename to media/libaudiohal/impl/EffectBufferHalHidl.h
index 31e0087..029d71a 100644
--- a/media/libaudiohal/2.0/EffectBufferHalHidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.h
@@ -18,16 +18,18 @@
 #define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
 
 #include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hardware/audio/effect/4.0/types.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidl/HidlSupport.h>
 #include <media/audiohal/EffectBufferHalInterface.h>
 #include <system/audio_effect.h>
 
-using android::hardware::audio::effect::V2_0::AudioBuffer;
+using android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
 using android::hardware::hidl_memory;
 using android::hidl::memory::V1_0::IMemory;
 
 namespace android {
+namespace CPP_VERSION {
 
 class EffectBufferHalHidl : public EffectBufferHalInterface
 {
@@ -71,6 +73,7 @@
     status_t init();
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
similarity index 95%
rename from media/libaudiohal/4.0/EffectHalHidl.cpp
rename to media/libaudiohal/impl/EffectHalHidl.cpp
index c99c4c8..12649a1 100644
--- a/media/libaudiohal/4.0/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -22,26 +22,25 @@
 #include <media/EffectsFactoryApi.h>
 #include <utils/Log.h>
 
-#include "ConversionHelperHidl.h"
 #include "EffectBufferHalHidl.h"
 #include "EffectHalHidl.h"
 #include "HidlUtils.h"
 
-using ::android::hardware::audio::effect::V4_0::AudioBuffer;
-using ::android::hardware::audio::effect::V4_0::EffectBufferAccess;
-using ::android::hardware::audio::effect::V4_0::EffectConfigParameters;
-using ::android::hardware::audio::effect::V4_0::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::V4_0::Result;
-using ::android::hardware::audio::common::V4_0::HidlUtils;
-using ::android::hardware::audio::common::V4_0::AudioChannelMask;
-using ::android::hardware::audio::common::V4_0::AudioFormat;
+using ::android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferAccess;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectConfigParameters;
+using ::android::hardware::audio::effect::CPP_VERSION::MessageQueueFlagBits;
+using ::android::hardware::audio::effect::CPP_VERSION::Result;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
+using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
 using ::android::hardware::audio::common::utils::mkEnumConverter;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::MQDescriptorSync;
 using ::android::hardware::Return;
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
         : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
@@ -338,5 +337,5 @@
     return result;
 }
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
similarity index 85%
rename from media/libaudiohal/2.0/EffectHalHidl.h
rename to media/libaudiohal/impl/EffectHalHidl.h
index 6ffdaf1..04f40d3 100644
--- a/media/libaudiohal/2.0/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -18,19 +18,22 @@
 #define ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
 
 #include <android/hardware/audio/effect/2.0/IEffect.h>
+#include <android/hardware/audio/effect/4.0/IEffect.h>
 #include <media/audiohal/EffectHalInterface.h>
 #include <fmq/EventFlag.h>
 #include <fmq/MessageQueue.h>
 #include <system/audio_effect.h>
 
-using ::android::hardware::audio::effect::V2_0::EffectBufferConfig;
-using ::android::hardware::audio::effect::V2_0::EffectConfig;
-using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferConfig;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectConfig;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
+using EffectResult = ::android::hardware::audio::effect::CPP_VERSION::Result;
 using ::android::hardware::EventFlag;
 using ::android::hardware::MessageQueue;
 
 namespace android {
+namespace CPP_VERSION {
 
 class EffectHalHidl : public EffectHalInterface
 {
@@ -68,8 +71,7 @@
 
   private:
     friend class EffectsFactoryHalHidl;
-    typedef MessageQueue<
-        hardware::audio::effect::V2_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
+    typedef MessageQueue<EffectResult, hardware::kSynchronizedReadWrite> StatusMQ;
 
     sp<IEffect> mEffect;
     const uint64_t mEffectId;
@@ -79,7 +81,7 @@
     std::unique_ptr<StatusMQ> mStatusMQ;
     EventFlag* mEfGroup;
 
-    static status_t analyzeResult(const hardware::audio::effect::V2_0::Result& result);
+    static status_t analyzeResult(const EffectResult& result);
     static void effectBufferConfigFromHal(
             const buffer_config_t& halConfig, EffectBufferConfig* config);
     static void effectBufferConfigToHal(
@@ -103,6 +105,7 @@
     status_t setProcessBuffers();
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
similarity index 93%
rename from media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
rename to media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index dfed784..b880433 100644
--- a/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -18,21 +18,21 @@
 //#define LOG_NDEBUG 0
 
 #include <cutils/native_handle.h>
-#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
 
+#include "EffectsFactoryHalHidl.h"
 #include "ConversionHelperHidl.h"
 #include "EffectBufferHalHidl.h"
 #include "EffectHalHidl.h"
 #include "HidlUtils.h"
 
-using ::android::hardware::audio::common::V4_0::HidlUtils;
-using ::android::hardware::audio::common::V4_0::Uuid;
-using ::android::hardware::audio::effect::V4_0::IEffect;
-using ::android::hardware::audio::effect::V4_0::Result;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::Uuid;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
+using ::android::hardware::audio::effect::CPP_VERSION::Result;
 using ::android::hardware::Return;
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
     mEffectsFactory = IEffectsFactory::getService();
@@ -42,9 +42,6 @@
     }
 }
 
-EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
-}
-
 status_t EffectsFactoryHalHidl::queryAllDescriptors() {
     if (mEffectsFactory == 0) return NO_INIT;
     Result retval = Result::NOT_INITIALIZED;
@@ -148,5 +145,5 @@
 }
 
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
similarity index 84%
rename from media/libaudiohal/2.0/EffectsFactoryHalHidl.h
rename to media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 82b5481..c6fced7 100644
--- a/media/libaudiohal/2.0/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -18,20 +18,25 @@
 #define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
 
 #include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
 #include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hardware/audio/effect/4.0/types.h>
 #include <media/audiohal/EffectsFactoryHalInterface.h>
 
 #include "ConversionHelperHidl.h"
 
 namespace android {
+namespace CPP_VERSION {
 
-using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V2_0::IEffectsFactory;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffectsFactory;
 using ::android::hardware::hidl_vec;
 
 class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
 {
   public:
+    EffectsFactoryHalHidl();
+
     // Returns the number of different effects in all loaded libraries.
     virtual status_t queryNumberEffects(uint32_t *pNumEffects);
 
@@ -56,18 +61,17 @@
                           sp<EffectBufferHalInterface>* buffer) override;
 
   private:
-    friend class EffectsFactoryHalInterface;
-
     sp<IEffectsFactory> mEffectsFactory;
     hidl_vec<EffectDescriptor> mLastDescriptors;
 
-    // Can not be constructed directly by clients.
-    EffectsFactoryHalHidl();
-    virtual ~EffectsFactoryHalHidl();
-
     status_t queryAllDescriptors();
 };
 
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal() {
+    return new EffectsFactoryHalHidl();
+}
+
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
similarity index 93%
rename from media/libaudiohal/4.0/StreamHalHidl.cpp
rename to media/libaudiohal/impl/StreamHalHidl.cpp
index 1c2fdb0..b23e018 100644
--- a/media/libaudiohal/4.0/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "StreamHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <android/hardware/audio/2.0/IStreamOutCallback.h>
 #include <android/hardware/audio/4.0/IStreamOutCallback.h>
 #include <hwbinder/IPCThreadState.h>
 #include <mediautils/SchedulingPolicyService.h>
@@ -27,30 +28,33 @@
 #include "StreamHalHidl.h"
 #include "VersionUtils.h"
 
-using ::android::hardware::audio::common::V4_0::AudioChannelMask;
-using ::android::hardware::audio::common::V4_0::AudioContentType;
-using ::android::hardware::audio::common::V4_0::AudioFormat;
-using ::android::hardware::audio::common::V4_0::AudioSource;
-using ::android::hardware::audio::common::V4_0::AudioUsage;
-using ::android::hardware::audio::common::V4_0::ThreadInfo;
-using ::android::hardware::audio::V4_0::AudioDrain;
-using ::android::hardware::audio::V4_0::IStreamOutCallback;
-using ::android::hardware::audio::V4_0::MessageQueueFlagBits;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::audio::V4_0::MmapBufferInfo;
-using ::android::hardware::audio::V4_0::MmapPosition;
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::PlaybackTrackMetadata;
-using ::android::hardware::audio::V4_0::RecordTrackMetadata;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::audio::V4_0::TimeSpec;
+using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
+using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
+using ::android::hardware::audio::common::CPP_VERSION::ThreadInfo;
+using ::android::hardware::audio::CPP_VERSION::AudioDrain;
+using ::android::hardware::audio::CPP_VERSION::IStreamOutCallback;
+using ::android::hardware::audio::CPP_VERSION::MessageQueueFlagBits;
+using ::android::hardware::audio::CPP_VERSION::MmapBufferInfo;
+using ::android::hardware::audio::CPP_VERSION::MmapPosition;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CPP_VERSION::TimeSpec;
 using ::android::hardware::MQDescriptorSync;
 using ::android::hardware::Return;
 using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::V4_0::IStreamIn::ReadCommand;
+using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
+
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::common::CPP_VERSION::AudioContentType;
+using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
+using ::android::hardware::audio::common::CPP_VERSION::AudioUsage;
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+using ::android::hardware::audio::CPP_VERSION::PlaybackTrackMetadata;
+using ::android::hardware::audio::CPP_VERSION::RecordTrackMetadata;
+#endif
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 StreamHalHidl::StreamHalHidl(IStream *stream)
         : ConversionHelperHidl("Stream"),
@@ -566,6 +570,12 @@
     }
 }
 
+#if MAJOR_VERSION == 2
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+    // Audio HAL V2.0 does not support propagating source metadata
+    return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
 /** Transform a standard collection to an HIDL vector. */
 template <class Values, class ElementConverter>
 static auto transformToHidlVec(const Values& values, ElementConverter converter) {
@@ -576,7 +586,7 @@
 }
 
 status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
-    hardware::audio::V4_0::SourceMetadata halMetadata = {
+    hardware::audio::CPP_VERSION::SourceMetadata halMetadata = {
         .tracks = transformToHidlVec(sourceMetadata.tracks,
               [](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
                   return {
@@ -587,6 +597,7 @@
               })};
     return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(halMetadata));
 }
+#endif
 
 void StreamOutHalHidl::onWriteReady() {
     sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
@@ -782,7 +793,19 @@
     }
 }
 
+#if MAJOR_VERSION == 2
+status_t StreamInHalHidl::getActiveMicrophones(
+        std::vector<media::MicrophoneInfo> *microphones __unused) {
+    if (mStream == 0) return NO_INIT;
+    return INVALID_OPERATION;
+}
 
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+    // Audio HAL V2.0 does not support propagating sink metadata
+    return INVALID_OPERATION;
+}
+
+#elif MAJOR_VERSION == 4
 status_t StreamInHalHidl::getActiveMicrophones(
         std::vector<media::MicrophoneInfo> *microphonesInfo) {
     if (!mStream) return NO_INIT;
@@ -802,7 +825,7 @@
 }
 
 status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
-    hardware::audio::V4_0::SinkMetadata halMetadata = {
+    hardware::audio::CPP_VERSION::SinkMetadata halMetadata = {
         .tracks = transformToHidlVec(sinkMetadata.tracks,
               [](const record_track_metadata& metadata) -> RecordTrackMetadata {
                   return {
@@ -812,6 +835,7 @@
               })};
     return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(halMetadata));
 }
+#endif
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
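The stream HAL wrapper above is now compiled once per supported HAL major version, with MAJOR_VERSION deciding at compile time whether a method forwards to the HAL or is stubbed out. A minimal sketch of the idiom, using a hypothetical FooHal::updateFooMetadata rather than the real stream interface:

    // Sketch only: FooHal, FooMetadata and convertToHidl are hypothetical stand-ins.
    // The same .cpp is built twice, with -DMAJOR_VERSION=2 and -DMAJOR_VERSION=4,
    // producing one copy of the class per CPP_VERSION namespace.
    #if MAJOR_VERSION == 2
    status_t FooHal::updateFooMetadata(const FooMetadata& /* metadata */) {
        return INVALID_OPERATION;  // older HAL has no such call
    }
    #elif MAJOR_VERSION == 4
    status_t FooHal::updateFooMetadata(const FooMetadata& metadata) {
        return processReturn("updateFooMetadata",
                             mStream->updateFooMetadata(convertToHidl(metadata)));
    }
    #endif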
diff --git a/media/libaudiohal/2.0/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
similarity index 92%
rename from media/libaudiohal/2.0/StreamHalHidl.h
rename to media/libaudiohal/impl/StreamHalHidl.h
index ebad8ae..95ec7f1 100644
--- a/media/libaudiohal/2.0/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -20,8 +20,11 @@
 #include <atomic>
 
 #include <android/hardware/audio/2.0/IStream.h>
+#include <android/hardware/audio/4.0/IStream.h>
 #include <android/hardware/audio/2.0/IStreamIn.h>
+#include <android/hardware/audio/4.0/IStreamIn.h>
 #include <android/hardware/audio/2.0/IStreamOut.h>
+#include <android/hardware/audio/4.0/IStreamOut.h>
 #include <fmq/EventFlag.h>
 #include <fmq/MessageQueue.h>
 #include <media/audiohal/StreamHalInterface.h>
@@ -29,18 +32,19 @@
 #include "ConversionHelperHidl.h"
 #include "StreamPowerLog.h"
 
-using ::android::hardware::audio::V2_0::IStream;
-using ::android::hardware::audio::V2_0::IStreamIn;
-using ::android::hardware::audio::V2_0::IStreamOut;
+using ::android::hardware::audio::CPP_VERSION::IStream;
+using ::android::hardware::audio::CPP_VERSION::IStreamIn;
+using ::android::hardware::audio::CPP_VERSION::IStreamOut;
 using ::android::hardware::EventFlag;
 using ::android::hardware::MessageQueue;
 using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::V2_0::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
-using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;
-using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
+using ReadParameters = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadParameters;
+using ReadStatus = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadStatus;
+using WriteCommand = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteCommand;
+using WriteStatus = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteStatus;
 
 namespace android {
+namespace CPP_VERSION {
 
 class DeviceHalHidl;
 
@@ -243,6 +247,7 @@
     status_t prepareForReading(size_t bufferSize);
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
similarity index 97%
rename from media/libaudiohal/4.0/StreamHalLocal.cpp
rename to media/libaudiohal/impl/StreamHalLocal.cpp
index e9d96bf..b134f57 100644
--- a/media/libaudiohal/4.0/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -25,7 +25,7 @@
 #include "VersionUtils.h"
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 
 StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
         : mDevice(device),
@@ -341,6 +341,12 @@
     return mStream->get_mmap_position(mStream, position);
 }
 
+#if MAJOR_VERSION == 2
+status_t StreamInHalLocal::getActiveMicrophones(
+        std::vector<media::MicrophoneInfo> *microphones __unused) {
+    return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
 status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
     if (mStream->get_active_microphones == NULL) return INVALID_OPERATION;
     size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
@@ -352,6 +358,7 @@
     }
     return status;
 }
+#endif
 
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
similarity index 99%
rename from media/libaudiohal/2.0/StreamHalLocal.h
rename to media/libaudiohal/impl/StreamHalLocal.h
index cda8d0c..cea4229 100644
--- a/media/libaudiohal/2.0/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -21,6 +21,7 @@
 #include "StreamPowerLog.h"
 
 namespace android {
+namespace CPP_VERSION {
 
 class DeviceHalLocal;
 
@@ -214,6 +215,7 @@
     virtual ~StreamInHalLocal();
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
diff --git a/media/libaudiohal/2.0/StreamPowerLog.h b/media/libaudiohal/impl/StreamPowerLog.h
similarity index 98%
rename from media/libaudiohal/2.0/StreamPowerLog.h
rename to media/libaudiohal/impl/StreamPowerLog.h
index a78b1aa..5fd3912 100644
--- a/media/libaudiohal/2.0/StreamPowerLog.h
+++ b/media/libaudiohal/impl/StreamPowerLog.h
@@ -23,6 +23,7 @@
 #include <system/audio.h>
 
 namespace android {
+namespace CPP_VERSION {
 
 class StreamPowerLog {
 public:
@@ -97,6 +98,7 @@
     size_t mFrameSize;
 };
 
+} // namespace CPP_VERSION
 } // namespace android
 
 #endif // ANDROID_HARDWARE_STREAM_POWER_LOG_H
diff --git a/media/libaudiohal/impl/VersionMacro.h b/media/libaudiohal/impl/VersionMacro.h
new file mode 100644
index 0000000..98e9c07
--- /dev/null
+++ b/media/libaudiohal/impl/VersionMacro.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_VERSION_MACRO_H
+#define ANDROID_HARDWARE_VERSION_MACRO_H
+
+#if !defined(MAJOR_VERSION) || !defined(MINOR_VERSION)
+#error "MAJOR_VERSION and MINOR_VERSION must be defined"
+#endif
+
+#define CONCAT_3(a,b,c) a##b##c
+#define EXPAND_CONCAT_3(a,b,c) CONCAT_3(a,b,c)
+/** The directory name of the version: <major>.<minor> */
+#define FILE_VERSION EXPAND_CONCAT_3(MAJOR_VERSION,.,MINOR_VERSION)
+
+#define CONCAT_4(a,b,c,d) a##b##c##d
+#define EXPAND_CONCAT_4(a,b,c,d) CONCAT_4(a,b,c,d)
+/** The c++ namespace of the version: V<major>_<minor> */
+#define CPP_VERSION EXPAND_CONCAT_4(V,MAJOR_VERSION,_,MINOR_VERSION)
+
+#endif // ANDROID_HARDWARE_VERSION_MACRO_H
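For reference, the two macros are pure token-pasting helpers. A small illustrative check (not part of the change, requires C++17) assuming the translation unit is built with -DMAJOR_VERSION=4 -DMINOR_VERSION=0:

    #include <string_view>
    #include "VersionMacro.h"

    // Stringification helpers used only for this illustration.
    #define STR_(x) #x
    #define STR(x) STR_(x)

    // With MAJOR_VERSION=4 and MINOR_VERSION=0:
    //   CPP_VERSION  expands to the single token V4_0 (usable as a namespace name)
    //   FILE_VERSION expands to 4.0 (usable when building include paths)
    static_assert(std::string_view(STR(CPP_VERSION)) == "V4_0",
                  "expected CPP_VERSION to expand to V4_0 for a 4.0 build");

    namespace android {
    namespace CPP_VERSION {  // i.e. android::V4_0 in this configuration
    }  // namespace CPP_VERSION
    }  // namespace android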
diff --git a/media/libaudiohal/4.0/VersionUtils.h b/media/libaudiohal/impl/VersionUtils.h
similarity index 61%
rename from media/libaudiohal/4.0/VersionUtils.h
rename to media/libaudiohal/impl/VersionUtils.h
index 1246c2e..5004895 100644
--- a/media/libaudiohal/4.0/VersionUtils.h
+++ b/media/libaudiohal/impl/VersionUtils.h
@@ -14,22 +14,36 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_VERSION_UTILS_4_0_H
-#define ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+#ifndef ANDROID_HARDWARE_VERSION_UTILS_H
+#define ANDROID_HARDWARE_VERSION_UTILS_H
 
+#include <android/hardware/audio/2.0/types.h>
 #include <android/hardware/audio/4.0/types.h>
 #include <hidl/HidlSupport.h>
 
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
 using ::android::hardware::Return;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::hidl_string;
 
 namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
 namespace utils {
 
+#if MAJOR_VERSION == 2
+template <class T, class Callback>
+Return<void> getParameters(T& object, hidl_vec<ParameterValue> /*context*/,
+                           hidl_vec<hidl_string> keys, Callback callback) {
+    return object->getParameters(keys, callback);
+}
+
+template <class T>
+Return<Result> setParameters(T& object, hidl_vec<ParameterValue> /*context*/,
+                             hidl_vec<ParameterValue> keys) {
+    return object->setParameters(keys);
+}
+#elif MAJOR_VERSION == 4
 template <class T, class Callback>
 Return<void> getParameters(T& object, hidl_vec<ParameterValue> context,
                            hidl_vec<hidl_string> keys, Callback callback) {
@@ -41,9 +55,10 @@
                              hidl_vec<ParameterValue> keys) {
     return object->setParameters(context, keys);
 }
+#endif
 
 } // namespace utils
-} // namespace V4_0
+} // namespace CPP_VERSION
 } // namespace android
 
-#endif // ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+#endif // ANDROID_HARDWARE_VERSION_UTILS_H
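Callers can now pass a (possibly empty) parameter context unconditionally; the 2.0 overloads simply drop it while the 4.0 overloads forward it. A hedged usage sketch, assuming 'stream' is an already-obtained sp<IStream> for the version being compiled and 'keys' was built by the caller with an illustrative key name:

    hidl_vec<ParameterValue> context;           // empty; ignored on the 2.0 path
    hidl_vec<hidl_string> keys = {"some_key"};  // illustrative key only
    Return<void> ret = utils::getParameters(
            stream, context, keys,
            [](Result retval, const hidl_vec<ParameterValue>& values) {
                // inspect retval and values here
            });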
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
new file mode 100644
index 0000000..fa0effc
--- /dev/null
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
+
+/** @file Library entry points to create the HAL factories. */
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+namespace V2_0 {
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V2_0
+
+namespace V4_0 {
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V4_0
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
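Each versioned namespace exports its own factory entry points; choosing between them happens at runtime in the client library. A minimal sketch of such a dispatch, where hasHalService<>() is a hypothetical probe of the HIDL service manager (the real selection logic lives in libaudiohal's dispatch layer):

    #include <libaudiohal/FactoryHalHidl.h>

    namespace android {

    sp<DevicesFactoryHalInterface> createDevicesFactory() {
        // Prefer the newest HAL that is actually registered on the device.
        if (hasHalService<hardware::audio::V4_0::IDevicesFactory>()) {  // hypothetical helper
            return V4_0::createDevicesFactoryHal();
        }
        return V2_0::createDevicesFactoryHal();
    }

    }  // namespace android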
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 93ed5f2..f6f817a 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -192,7 +192,6 @@
     // always recompute for both channel masks even if only one has changed.
     const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
     const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
-    const bool mixerChannelCountChanged = track->mMixerChannelCount != mixerChannelCount;
 
     ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
             && trackChannelCount
@@ -204,17 +203,16 @@
 
     // channel masks have changed, does this track need a downmixer?
     // update to try using our desired format (if we aren't already using it)
-    const audio_format_t prevDownmixerFormat = track->mDownmixRequiresFormat;
     const status_t status = track->prepareForDownmix();
     ALOGE_IF(status != OK,
             "prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
             status, track->channelMask, track->mMixerChannelMask);
 
-    if (prevDownmixerFormat != track->mDownmixRequiresFormat) {
-        track->prepareForReformat(); // because of downmixer, track format may change!
-    }
+    // always do reformat since channel mask changed,
+    // do it after downmix since track format may change!
+    track->prepareForReformat();
 
-    if (track->mResampler.get() != nullptr && mixerChannelCountChanged) {
+    if (track->mResampler.get() != nullptr) {
         // resampler channels may have changed.
         const uint32_t resetToSampleRate = track->sampleRate;
         track->mResampler.reset(nullptr);
@@ -314,6 +312,14 @@
                 targetFormat,
                 kCopyBufferFrameCount));
         requiresReconfigure = true;
+    } else if (mFormat == AUDIO_FORMAT_PCM_FLOAT) {
+        // Input and output are floats, make sure application did not provide > 3db samples
+        // that would break volume application (b/68099072)
+        // TODO: add a trusted source flag to avoid the overhead
+        mReformatBufferProvider.reset(new ClampFloatBufferProvider(
+                audio_channel_count_from_out_mask(channelMask),
+                kCopyBufferFrameCount));
+        requiresReconfigure = true;
     }
     if (targetFormat != mMixerInFormat) {
         mPostDownmixReformatBufferProvider.reset(new ReformatBufferProvider(
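With the first branch unchanged, the new else-if only triggers when the track already supplies float matching the mixer's target format; instead of a format converter it installs a provider that merely clamps out-of-range samples. A condensed sketch of that selection (the surrounding condition is paraphrased from context, not copied verbatim):

    // Sketch of the reformat-stage selection in prepareForReformat(), assuming the
    // first branch is guarded by a format mismatch as the context above suggests.
    if (mFormat != targetFormat) {
        mReformatBufferProvider.reset(new ReformatBufferProvider(
                audio_channel_count_from_out_mask(channelMask),
                mFormat, targetFormat, kCopyBufferFrameCount));
        requiresReconfigure = true;
    } else if (mFormat == AUDIO_FORMAT_PCM_FLOAT) {
        // float in, float out: only guard against > +3 dB samples (b/68099072)
        mReformatBufferProvider.reset(new ClampFloatBufferProvider(
                audio_channel_count_from_out_mask(channelMask),
                kCopyBufferFrameCount));
        requiresReconfigure = true;
    }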
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index e19af4a..2d9e1cb 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -376,6 +376,23 @@
     memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);
 }
 
+ClampFloatBufferProvider::ClampFloatBufferProvider(int32_t channelCount, size_t bufferFrameCount) :
+        CopyBufferProvider(
+                channelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT),
+                channelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT),
+                bufferFrameCount),
+        mChannelCount(channelCount)
+{
+    ALOGV("ClampFloatBufferProvider(%p)(%u)", this, channelCount);
+}
+
+void ClampFloatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_to_float_from_float_with_clamping((float*)dst, (const float*)src,
+                                             frames * mChannelCount,
+                                             FLOAT_NOMINAL_RANGE_HEADROOM);
+}
+
 TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
         audio_format_t format, uint32_t sampleRate, const AudioPlaybackRate &playbackRate) :
         mChannelCount(channelCount),
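The heavy lifting is delegated to memcpy_to_float_from_float_with_clamping() from audio_utils; conceptually it is a copy that limits each sample's magnitude to the headroom bound. An illustrative, self-contained equivalent of the per-sample operation (assuming the headroom constant is the absolute clamp limit, roughly +3 dB over nominal full scale):

    #include <algorithm>
    #include <cstddef>

    // Illustrative only; the production code uses the audio_utils helper above.
    static void clampFloatCopy(float* dst, const float* src, size_t count, float absLimit) {
        for (size_t i = 0; i < count; ++i) {
            dst[i] = std::min(absLimit, std::max(-absLimit, src[i]));
        }
    }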
diff --git a/media/libeffects/config/Android.bp b/media/libeffects/config/Android.bp
index 3e88c7c..5fa9da9 100644
--- a/media/libeffects/config/Android.bp
+++ b/media/libeffects/config/Android.bp
@@ -1,5 +1,5 @@
 // Effect configuration
-cc_library_shared {
+cc_library {
     name: "libeffectsconfig",
     vendor_available: true,
 
diff --git a/media/libeffects/data/audio_effects.xml b/media/libeffects/data/audio_effects.xml
new file mode 100644
index 0000000..3f85052
--- /dev/null
+++ b/media/libeffects/data/audio_effects.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<audio_effects_conf version="2.0" xmlns="http://schemas.android.com/audio/audio_effects_conf/v2_0">
+    <!-- List of effect libraries to load.
+         Each library element must contain a "name" attribute and a "path" attribute giving the
+         name of a library .so file in /vendor/lib/soundfx on the target
+
+         If offloadable effects are present, the AOSP library libeffectproxy.so must be listed as
+         well as one library for the SW implementation and one library for the DSP implementation:
+         <library name="proxy" path="libeffectproxy.so"/>
+         <library name="some_fx_sw" path="lib_some_fx_sw.so"/>
+         <library name="some_fx_hw" path="lib_some_fx_hw.so"/>
+
+         If the audio HAL implements support for AOSP software audio pre-processing effects,
+         the following library must be added:
+         <library name="pre_processing" path="libaudiopreprocessing.so"/>
+    -->
+    <libraries>
+        <library name="bundle" path="libbundlewrapper.so"/>
+        <library name="reverb" path="libreverbwrapper.so"/>
+        <library name="visualizer" path="libvisualizer.so"/>
+        <library name="downmix" path="libdownmix.so"/>
+        <library name="loudness_enhancer" path="libldnhncr.so"/>
+        <library name="dynamics_processing" path="libdynproc.so"/>
+    </libraries>
+
+    <!-- list of effects to load.
+         Each "effect" element must contain a "name", "library" and a "uuid" attribute.
+         The value of the "library" element must correspond to the name of one library element in
+         the "libraries" element.
+         The "name" attribute is indicative, only the value of the "uuid" attribute designates
+         the effect.
+         The uuid is the implementation specific UUID as specified by the effect vendor. This is not
+         the generic effect type UUID.
+
+         Offloadable effects are described by an "effectProxy" element which contains one "libsw"
+         element containing the "uuid" and "library" for the SW implementation and one "libhw"
+         element containing the "uuid" and "library" for the DSP implementation.
+         The "uuid" value for the "effectProxy" element must be unique and will override the default
+         uuid in the AOSP proxy effect implementation.
+
+         If the audio HAL implements support for AOSP software audio pre-processing effects,
+         the following effects can be added:
+         <effect name="agc" library="pre_processing" uuid="aa8130e0-66fc-11e0-bad0-0002a5d5c51b"/>
+         <effect name="aec" library="pre_processing" uuid="bb392ec0-8d4d-11e0-a896-0002a5d5c51b"/>
+         <effect name="ns" library="pre_processing" uuid="c06c8400-8e06-11e0-9cb6-0002a5d5c51b"/>
+    -->
+
+    <effects>
+        <effect name="bassboost" library="bundle" uuid="8631f300-72e2-11df-b57e-0002a5d5c51b"/>
+        <effect name="virtualizer" library="bundle" uuid="1d4033c0-8557-11df-9f2d-0002a5d5c51b"/>
+        <effect name="equalizer" library="bundle" uuid="ce772f20-847d-11df-bb17-0002a5d5c51b"/>
+        <effect name="volume" library="bundle" uuid="119341a0-8469-11df-81f9-0002a5d5c51b"/>
+        <effect name="reverb_env_aux" library="reverb" uuid="4a387fc0-8ab3-11df-8bad-0002a5d5c51b"/>
+        <effect name="reverb_env_ins" library="reverb" uuid="c7a511a0-a3bb-11df-860e-0002a5d5c51b"/>
+        <effect name="reverb_pre_aux" library="reverb" uuid="f29a1400-a3bb-11df-8ddc-0002a5d5c51b"/>
+        <effect name="reverb_pre_ins" library="reverb" uuid="172cdf00-a3bc-11df-a72f-0002a5d5c51b"/>
+        <effect name="visualizer" library="visualizer" uuid="d069d9e0-8329-11df-9168-0002a5d5c51b"/>
+        <effect name="downmix" library="downmix" uuid="93f04452-e4fe-41cc-91f9-e475b6d1d69f"/>
+        <effect name="loudness_enhancer" library="loudness_enhancer" uuid="fa415329-2034-4bea-b5dc-5b381c8d1e2c"/>
+        <effect name="dynamics_processing" library="dynamics_processing" uuid="e0e6539b-1781-7261-676f-6d7573696340"/>
+    </effects>
+
+    <!-- Audio pre processor configurations.
+         The pre processor configuration is described in a "preprocess" element and consists of a
+         list of elements each describing pre processor settings for a given use case or "stream".
+         Each stream element has a "type" attribute corresponding to the input source used.
+         Valid types are:
+              "mic", "camcorder", "voice_recognition", "voice_communication"
+         Each "stream" element contains a list of "apply" elements indicating one effect to apply.
+         The effect to apply is designated by its name in the "effects" elements.
+
+        <preprocess>
+            <stream type="voice_communication">
+                <apply effect="aec"/>
+                <apply effect="ns"/>
+            </stream>
+        </preprocess>
+    -->
+
+    <!-- Audio post processor configurations.
+         The post processor configuration is described in a "postprocess" element and consists of a
+         list of elements each describing post processor settings for a given use case or "stream".
+         Each stream element has a "type" attribute corresponding to the stream type used.
+         Valid types are:
+              "music", "ring", "alarm", "notification", "voice_call"
+         Each "stream" element contains a list of "apply" elements indicating one effect to apply.
+         The effect to apply is designated by its name in the "effects" elements.
+
+        <postprocess>
+            <stream type="music">
+                <apply effect="music_post_proc"/>
+            </stream>
+            <stream type="voice_call">
+                <apply effect="voice_post_proc"/>
+            </stream>
+            <stream type="notification">
+                <apply effect="notification_post_proc"/>
+            </stream>
+        </postprocess>
+    -->
+
+</audio_effects_conf>
diff --git a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
index 55383eb..0b883f1 100644
--- a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
+++ b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
@@ -51,7 +51,7 @@
         {0x7261676f, 0x6d75, 0x7369, 0x6364, {0x28, 0xe2, 0xfd, 0x3a, 0xc3, 0x9e}}, // type
         {0xe0e6539b, 0x1781, 0x7261, 0x676f, {0x6d, 0x75, 0x73, 0x69, 0x63, 0x40}}, // uuid
         EFFECT_CONTROL_API_VERSION,
-        (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST),
+        (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_LAST | EFFECT_FLAG_VOLUME_CTRL),
         0, // TODO
         1,
         "Dynamics Processing",
@@ -367,6 +367,76 @@
     return 0;
 }
 
+//helper function
+bool DP_checkSizesInt(uint32_t paramSize, uint32_t valueSize, uint32_t expectedParams,
+        uint32_t expectedValues) {
+    if (paramSize < expectedParams * sizeof(int32_t)) {
+        ALOGE("Invalid paramSize: %u expected %u", paramSize,
+                (uint32_t)(expectedParams * sizeof(int32_t)));
+        return false;
+    }
+    if (valueSize < expectedValues * sizeof(int32_t)) {
+        ALOGE("Invalid valueSize %u expected %u", valueSize,
+                (uint32_t)(expectedValues * sizeof(int32_t)));
+        return false;
+    }
+    return true;
+}
+
+static dp_fx::DPChannel* DP_getChannel(DynamicsProcessingContext *pContext,
+        int32_t channel) {
+    if (pContext->mPDynamics == NULL) {
+        return NULL;
+    }
+    dp_fx::DPChannel *pChannel = pContext->mPDynamics->getChannel(channel);
+    ALOGE_IF(pChannel == NULL, "DPChannel NULL. invalid channel %d", channel);
+    return pChannel;
+}
+
+static dp_fx::DPEq* DP_getEq(DynamicsProcessingContext *pContext, int32_t channel,
+        int32_t eqType) {
+    dp_fx::DPChannel *pChannel = DP_getChannel(pContext, channel);
+    if (pChannel == NULL) {
+        return NULL;
+    }
+    dp_fx::DPEq *pEq = (eqType == DP_PARAM_PRE_EQ ? pChannel->getPreEq() :
+            (eqType == DP_PARAM_POST_EQ ? pChannel->getPostEq() : NULL));
+    ALOGE_IF(pEq == NULL,"DPEq NULL invalid eq");
+    return pEq;
+}
+
+static dp_fx::DPEqBand* DP_getEqBand(DynamicsProcessingContext *pContext, int32_t channel,
+        int32_t eqType, int32_t band) {
+    dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqType);
+    if (pEq == NULL) {
+        return NULL;
+    }
+    dp_fx::DPEqBand *pEqBand = pEq->getBand(band);
+    ALOGE_IF(pEqBand == NULL, "DPEqBand NULL. invalid band %d", band);
+    return pEqBand;
+}
+
+static dp_fx::DPMbc* DP_getMbc(DynamicsProcessingContext *pContext, int32_t channel) {
+    dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+    if (pChannel == NULL) {
+        return NULL;
+    }
+    dp_fx::DPMbc *pMbc = pChannel->getMbc();
+    ALOGE_IF(pMbc == NULL, "DPMbc NULL invalid MBC");
+    return pMbc;
+}
+
+static dp_fx::DPMbcBand* DP_getMbcBand(DynamicsProcessingContext *pContext, int32_t channel,
+        int32_t band) {
+    dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+    if (pMbc == NULL) {
+        return NULL;
+    }
+    dp_fx::DPMbcBand *pMbcBand = pMbc->getBand(band);
+    ALOGE_IF(pMbcBand == NULL, "pMbcBand NULL. invalid band %d", band);
+    return pMbcBand;
+}
+
 int DP_command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
 
@@ -483,8 +553,49 @@
                 p->data + voffset);
         break;
     }
+    case EFFECT_CMD_SET_VOLUME: {
+        ALOGV("EFFECT_CMD_SET_VOLUME");
+        // if pReplyData is NULL, VOL_CTRL is delegated to another effect
+        if (pReplyData == NULL || replySize == NULL || *replySize < ((int)sizeof(int32_t) * 2)) {
+            ALOGV("no VOLUME data to return");
+            break;
+        }
+        if (pCmdData == NULL || cmdSize < ((int)sizeof(uint32_t) * 2)) {
+            ALOGE("\tLVM_ERROR : DynamicsProcessing EFFECT_CMD_SET_VOLUME ERROR");
+            return -EINVAL;
+        }
+
+        const int32_t unityGain = 1 << 24;
+        //channel count
+        int32_t channelCount = (int32_t)audio_channel_count_from_out_mask(
+                pContext->mConfig.inputCfg.channels);
+        for (int32_t ch = 0; ch < channelCount; ch++) {
+
+            dp_fx::DPChannel * pChannel = DP_getChannel(pContext, ch);
+            if (pChannel == NULL) {
+                ALOGE("%s EFFECT_CMD_SET_VOLUME invalid channel %d", __func__, ch);
+                return -EINVAL;
+                break;
+            }
+
+            int32_t offset = ch;
+            if (ch > 1) {
+                // FIXME: limited to 2 unique channels. If more channels present, use value for
+                // first channel
+                offset = 0;
+            }
+            const float gain = (float)*((uint32_t *)pCmdData + offset) / unityGain;
+            const float gainDb = linearToDb(gain);
+            ALOGVV("%s EFFECT_CMD_SET_VOLUME channel %d, engine outputlevel %f (%0.2f dB)",
+                    __func__, ch, gain, gainDb);
+            pChannel->setOutputGain(gainDb);
+        }
+
+        const int32_t  volRet[2] = {unityGain, unityGain}; // Apply no volume before effect.
+        memcpy(pReplyData, volRet, sizeof(volRet));
+        break;
+    }
     case EFFECT_CMD_SET_DEVICE:
-    case EFFECT_CMD_SET_VOLUME:
     case EFFECT_CMD_SET_AUDIO_MODE:
         break;
 
@@ -523,76 +634,6 @@
     return 0;
 }
 
-//helper function
-bool DP_checkSizesInt(uint32_t paramSize, uint32_t valueSize, uint32_t expectedParams,
-        uint32_t expectedValues) {
-    if (paramSize < expectedParams * sizeof(int32_t)) {
-        ALOGE("Invalid paramSize: %u expected %u", paramSize,
-                (uint32_t) (expectedParams * sizeof(int32_t)));
-        return false;
-    }
-    if (valueSize < expectedValues * sizeof(int32_t)) {
-        ALOGE("Invalid valueSize %u expected %u", valueSize,
-                (uint32_t)(expectedValues * sizeof(int32_t)));
-        return false;
-    }
-    return true;
-}
-
-static dp_fx::DPChannel* DP_getChannel(DynamicsProcessingContext *pContext,
-        int32_t channel) {
-    if (pContext->mPDynamics == NULL) {
-        return NULL;
-    }
-    dp_fx::DPChannel *pChannel = pContext->mPDynamics->getChannel(channel);
-    ALOGE_IF(pChannel == NULL, "DPChannel NULL. invalid channel %d", channel);
-    return pChannel;
-}
-
-static dp_fx::DPEq* DP_getEq(DynamicsProcessingContext *pContext, int32_t channel,
-        int32_t eqType) {
-    dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
-    if (pChannel == NULL) {
-        return NULL;
-    }
-    dp_fx::DPEq *pEq = (eqType == DP_PARAM_PRE_EQ ? pChannel->getPreEq() :
-            (eqType == DP_PARAM_POST_EQ ? pChannel->getPostEq() : NULL));
-    ALOGE_IF(pEq == NULL,"DPEq NULL invalid eq");
-    return pEq;
-}
-
-static dp_fx::DPEqBand* DP_getEqBand(DynamicsProcessingContext *pContext, int32_t channel,
-        int32_t eqType, int32_t band) {
-    dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqType);
-    if (pEq == NULL) {
-        return NULL;
-    }
-    dp_fx::DPEqBand *pEqBand = pEq->getBand(band);
-    ALOGE_IF(pEqBand == NULL, "DPEqBand NULL. invalid band %d", band);
-    return pEqBand;
-}
-
-static dp_fx::DPMbc* DP_getMbc(DynamicsProcessingContext *pContext, int32_t channel) {
-    dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
-    if (pChannel == NULL) {
-        return NULL;
-    }
-    dp_fx::DPMbc *pMbc = pChannel->getMbc();
-    ALOGE_IF(pMbc == NULL, "DPMbc NULL invalid MBC");
-    return pMbc;
-}
-
-static dp_fx::DPMbcBand* DP_getMbcBand(DynamicsProcessingContext *pContext, int32_t channel,
-        int32_t band) {
-    dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
-    if (pMbc == NULL) {
-        return NULL;
-    }
-    dp_fx::DPMbcBand *pMbcBand = pMbc->getBand(band);
-    ALOGE_IF(pMbcBand == NULL, "pMbcBand NULL. invalid band %d", band);
-    return pMbcBand;
-}
-
 int DP_getParameter(DynamicsProcessingContext *pContext,
                            uint32_t paramSize,
                            void *pParam,
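The new EFFECT_CMD_SET_VOLUME handler receives per-channel volumes in 8.24 fixed point, converts them to dB for the engine via setOutputGain(), and replies with unity gain so the framework does not apply the volume a second time. A small worked example of the conversion, reusing the unityGain constant from the handler (linearToDb() stands in for the effect's helper):

    #include <cmath>
    #include <cstdint>

    static float linearToDb(float v) { return 20.0f * std::log10(v); }

    int main() {
        const int32_t unityGain = 1 << 24;                  // 1.0 in Q8.24
        const uint32_t halfVolume = unityGain / 2;          // 0.5 in Q8.24
        const float gain = (float)halfVolume / unityGain;   // 0.5
        const float gainDb = linearToDb(gain);              // about -6.02 dB
        // the handler would call pChannel->setOutputGain(gainDb) here
        (void)gainDb;
        return 0;
    }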
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.cpp b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
index 8b79991..ac758e0 100644
--- a/media/libeffects/dynamicsproc/dsp/DPBase.cpp
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
@@ -174,8 +174,8 @@
 }
 
 //----
-DPChannel::DPChannel() : mInitialized(false), mInputGainDb(0), mPreEqInUse(false), mMbcInUse(false),
-        mPostEqInUse(false), mLimiterInUse(false) {
+DPChannel::DPChannel() : mInitialized(false), mInputGainDb(0), mOutputGainDb(0),
+        mPreEqInUse(false), mMbcInUse(false), mPostEqInUse(false), mLimiterInUse(false) {
 }
 
 void DPChannel::init(float inputGain, bool preEqInUse, uint32_t preEqBandCount,
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.h b/media/libeffects/dynamicsproc/dsp/DPBase.h
index 355f64b..e74f91d 100644
--- a/media/libeffects/dynamicsproc/dsp/DPBase.h
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.h
@@ -272,6 +272,16 @@
         mInputGainDb = gain;
     }
 
+    float getOutputGain() const {
+        if (!mInitialized) {
+            return 0;
+        }
+        return mOutputGainDb;
+    }
+    void setOutputGain(float gain) {
+        mOutputGainDb = gain;
+    }
+
     DPEq* getPreEq();
     DPMbc* getMbc();
     DPEq* getPostEq();
@@ -281,6 +291,7 @@
 private:
     bool mInitialized;
     float mInputGainDb;
+    float mOutputGainDb;
 
     DPEq mPreEq;
     DPMbc mMbc;
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
index 59195fc..d06fd70 100644
--- a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
@@ -29,7 +29,7 @@
 
 #define CIRCULAR_BUFFER_UPSAMPLE 4  //4 times buffer size
 
-static constexpr float MIN_ENVELOPE = 0.000001f;
+static constexpr float MIN_ENVELOPE = 1e-6f; //-120 dB
 //helper functionS
 static inline bool isPowerOf2(unsigned long n) {
     return (n & (n - 1)) == 0;
@@ -53,14 +53,6 @@
 #define IS_CHANGED(c, a, b) { c |= !compareEquality(a,b); \
     (a) = (b); }
 
-float dBtoLinear(float valueDb) {
-    return  pow (10, valueDb / 20.0);
-}
-
-float linearToDb(float value) {
-    return 20 * log10(value);
-}
-
 //ChannelBuffers helper
 void ChannelBuffer::initBuffers(unsigned int blockSize, unsigned int overlapSize,
         unsigned int halfFftSize, unsigned int samplingRate, DPBase &dpBase) {
@@ -74,7 +66,7 @@
     cBOutput.resize(mBlockSize * CIRCULAR_BUFFER_UPSAMPLE);
 
     //fill input with half block size...
-    for (unsigned int k = 0;  k < mBlockSize/2; k++) {
+    for (unsigned int k = 0; k < mBlockSize/2; k++) {
         cBInput.write(0);
     }
 
@@ -94,12 +86,14 @@
             mMbcBands.size(), mPostEqBands.size());
 
     DPChannel *pChannel = dpBase.getChannel(0);
-    if (pChannel != NULL) {
+    if (pChannel != nullptr) {
         mPreEqInUse = pChannel->getPreEq()->isInUse();
         mMbcInUse = pChannel->getMbc()->isInUse();
         mPostEqInUse = pChannel->getPostEq()->isInUse();
         mLimiterInUse = pChannel->getLimiter()->isInUse();
     }
+
+    mLimiterParams.linkGroup = -1; //no group.
 }
 
 void ChannelBuffer::computeBinStartStop(BandParams &bp, size_t binStart) {
@@ -108,8 +102,35 @@
     bp.binStop = (int)(0.5 + bp.freqCutoffHz * mBlockSize / mSamplingRate);
 }
 
-//== DPFrequency
+//== LinkedLimiters Helper
+void LinkedLimiters::reset() {
+    mGroupsMap.clear();
+}
 
+void LinkedLimiters::update(int32_t group, int index) {
+    mGroupsMap[group].push_back(index);
+}
+
+void LinkedLimiters::remove(int index) {
+    //check all groups and if index is found, remove it.
+    //if group is empty afterwards, remove it.
+    for (auto it = mGroupsMap.begin(); it != mGroupsMap.end(); ) {
+        for (auto itIndex = it->second.begin(); itIndex != it->second.end(); ) {
+            if (*itIndex == index) {
+                itIndex = it->second.erase(itIndex);
+            } else {
+                ++itIndex;
+            }
+        }
+        if (it->second.size() == 0) {
+            it = mGroupsMap.erase(it);
+        } else {
+            ++it;
+        }
+    }
+}
+
+//== DPFrequency
 void DPFrequency::reset() {
 }
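LinkedLimiters maps a link-group id to the channel indices in that group: update() registers a channel, remove() detaches it from whatever group holds it (erasing groups that become empty), and the later processLinkedLimiters() pass applies each group's minimum gain factor to all of its members. A brief usage sketch with hypothetical channel assignments:

    LinkedLimiters limiters;
    limiters.update(/*group*/ 0, /*channel*/ 0);  // left
    limiters.update(/*group*/ 0, /*channel*/ 1);  // right, linked with left
    limiters.update(/*group*/ 1, /*channel*/ 2);  // independent channel

    // If channel 1 is later re-linked to group 1:
    limiters.remove(1);      // group 0 now holds only channel 0
    limiters.update(1, 1);   // group 1 now holds channels 2 and 1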
 
@@ -147,14 +168,25 @@
                 mSamplingRate, *this);
     }
 
-    //dsp
+    //effective number of frames processed per second
+    mBlocksPerSecond = (float)mSamplingRate / (mBlockSize - mOverlapSize);
+
     fill_window(mVWindow, RDSP_WINDOW_HANNING_FLAT_TOP, mBlockSize, mOverlapSize);
+
+    //compute window rms for energy compensation
+    mWindowRms = 0;
+    for (size_t i = 0; i < mVWindow.size(); i++) {
+        mWindowRms += mVWindow[i] * mVWindow[i];
+    }
+
+    //Making sure window rms is not zero.
+    mWindowRms = std::max(sqrt(mWindowRms / mVWindow.size()), MIN_ENVELOPE);
 }
 
 void DPFrequency::updateParameters(ChannelBuffer &cb, int channelIndex) {
     DPChannel *pChannel = getChannel(channelIndex);
 
-    if (pChannel == NULL) {
+    if (pChannel == nullptr) {
         ALOGE("Error: updateParameters null DPChannel %d", channelIndex);
         return;
     }
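configure() above precomputes two normalization values used by every block: mBlocksPerSecond, the effective update rate of the attack/release envelope followers, and mWindowRms, which compensates for the average energy the analysis window removes. A numeric sketch of the smoothing coefficient under assumed parameters (48 kHz, block 512, overlap 256; the real values come from configure()):

    #include <cmath>

    int main() {
        const float sampleRate = 48000.0f;
        const float blockSize = 512.0f;
        const float overlapSize = 256.0f;
        const float blocksPerSecond = sampleRate / (blockSize - overlapSize);  // 187.5

        const float attackSec = 3.0f / 1000.0f;  // a 3 ms attack time
        // One-pole smoothing coefficient; values near 1 react slowly, near 0 quickly.
        const float theta = std::exp(-1.0f / (attackSec * blocksPerSecond));   // ~0.17
        (void)theta;
        return 0;
    }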
@@ -166,7 +198,7 @@
         //===EqPre
         if (cb.mPreEqInUse) {
             DPEq *pPreEq = pChannel->getPreEq();
-            if (pPreEq == NULL) {
+            if (pPreEq == nullptr) {
                 ALOGE("Error: updateParameters null PreEq for channel: %d", channelIndex);
                 return;
             }
@@ -174,7 +206,7 @@
             if (cb.mPreEqEnabled) {
                 for (unsigned int b = 0; b < getPreEqBandCount(); b++) {
                     DPEqBand *pEqBand = pPreEq->getBand(b);
-                    if (pEqBand == NULL) {
+                    if (pEqBand == nullptr) {
                         ALOGE("Error: updateParameters null PreEqBand for band %d", b);
                         return; //failed.
                     }
@@ -222,7 +254,7 @@
         bool changed = false;
 
         DPEq *pPostEq = pChannel->getPostEq();
-        if (pPostEq == NULL) {
+        if (pPostEq == nullptr) {
             ALOGE("Error: updateParameters null postEq for channel: %d", channelIndex);
             return; //failed.
         }
@@ -230,7 +262,7 @@
         if (cb.mPostEqEnabled) {
             for (unsigned int b = 0; b < getPostEqBandCount(); b++) {
                 DPEqBand *pEqBand = pPostEq->getBand(b);
-                if (pEqBand == NULL) {
+                if (pEqBand == nullptr) {
                     ALOGE("Error: updateParameters PostEqBand NULL for band %d", b);
                     return; //failed.
                 }
@@ -265,7 +297,7 @@
     //===MBC
     if (cb.mMbcInUse) {
         DPMbc *pMbc = pChannel->getMbc();
-        if (pMbc == NULL) {
+        if (pMbc == nullptr) {
             ALOGE("Error: updateParameters Mbc NULL for channel: %d", channelIndex);
             return;
         }
@@ -274,7 +306,7 @@
             bool changed = false;
             for (unsigned int b = 0; b < getMbcBandCount(); b++) {
                 DPMbcBand *pMbcBand = pMbc->getBand(b);
-                if (pMbcBand == NULL) {
+                if (pMbcBand == nullptr) {
                     ALOGE("Error: updateParameters MbcBand NULL for band %d", b);
                     return; //failed.
                 }
@@ -307,11 +339,38 @@
                     cb.computeBinStartStop(*pMbcBandParams, binNext);
                     binNext = pMbcBandParams->binStop + 1;
                 }
-
             }
-
         }
     }
+
+    //===Limiter
+    if (cb.mLimiterInUse) {
+        bool changed = false;
+        DPLimiter *pLimiter = pChannel->getLimiter();
+        if (pLimiter == nullptr) {
+            ALOGE("Error: updateParameters Limiter NULL for channel: %d", channelIndex);
+            return;
+        }
+        cb.mLimiterEnabled = pLimiter->isEnabled();
+        if (cb.mLimiterEnabled) {
+            IS_CHANGED(changed, cb.mLimiterParams.linkGroup,
+                    (int32_t)pLimiter->getLinkGroup());
+            cb.mLimiterParams.attackTimeMs = pLimiter->getAttackTime();
+            cb.mLimiterParams.releaseTimeMs = pLimiter->getReleaseTime();
+            cb.mLimiterParams.ratio = pLimiter->getRatio();
+            cb.mLimiterParams.thresholdDb = pLimiter->getThreshold();
+            cb.mLimiterParams.postGainDb = pLimiter->getPostGain();
+        }
+
+        if (changed) {
+            ALOGV("limiter changed, recomputing linkGroups for %d", channelIndex);
+            mLinkedLimiters.remove(channelIndex); //in case it was already there.
+            mLinkedLimiters.update(cb.mLimiterParams.linkGroup, channelIndex);
+        }
+    }
+
+    //=== Output Gain
+    cb.outputGainDb = pChannel->getOutputGain();
 }
 
 size_t DPFrequency::processSamples(const float *in, float *out, size_t samples) {
@@ -336,12 +395,8 @@
            }
        }
 
-       //TODO: lookahead limiters
-       //TODO: apply linked limiters to all channels.
-       //**Process each Channel
-       for (int ch = 0; ch < channelCount; ch++) {
-           processMono(mChannelBuffers[ch]);
-       }
+       //**process all channelBuffers
+       processChannelBuffers(mChannelBuffers);
 
        //** estimate how much data is available in ALL channels
        size_t available = mChannelBuffers[0].cBOutput.availableToRead();
@@ -370,62 +425,78 @@
        return samples;
 }
 
-size_t DPFrequency::processMono(ChannelBuffer &cb) {
-
+size_t DPFrequency::processChannelBuffers(CBufferVector &channelBuffers) {
+    const int channelCount = channelBuffers.size();
     size_t processedSamples = 0;
+    size_t processFrames = mBlockSize - mOverlapSize;
 
-    size_t available = cb.cBInput.availableToRead();
-    while (available >= mBlockSize - mOverlapSize) {
-
-        //move tail of previous
-        for (unsigned int k = 0; k < mOverlapSize; ++k) {
-            cb.input[k] = cb.input[mBlockSize - mOverlapSize + k];
-        }
-
-        //read new available data
-        for (unsigned int k = 0; k < mBlockSize - mOverlapSize; k++) {
-            cb.input[mOverlapSize + k] = cb.cBInput.read();
-        }
-
-        //## Actual process
-        processOneVector(cb.output, cb.input, cb);
-        //##End of Process
-
-        //mix tail (and capture new tail
-        for (unsigned int k = 0; k < mOverlapSize; k++) {
-            cb.output[k] += cb.outTail[k];
-            cb.outTail[k] = cb.output[mBlockSize - mOverlapSize + k]; //new tail
-        }
-
-        //output data
-        for (unsigned int k = 0; k < mBlockSize - mOverlapSize; k++) {
-            cb.cBOutput.write(cb.output[k]);
-        }
-
-        available = cb.cBInput.availableToRead();
+    size_t available = channelBuffers[0].cBInput.availableToRead();
+    for (int ch = 1; ch < channelCount; ch++) {
+        available = std::min(available, channelBuffers[ch].cBInput.availableToRead());
     }
 
+    while (available >= processFrames) {
+        //First pass
+        for (int ch = 0; ch < channelCount; ch++) {
+            ChannelBuffer * pCb = &channelBuffers[ch];
+            //move tail of previous
+            std::copy(pCb->input.begin() + processFrames,
+                    pCb->input.end(),
+                    pCb->input.begin());
+
+            //read new available data
+            for (unsigned int k = 0; k < processFrames; k++) {
+                pCb->input[mOverlapSize + k] = pCb->cBInput.read();
+            }
+            //first stages: fft, preEq, mbc, postEq and start of Limiter
+            processedSamples += processFirstStages(*pCb);
+        }
+
+        //**compute linked limiters and update levels if needed
+        processLinkedLimiters(channelBuffers);
+
+        //final pass.
+        for (int ch = 0; ch < channelCount; ch++) {
+            ChannelBuffer * pCb = &channelBuffers[ch];
+
+            //linked limiter and ifft
+            processLastStages(*pCb);
+
+            //mix tail (and capture the new tail)
+            for (unsigned int k = 0; k < mOverlapSize; k++) {
+                pCb->output[k] += pCb->outTail[k];
+                pCb->outTail[k] = pCb->output[processFrames + k]; //new tail
+            }
+
+            //output data
+            for (unsigned int k = 0; k < processFrames; k++) {
+                pCb->cBOutput.write(pCb->output[k]);
+            }
+        }
+        available -= processFrames;
+    }
     return processedSamples;
 }
-
-size_t DPFrequency::processOneVector(FloatVec & output, FloatVec & input,
-        ChannelBuffer &cb) {
+size_t DPFrequency::processFirstStages(ChannelBuffer &cb) {
 
     //##apply window
     Eigen::Map<Eigen::VectorXf> eWindow(&mVWindow[0], mVWindow.size());
-    Eigen::Map<Eigen::VectorXf> eInput(&input[0], input.size());
+    Eigen::Map<Eigen::VectorXf> eInput(&cb.input[0], cb.input.size());
 
     Eigen::VectorXf eWin = eInput.cwiseProduct(eWindow); //apply window
 
-    //##fft //TODO: refactor frequency transformations away from other stages.
-    mFftServer.fwd(mComplexTemp, eWin);
+    //##fft
+    //Note: we are using eigen with the default scaling, which ensures that
+    //  IFFT( FFT(x) ) = x.
+    // TODO: optimize by using the noscale option, and compensate with dB scale offsets
+    mFftServer.fwd(cb.complexTemp, eWin);
 
-    size_t cSize = mComplexTemp.size();
+    size_t cSize = cb.complexTemp.size();
     size_t maxBin = std::min(cSize/2, mHalfFFTSize);
 
     //== EqPre (always runs)
     for (size_t k = 0; k < maxBin; k++) {
-        mComplexTemp[k] *= cb.mPreEqFactorVector[k];
+        cb.complexTemp[k] *= cb.mPreEqFactorVector[k];
     }
 
     //== MBC
@@ -439,62 +510,68 @@
             float preGainSquared = preGainFactor * preGainFactor;
 
             for (size_t k = pMbcBandParams->binStart; k <= pMbcBandParams->binStop; k++) {
-                float fReal = mComplexTemp[k].real();
-                float fImag = mComplexTemp[k].imag();
-                float fSquare = (fReal * fReal + fImag * fImag) * preGainSquared;
-
-                fEnergySum += fSquare;
+                fEnergySum += std::norm(cb.complexTemp[k]) * preGainSquared; //mag squared
             }
 
-            fEnergySum = sqrt(fEnergySum /2.0);
+            //Eigen FFT is full spectrum, even if the source was real data.
+            // Each half spectrum has half the energy. This is taken into account with the * 2
+            // factor in the energy computations.
+            // energy = sqrt(sum_components_squared) / number_points
+            // here, the fEnergySum is duplicated to account for the second half spectrum,
+            // and the windowRms is used to normalize by the expected energy reduction
+            // caused by the window used (expected for steady state signals)
+            fEnergySum = sqrt(fEnergySum * 2) / (mBlockSize * mWindowRms);
+
+            // updates computed per frame advance.
             float fTheta = 0.0;
-            float fFAtt = pMbcBandParams->attackTimeMs;
-            float fFRel = pMbcBandParams->releaseTimeMs;
-
-            float fUpdatesPerSecond = 10; //TODO: compute from framerate
-
+            float fFAttSec = pMbcBandParams->attackTimeMs / 1000; //in seconds
+            float fFRelSec = pMbcBandParams->releaseTimeMs / 1000; //in seconds
 
             if (fEnergySum > pMbcBandParams->previousEnvelope) {
-                fTheta = exp(-1.0 / (fFAtt * fUpdatesPerSecond));
+                fTheta = exp(-1.0 / (fFAttSec * mBlocksPerSecond));
             } else {
-                fTheta = exp(-1.0 / (fFRel * fUpdatesPerSecond));
+                fTheta = exp(-1.0 / (fFRelSec * mBlocksPerSecond));
             }
 
             float fEnv = (1.0 - fTheta) * fEnergySum + fTheta * pMbcBandParams->previousEnvelope;
-
             //preserve for next iteration
             pMbcBandParams->previousEnvelope = fEnv;
 
-            float fThreshold = dBtoLinear(pMbcBandParams->thresholdDb);
-            float fNoiseGateThreshold = dBtoLinear(pMbcBandParams->noiseGateThresholdDb);
-
-            float fNewFactor = 1.0;
-
-            if (fEnv > fThreshold) {
-                float fDbAbove = linearToDb(fThreshold / fEnv);
-                float fDbTarget = fDbAbove / pMbcBandParams->ratio;
-                float fDbChange = fDbAbove - fDbTarget;
-                fNewFactor = dBtoLinear(fDbChange);
-            } else if (fEnv < fNoiseGateThreshold) {
-                if (fEnv < MIN_ENVELOPE) {
-                    fEnv = MIN_ENVELOPE;
-                }
-                float fDbBelow = linearToDb(fNoiseGateThreshold / fEnv);
-                float fDbTarget = fDbBelow / pMbcBandParams->expanderRatio;
-                float fDbChange = fDbBelow - fDbTarget;
-                fNewFactor = dBtoLinear(fDbChange);
+            if (fEnv < MIN_ENVELOPE) {
+                fEnv = MIN_ENVELOPE;
             }
+            const float envDb = linearToDb(fEnv);
+            float newLevelDb = envDb;
+            //using shorter variables for code clarity
+            const float thresholdDb = pMbcBandParams->thresholdDb;
+            const float ratio = pMbcBandParams->ratio;
+            const float kneeWidthDbHalf = pMbcBandParams->kneeWidthDb / 2;
+            const float noiseGateThresholdDb = pMbcBandParams->noiseGateThresholdDb;
+            const float expanderRatio = pMbcBandParams->expanderRatio;
+
+            //find segment
+            if (envDb > thresholdDb + kneeWidthDbHalf) {
+                //compression segment
+                newLevelDb = envDb + ((1 / ratio) - 1) * (envDb - thresholdDb);
+            } else if (envDb > thresholdDb - kneeWidthDbHalf) {
+                //knee-compression segment
+                float temp = (envDb - thresholdDb + kneeWidthDbHalf);
+                newLevelDb = envDb + ((1 / ratio) - 1) *
+                        temp * temp / (kneeWidthDbHalf * 4);
+            } else if (envDb < noiseGateThresholdDb) {
+                //expander segment
+                newLevelDb = noiseGateThresholdDb -
+                        expanderRatio * (noiseGateThresholdDb - envDb);
+            }
+
+            float newFactor = dBtoLinear(newLevelDb - envDb);
 
             //apply post gain.
-            fNewFactor *= dBtoLinear(pMbcBandParams->gainPostDb);
-
-            if (fNewFactor < 0) {
-                fNewFactor = 0;
-            }
+            newFactor *= dBtoLinear(pMbcBandParams->gainPostDb);
 
             //apply to this band
             for (size_t k = pMbcBandParams->binStart; k <= pMbcBandParams->binStop; k++) {
-                mComplexTemp[k] *= fNewFactor;
+                cb.complexTemp[k] *= newFactor;
             }
 
         } //end per band process
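
The per-band gain computation above is a standard static compressor curve: unity gain below the knee, a quadratic soft knee of width kneeWidthDb centered on the threshold, 1:ratio compression above it, and downward expansion below the noise gate. A minimal stand-alone sketch of that curve (names are illustrative, not part of the patch); the factor actually applied to the bins is then dBtoLinear(newLevelDb - envDb):

    // Static level curve per band, in dB. Mirrors the segment selection above.
    static float computeLevelDb(float envDb, float thresholdDb, float ratio,
                                float kneeWidthDb, float noiseGateThresholdDb,
                                float expanderRatio) {
        const float kneeHalf = kneeWidthDb / 2;
        if (envDb > thresholdDb + kneeHalf) {
            // compression segment: slope 1/ratio above the threshold
            return envDb + ((1 / ratio) - 1) * (envDb - thresholdDb);
        } else if (envDb > thresholdDb - kneeHalf) {
            // quadratic knee joining the unity and 1/ratio slopes smoothly
            const float t = envDb - thresholdDb + kneeHalf;
            return envDb + ((1 / ratio) - 1) * t * t / (kneeHalf * 4);
        } else if (envDb < noiseGateThresholdDb) {
            // expander segment below the noise gate
            return noiseGateThresholdDb - expanderRatio * (noiseGateThresholdDb - envDb);
        }
        return envDb; // unity region, no gain change
    }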
@@ -504,14 +581,94 @@
     //== EqPost
     if (cb.mPostEqInUse && cb.mPostEqEnabled) {
         for (size_t k = 0; k < maxBin; k++) {
-            mComplexTemp[k] *= cb.mPostEqFactorVector[k];
+            cb.complexTemp[k] *= cb.mPostEqFactorVector[k];
+        }
+    }
+
+    //== Limiter. First Pass
+    if (cb.mLimiterInUse && cb.mLimiterEnabled) {
+        float fEnergySum = 0;
+        for (size_t k = 0; k < maxBin; k++) {
+            fEnergySum += std::norm(cb.complexTemp[k]);
+        }
+
+        //see explanation above for energy computation logic
+        fEnergySum = sqrt(fEnergySum * 2) / (mBlockSize * mWindowRms);
+        float fTheta = 0.0;
+        float fFAttSec = cb.mLimiterParams.attackTimeMs / 1000; //in seconds
+        float fFRelSec = cb.mLimiterParams.releaseTimeMs / 1000; //in seconds
+
+        if (fEnergySum > cb.mLimiterParams.previousEnvelope) {
+            fTheta = exp(-1.0 / (fFAttSec * mBlocksPerSecond));
+        } else {
+            fTheta = exp(-1.0 / (fFRelSec * mBlocksPerSecond));
+        }
+
+        float fEnv = (1.0 - fTheta) * fEnergySum + fTheta * cb.mLimiterParams.previousEnvelope;
+        //preserve for next iteration
+        cb.mLimiterParams.previousEnvelope = fEnv;
+
+        const float envDb = linearToDb(fEnv);
+        float newFactorDb = 0;
+        //using shorter variables for code clarity
+        const float thresholdDb = cb.mLimiterParams.thresholdDb;
+        const float ratio = cb.mLimiterParams.ratio;
+
+        if (envDb > thresholdDb) {
+            //limiter segment
+            newFactorDb = ((1 / ratio) - 1) * (envDb - thresholdDb);
+        }
+
+        float newFactor = dBtoLinear(newFactorDb);
+
+        cb.mLimiterParams.newFactor = newFactor;
+
+    } //end Limiter
+    return mBlockSize;
+}
+
+void DPFrequency::processLinkedLimiters(CBufferVector &channelBuffers) {
+
+    const int channelCount = channelBuffers.size();
+    for (auto &groupPair : mLinkedLimiters.mGroupsMap) {
+        float minFactor = 1.0;
+        //estimate minfactor for all linked
+        for(int index : groupPair.second) {
+            if (index >= 0 && index < channelCount) {
+                minFactor = std::min(channelBuffers[index].mLimiterParams.newFactor, minFactor);
+            }
+        }
+        //apply minFactor
+        for(int index : groupPair.second) {
+            if (index >= 0 && index < channelCount) {
+                channelBuffers[index].mLimiterParams.linkFactor = minFactor;
+            }
+        }
+    }
+}
+
+size_t DPFrequency::processLastStages(ChannelBuffer &cb) {
+
+    float outputGainFactor = dBtoLinear(cb.outputGainDb);
+    //== Limiter. last Pass
+    if (cb.mLimiterInUse && cb.mLimiterEnabled) {
+        //compute factor, with post-gain
+        float factor = cb.mLimiterParams.linkFactor * dBtoLinear(cb.mLimiterParams.postGainDb);
+        outputGainFactor *= factor;
+    }
+
+    //apply to all if != 1.0
+    if (!compareEquality(outputGainFactor, 1.0f)) {
+        size_t cSize = cb.complexTemp.size();
+        size_t maxBin = std::min(cSize/2, mHalfFFTSize);
+        for (size_t k = 0; k < maxBin; k++) {
+            cb.complexTemp[k] *= outputGainFactor;
         }
     }
 
     //##ifft directly to output.
-    Eigen::Map<Eigen::VectorXf> eOutput(&output[0], output.size());
-    mFftServer.inv(eOutput, mComplexTemp);
-
+    Eigen::Map<Eigen::VectorXf> eOutput(&cb.output[0], cb.output.size());
+    mFftServer.inv(eOutput, cb.complexTemp);
     return mBlockSize;
 }
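
Both the MBC bands and the limiter above smooth the measured block energy with a one-pole filter whose coefficient depends on whether the level is rising (attack) or falling (release), using theta = exp(-1 / (tauSeconds * blocksPerSecond)). A small sketch of that smoother in isolation (names illustrative; blocksPerSecond corresponds to mBlocksPerSecond, the per-frame-advance update rate):

    #include <cmath>

    // One-pole attack/release envelope smoother, one call per processed block.
    static float smoothEnvelope(float blockEnergy, float previousEnvelope,
                                float attackSec, float releaseSec,
                                float blocksPerSecond) {
        const float tauSec = (blockEnergy > previousEnvelope) ? attackSec : releaseSec;
        const float theta = expf(-1.0f / (tauSec * blocksPerSecond));
        return (1.0f - theta) * blockEnergy + theta * previousEnvelope;
    }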
 
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.h b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
index 9919142..be8771d 100644
--- a/media/libeffects/dynamicsproc/dsp/DPFrequency.h
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
@@ -39,8 +39,11 @@
     FloatVec output;    // time domain temp vector for output
     FloatVec outTail;   // time domain temp vector for output tail (for overlap-add method)
 
+    Eigen::VectorXcf complexTemp; // complex temp vector for frequency domain operations
+
     //Current parameters
     float inputGainDb;
+    float outputGainDb;
     struct BandParams {
         bool enabled;
         float freqCutoffHz;
@@ -64,6 +67,19 @@
         //Historic values
         float previousEnvelope;
     };
+    struct LimiterParams {
+        int32_t linkGroup;
+        float attackTimeMs;
+        float releaseTimeMs;
+        float ratio;
+        float thresholdDb;
+        float postGainDb;
+
+        //Historic values
+        float previousEnvelope;
+        float newFactor;
+        float linkFactor;
+    };
 
     bool mPreEqInUse;
     bool mPreEqEnabled;
@@ -79,6 +95,7 @@
 
     bool mLimiterInUse;
     bool mLimiterEnabled;
+    LimiterParams mLimiterParams;
     FloatVec mPreEqFactorVector; // temp pre-computed vector to shape spectrum at preEQ stage
     FloatVec mPostEqFactorVector; // temp pre-computed vector to shape spectrum at postEQ stage
 
@@ -91,6 +108,18 @@
 
 };
 
+using CBufferVector = std::vector<ChannelBuffer>;
+
+using GroupsMap = std::map<int32_t, IntVec>;
+
+class LinkedLimiters {
+public:
+    void reset();
+    void update(int32_t group, int index);
+    void remove(int index);
+    GroupsMap mGroupsMap;
+};
+
 class DPFrequency : public DPBase {
 public:
     virtual size_t processSamples(const float *in, float *out, size_t samples);
@@ -104,16 +133,25 @@
     size_t processMono(ChannelBuffer &cb);
     size_t processOneVector(FloatVec &output, FloatVec &input, ChannelBuffer &cb);
 
+    size_t processChannelBuffers(CBufferVector &channelBuffers);
+    size_t processFirstStages(ChannelBuffer &cb);
+    size_t processLastStages(ChannelBuffer &cb);
+    void processLinkedLimiters(CBufferVector &channelBuffers);
+
     size_t mBlockSize;
     size_t mHalfFFTSize;
     size_t mOverlapSize;
     size_t mSamplingRate;
 
-    std::vector<ChannelBuffer> mChannelBuffers;
+    float mBlocksPerSecond;
+
+    CBufferVector mChannelBuffers;
+
+    LinkedLimiters mLinkedLimiters;
 
     //dsp
     FloatVec mVWindow;  //window class.
-    Eigen::VectorXcf mComplexTemp;
+    float mWindowRms;
     Eigen::FFT<float> mFftServer;
 };
 
diff --git a/media/libeffects/dynamicsproc/dsp/RDsp.h b/media/libeffects/dynamicsproc/dsp/RDsp.h
index 1048442..cfa1305 100644
--- a/media/libeffects/dynamicsproc/dsp/RDsp.h
+++ b/media/libeffects/dynamicsproc/dsp/RDsp.h
@@ -20,10 +20,25 @@
 #include <complex>
 #include <log/log.h>
 #include <vector>
+#include <map>
 using FloatVec = std::vector<float>;
+using IntVec = std::vector<int>;
 using ComplexVec  = std::vector<std::complex<float>>;
 
 // =======
+// Helper Functions
+// =======
+template <class T>
+static T dBtoLinear(T valueDb) {
+    return pow (10, valueDb / 20.0);
+}
+
+template <class T>
+static T linearToDb(T value) {
+    return 20 * log10(value);
+}
+
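
The helpers follow the usual amplitude convention (20·log10); a quick round-trip check of the same formulas, written out stand-alone (not part of the patch):

    #include <cassert>
    #include <cmath>

    // Same convention as dBtoLinear()/linearToDb() above, spelled out with floats.
    static float toLinear(float valueDb) { return powf(10.0f, valueDb / 20.0f); }
    static float toDb(float value) { return 20.0f * log10f(value); }

    int main() {
        // -6.0206 dB is half amplitude, and the two conversions are inverses.
        assert(std::fabs(toLinear(-6.0206f) - 0.5f) < 1e-3f);
        assert(std::fabs(toDb(toLinear(-3.0f)) - (-3.0f)) < 1e-4f);
        return 0;
    }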
+// =======
 // DSP window creation
 // =======
 
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 8dae251..01f014f 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -271,17 +271,43 @@
 
 /////////////////////////////////////////////////////////////////////////
 
+struct HeifDecoderImpl::DecodeThread : public Thread {
+    explicit DecodeThread(HeifDecoderImpl *decoder) : mDecoder(decoder) {}
+
+private:
+    HeifDecoderImpl* mDecoder;
+
+    bool threadLoop();
+
+    DISALLOW_EVIL_CONSTRUCTORS(DecodeThread);
+};
+
+bool HeifDecoderImpl::DecodeThread::threadLoop() {
+    return mDecoder->decodeAsync();
+}
+
+/////////////////////////////////////////////////////////////////////////
+
 HeifDecoderImpl::HeifDecoderImpl() :
     // output color format should always be set via setOutputColor(), in case
     // it's not, default to HAL_PIXEL_FORMAT_RGB_565.
     mOutputColor(HAL_PIXEL_FORMAT_RGB_565),
     mCurScanline(0),
+    mWidth(0),
+    mHeight(0),
     mFrameDecoded(false),
     mHasImage(false),
-    mHasVideo(false) {
+    mHasVideo(false),
+    mAvailableLines(0),
+    mNumSlices(1),
+    mSliceHeight(0),
+    mAsyncDecodeDone(false) {
 }
 
 HeifDecoderImpl::~HeifDecoderImpl() {
+    if (mThread != nullptr) {
+        mThread->join();
+    }
 }
 
 bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
@@ -310,22 +336,23 @@
 
     mHasImage = hasImage && !strcasecmp(hasImage, "yes");
     mHasVideo = hasVideo && !strcasecmp(hasVideo, "yes");
+    sp<IMemory> sharedMem;
     if (mHasImage) {
         // image index < 0 to retrieve primary image
-        mFrameMemory = mRetriever->getImageAtIndex(
+        sharedMem = mRetriever->getImageAtIndex(
                 -1, mOutputColor, true /*metaOnly*/);
     } else if (mHasVideo) {
-        mFrameMemory = mRetriever->getFrameAtTime(0,
+        sharedMem = mRetriever->getFrameAtTime(0,
                 MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
                 mOutputColor, true /*metaOnly*/);
     }
 
-    if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+    if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
         ALOGE("getFrameAtTime: videoFrame is a nullptr");
         return false;
     }
 
-    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+    VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
 
     ALOGV("Meta dimension %dx%d, display %dx%d, angle %d, iccSize %d",
             videoFrame->mWidth,
@@ -344,6 +371,14 @@
                 videoFrame->mIccSize,
                 videoFrame->getFlattenedIccData());
     }
+    mWidth = videoFrame->mWidth;
+    mHeight = videoFrame->mHeight;
+    if (mHasImage && videoFrame->mTileHeight >= 512 && mWidth >= 3000 && mHeight >= 2000) {
+        // Try decoding in slices only if the image has tiles and is big enough.
+        mSliceHeight = videoFrame->mTileHeight;
+        mNumSlices = (videoFrame->mHeight + mSliceHeight - 1) / mSliceHeight;
+        ALOGV("mSliceHeight %u, mNumSlices %zu", mSliceHeight, mNumSlices);
+    }
     return true;
 }
 
@@ -376,6 +411,36 @@
     return false;
 }
 
+bool HeifDecoderImpl::decodeAsync() {
+    for (size_t i = 1; i < mNumSlices; i++) {
+        ALOGV("decodeAsync(): decoding slice %zu", i);
+        size_t top = i * mSliceHeight;
+        size_t bottom = (i + 1) * mSliceHeight;
+        if (bottom > mHeight) {
+            bottom = mHeight;
+        }
+        sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+                -1, mOutputColor, 0, top, mWidth, bottom);
+        {
+            Mutex::Autolock autolock(mLock);
+
+            if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+                mAsyncDecodeDone = true;
+                mScanlineReady.signal();
+                break;
+            }
+            mFrameMemory = frameMemory;
+            mAvailableLines = bottom;
+            ALOGV("decodeAsync(): available lines %zu", mAvailableLines);
+            mScanlineReady.signal();
+        }
+    }
+    // Aggressively clear to avoid holding on to resources
+    mRetriever.clear();
+    mDataSource.clear();
+    return false;
+}
+
 bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
     // reset scanline pointer
     mCurScanline = 0;
@@ -384,6 +449,47 @@
         return true;
     }
 
+    // See if we want to decode in slices to allow the client to start
+    // scanline processing in parallel with decoding. If this fails,
+    // we fall back to decoding the full frame.
+    if (mHasImage && mNumSlices > 1) {
+        // get first slice and metadata
+        sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+                -1, mOutputColor, 0, 0, mWidth, mSliceHeight);
+
+        if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+            ALOGE("decode: metadata is a nullptr");
+            return false;
+        }
+
+        VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->pointer());
+
+        if (frameInfo != nullptr) {
+            frameInfo->set(
+                    videoFrame->mWidth,
+                    videoFrame->mHeight,
+                    videoFrame->mRotationAngle,
+                    videoFrame->mBytesPerPixel,
+                    videoFrame->mIccSize,
+                    videoFrame->getFlattenedIccData());
+        }
+
+        mFrameMemory = frameMemory;
+        mAvailableLines = mSliceHeight;
+        mThread = new DecodeThread(this);
+        if (mThread->run("HeifDecode", ANDROID_PRIORITY_FOREGROUND) == OK) {
+            mFrameDecoded = true;
+            return true;
+        }
+
+        // Fall back to decoding without slicing
+        mThread.clear();
+        mNumSlices = 1;
+        mSliceHeight = 0;
+        mAvailableLines = 0;
+        mFrameMemory.clear();
+    }
+
     if (mHasImage) {
         // image index < 0 to retrieve primary image
         mFrameMemory = mRetriever->getImageAtIndex(-1, mOutputColor);
@@ -393,14 +499,14 @@
     }
 
     if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
-        ALOGE("getFrameAtTime: videoFrame is a nullptr");
+        ALOGE("decode: videoFrame is a nullptr");
         return false;
     }
 
     VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
     if (videoFrame->mSize == 0 ||
             mFrameMemory->size() < videoFrame->getFlattenedSize()) {
-        ALOGE("getFrameAtTime: videoFrame size is invalid");
+        ALOGE("decode: videoFrame size is invalid");
         return false;
     }
 
@@ -424,36 +530,45 @@
     }
     mFrameDecoded = true;
 
-    // Aggressive clear to avoid holding on to resources
+    // Aggressively clear to avoid holding on to resources
     mRetriever.clear();
     mDataSource.clear();
     return true;
 }
 
-bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
     if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
         return false;
     }
     VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
-    if (mCurScanline >= videoFrame->mHeight) {
-        ALOGE("no more scanline available");
-        return false;
-    }
     uint8_t* src = videoFrame->getFlattenedData() + videoFrame->mRowBytes * mCurScanline++;
     memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mWidth);
     return true;
 }
 
-size_t HeifDecoderImpl::skipScanlines(size_t count) {
-    if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
-        return 0;
+bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+    if (mCurScanline >= mHeight) {
+        ALOGE("no more scanline available");
+        return false;
     }
-    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
 
+    if (mNumSlices > 1) {
+        Mutex::Autolock autolock(mLock);
+
+        while (!mAsyncDecodeDone && mCurScanline >= mAvailableLines) {
+            mScanlineReady.wait(mLock);
+        }
+        return (mCurScanline < mAvailableLines) ? getScanlineInner(dst) : false;
+    }
+
+    return getScanlineInner(dst);
+}
+
+size_t HeifDecoderImpl::skipScanlines(size_t count) {
     uint32_t oldScanline = mCurScanline;
     mCurScanline += count;
-    if (mCurScanline > videoFrame->mHeight) {
-        mCurScanline = videoFrame->mHeight;
+    if (mCurScanline > mHeight) {
+        mCurScanline = mHeight;
     }
     return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
 }
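
The slice path added to HeifDecoderImpl is a producer/consumer handshake: decodeAsync() publishes mAvailableLines under mLock and signals mScanlineReady after each slice, while getScanline() waits until its line is available or decoding has finished. The same pattern with standard-library primitives, reduced to its essentials (illustrative only; the patch itself uses android::Mutex and Condition):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    // Minimal form of the handshake: the decoder thread publishes progress,
    // the scanline reader blocks until its line exists or decoding is over.
    struct SliceProgress {
        std::mutex lock;
        std::condition_variable ready;
        size_t availableLines = 0;
        bool done = false;

        void publish(size_t lines) {            // producer, after each decoded slice
            std::lock_guard<std::mutex> guard(lock);
            availableLines = lines;
            ready.notify_all();
        }
        void finish() {                         // producer, on completion or error
            std::lock_guard<std::mutex> guard(lock);
            done = true;
            ready.notify_all();
        }
        bool waitForLine(size_t line) {         // consumer: true if the line was produced
            std::unique_lock<std::mutex> guard(lock);
            ready.wait(guard, [&] { return done || line < availableLines; });
            return line < availableLines;
        }
    };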
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index 406c2c1..528ee3b 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -19,6 +19,8 @@
 
 #include "include/HeifDecoderAPI.h"
 #include <system/graphics.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
 #include <utils/RefBase.h>
 
 namespace android {
@@ -49,14 +51,30 @@
     size_t skipScanlines(size_t count) override;
 
 private:
+    struct DecodeThread;
+
     sp<IDataSource> mDataSource;
     sp<MediaMetadataRetriever> mRetriever;
     sp<IMemory> mFrameMemory;
     android_pixel_format_t mOutputColor;
     size_t mCurScanline;
+    uint32_t mWidth;
+    uint32_t mHeight;
     bool mFrameDecoded;
     bool mHasImage;
     bool mHasVideo;
+
+    // Slice decoding only
+    Mutex mLock;
+    Condition mScanlineReady;
+    sp<DecodeThread> mThread;
+    size_t mAvailableLines;
+    size_t mNumSlices;
+    uint32_t mSliceHeight;
+    bool mAsyncDecodeDone;
+
+    bool decodeAsync();
+    bool getScanlineInner(uint8_t* dst);
 };
 
 } // namespace android
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 9d9ac8c..a22819a 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -32,6 +32,9 @@
         "libaudioclient_headers",
         "libaudio_system_headers",
     ],
+    export_header_lib_headers: [
+        "libmedia_headers",
+    ],
     clang: true,
 }
 
@@ -41,6 +44,7 @@
     vndk: {
         enabled: true,
     },
+    double_loadable: true,
 
     srcs: [
         "aidl/android/IGraphicBufferSource.aidl",
@@ -203,6 +207,7 @@
     ],
 
     shared_libs: [
+        "android.hidl.token@1.0-utils",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index cb0e927..1c95e27 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -36,6 +36,8 @@
 const char * const AudioParameter::keyHwAvSync = AUDIO_PARAMETER_HW_AV_SYNC;
 const char * const AudioParameter::keyPresentationId = AUDIO_PARAMETER_STREAM_PRESENTATION_ID;
 const char * const AudioParameter::keyProgramId = AUDIO_PARAMETER_STREAM_PROGRAM_ID;
+const char * const AudioParameter::keyAudioLanguagePreferred =
+        AUDIO_PARAMETER_KEY_AUDIO_LANGUAGE_PREFERRED;
 const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
 const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
 const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
@@ -47,6 +49,8 @@
 const char * const AudioParameter::valueOn = AUDIO_PARAMETER_VALUE_ON;
 const char * const AudioParameter::valueOff = AUDIO_PARAMETER_VALUE_OFF;
 const char * const AudioParameter::valueListSeparator = AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
+const char * const AudioParameter::keyReconfigA2dp = AUDIO_PARAMETER_RECONFIG_A2DP;
+const char * const AudioParameter::keyReconfigA2dpSupported = AUDIO_PARAMETER_A2DP_RECONFIG_SUPPORTED;
 
 AudioParameter::AudioParameter(const String8& keyValuePairs)
 {
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index f725c97..590ba1a 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -69,6 +69,7 @@
     SET_DATA_SOURCE_CALLBACK,
     GET_FRAME_AT_TIME,
     GET_IMAGE_AT_INDEX,
+    GET_IMAGE_RECT_AT_INDEX,
     GET_FRAME_AT_INDEX,
     EXTRACT_ALBUM_ART,
     EXTRACT_METADATA,
@@ -166,15 +167,16 @@
         return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
-    sp<IMemory> getImageAtIndex(int index, int colorFormat, bool metaOnly)
+    sp<IMemory> getImageAtIndex(int index, int colorFormat, bool metaOnly, bool thumbnail)
     {
-        ALOGV("getImageAtIndex: index %d, colorFormat(%d) metaOnly(%d)",
-                index, colorFormat, metaOnly);
+        ALOGV("getImageAtIndex: index %d, colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+                index, colorFormat, metaOnly, thumbnail);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
         data.writeInt32(index);
         data.writeInt32(colorFormat);
         data.writeInt32(metaOnly);
+        data.writeInt32(thumbnail);
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
         sendSchedPolicy(data);
 #endif
@@ -186,6 +188,30 @@
         return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
+    sp<IMemory> getImageRectAtIndex(
+            int index, int colorFormat, int left, int top, int right, int bottom)
+    {
+        ALOGV("getImageRectAtIndex: index %d, colorFormat(%d) rect {%d, %d, %d, %d}",
+                index, colorFormat, left, top, right, bottom);
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+        data.writeInt32(index);
+        data.writeInt32(colorFormat);
+        data.writeInt32(left);
+        data.writeInt32(top);
+        data.writeInt32(right);
+        data.writeInt32(bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+        sendSchedPolicy(data);
+#endif
+        remote()->transact(GET_IMAGE_RECT_AT_INDEX, data, &reply);
+        status_t ret = reply.readInt32();
+        if (ret != NO_ERROR) {
+            return NULL;
+        }
+        return interface_cast<IMemory>(reply.readStrongBinder());
+    }
+
     status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
             int frameIndex, int numFrames, int colorFormat, bool metaOnly)
     {
@@ -356,12 +382,13 @@
             int index = data.readInt32();
             int colorFormat = data.readInt32();
             bool metaOnly = (data.readInt32() != 0);
-            ALOGV("getImageAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
-                    index, colorFormat, metaOnly);
+            bool thumbnail = (data.readInt32() != 0);
+            ALOGV("getImageAtIndex: index(%d), colorFormat(%d), metaOnly(%d), thumbnail(%d)",
+                    index, colorFormat, metaOnly, thumbnail);
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
             setSchedPolicy(data);
 #endif
-            sp<IMemory> bitmap = getImageAtIndex(index, colorFormat, metaOnly);
+            sp<IMemory> bitmap = getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
             if (bitmap != 0) {  // Don't send NULL across the binder interface
                 reply->writeInt32(NO_ERROR);
                 reply->writeStrongBinder(IInterface::asBinder(bitmap));
@@ -373,6 +400,34 @@
 #endif
             return NO_ERROR;
         } break;
+
+        case GET_IMAGE_RECT_AT_INDEX: {
+            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+            int index = data.readInt32();
+            int colorFormat = data.readInt32();
+            int left = data.readInt32();
+            int top = data.readInt32();
+            int right = data.readInt32();
+            int bottom = data.readInt32();
+            ALOGV("getImageRectAtIndex: index(%d), colorFormat(%d), rect {%d, %d, %d, %d}",
+                    index, colorFormat, left, top, right, bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+            setSchedPolicy(data);
+#endif
+            sp<IMemory> bitmap = getImageRectAtIndex(
+                    index, colorFormat, left, top, right, bottom);
+            if (bitmap != 0) {  // Don't send NULL across the binder interface
+                reply->writeInt32(NO_ERROR);
+                reply->writeStrongBinder(IInterface::asBinder(bitmap));
+            } else {
+                reply->writeInt32(UNKNOWN_ERROR);
+            }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+            restoreSchedPolicy();
+#endif
+            return NO_ERROR;
+        } break;
+
         case GET_FRAME_AT_INDEX: {
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
             int frameIndex = data.readInt32();
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index 6f56d0c..272bc30 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -31,6 +31,18 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <utils/Errors.h>
 
+// TODO: remove forward declaration when AMediaExtractor_disconnect is officially added to NDK
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+media_status_t AMediaExtractor_disconnect(AMediaExtractor *);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
 namespace android {
 
 static const size_t kAESBlockSize = 16;  // AES_BLOCK_SIZE
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index a3db754..514c795 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -132,6 +132,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_MMAP_NOIRQ),
     MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_VOIP_TX),
     MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_AV_SYNC),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_DIRECT),
     TERMINATOR
 };
 
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
index 9d026f6..d6a9cfb 100644
--- a/media/libmedia/include/media/BufferProviders.h
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -161,6 +161,17 @@
     const audio_format_t mOutputFormat;
 };
 
+// ClampFloatBufferProvider derives from CopyBufferProvider to clamp floats inside -3 dB
+class ClampFloatBufferProvider : public CopyBufferProvider {
+public:
+    ClampFloatBufferProvider(int32_t channelCount,
+            size_t bufferFrameCount);
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+    const uint32_t       mChannelCount;
+};
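
ClampFloatBufferProvider is declared above with only a comment describing its behavior (its copyFrames() body is not in this hunk). For reference, a -3 dB amplitude bound is 10^(-3/20) ≈ 0.7079, so a clamp of that kind reduces to a per-sample operation like the following (illustrative sketch, not the provider's actual implementation):

    #include <algorithm>
    #include <cstddef>

    // Clamp interleaved float samples to the -3 dB full-scale bound.
    static void clampMinus3dB(float* dst, const float* src, size_t sampleCount) {
        constexpr float kLimit = 0.70794578f; // dBtoLinear(-3)
        for (size_t i = 0; i < sampleCount; ++i) {
            dst[i] = std::max(-kLimit, std::min(kLimit, src[i]));
        }
    }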
+
 // TimestretchBufferProvider derives from PassthruBufferProvider for time stretching
 class TimestretchBufferProvider : public PassthruBufferProvider {
 public:
diff --git a/media/libmedia/include/media/CodecServiceRegistrant.h b/media/libmedia/include/media/CodecServiceRegistrant.h
new file mode 100644
index 0000000..e0af781
--- /dev/null
+++ b/media/libmedia/include/media/CodecServiceRegistrant.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC_SERVICE_REGISTRANT_H_
+
+#define CODEC_SERVICE_REGISTRANT_H_
+
+typedef void (*RegisterCodecServicesFunc)();
+
+#endif  // CODEC_SERVICE_REGISTRANT_H_
diff --git a/media/libmedia/include/media/CryptoHal.h b/media/libmedia/include/media/CryptoHal.h
index 4414e9d..ff8789d 100644
--- a/media/libmedia/include/media/CryptoHal.h
+++ b/media/libmedia/include/media/CryptoHal.h
@@ -81,7 +81,20 @@
      */
     status_t mInitCheck;
 
-    KeyedVector<int32_t, uint32_t> mHeapBases;
+    struct HeapBase {
+        HeapBase() : mBufferId(0), mSize(0) {}
+        HeapBase(uint32_t bufferId, size_t size) :
+            mBufferId(bufferId), mSize(size) {}
+
+        uint32_t getBufferId() const {return mBufferId;}
+        size_t getSize() const {return mSize;}
+
+    private:
+        uint32_t mBufferId;
+        size_t mSize;
+    };
+
+    KeyedVector<int32_t, HeapBase> mHeapBases;
     uint32_t mNextBufferId;
     int32_t mHeapSeqNum;
 
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index 5491535..c6f422d 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -45,7 +45,9 @@
     virtual sp<IMemory>     getFrameAtTime(
             int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
     virtual sp<IMemory>     getImageAtIndex(
-            int index, int colorFormat, bool metaOnly) = 0;
+            int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+    virtual sp<IMemory>     getImageRectAtIndex(
+            int index, int colorFormat, int left, int top, int right, int bottom) = 0;
     virtual status_t        getFrameAtIndex(
             std::vector<sp<IMemory> > *frames,
             int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 116b548..98d300f 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -43,38 +43,19 @@
 
     virtual status_t    setDataSource(int fd, int64_t offset, int64_t length) = 0;
     virtual status_t    setDataSource(const sp<DataSource>& source, const char *mime) = 0;
-    virtual VideoFrame* getFrameAtTime(
+    virtual sp<IMemory> getFrameAtTime(
             int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
-    virtual VideoFrame* getImageAtIndex(
-            int index, int colorFormat, bool metaOnly) = 0;
+    virtual sp<IMemory> getImageAtIndex(
+            int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+    virtual sp<IMemory> getImageRectAtIndex(
+            int index, int colorFormat, int left, int top, int right, int bottom) = 0;
     virtual status_t getFrameAtIndex(
-            std::vector<VideoFrame*>* frames,
+            std::vector<sp<IMemory> >* frames,
             int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
     virtual MediaAlbumArt* extractAlbumArt() = 0;
     virtual const char* extractMetadata(int keyCode) = 0;
 };
 
-// MediaMetadataRetrieverInterface
-class MediaMetadataRetrieverInterface : public MediaMetadataRetrieverBase
-{
-public:
-    MediaMetadataRetrieverInterface() {}
-
-    virtual             ~MediaMetadataRetrieverInterface() {}
-    virtual VideoFrame* getFrameAtTime(
-            int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
-    { return NULL; }
-    virtual VideoFrame* getImageAtIndex(
-            int /*index*/, int /*colorFormat*/, bool /*metaOnly*/)
-    { return NULL; }
-    virtual status_t getFrameAtIndex(
-            std::vector<VideoFrame*>* /*frames*/,
-            int /*frameIndex*/, int /*numFrames*/, int /*colorFormat*/, bool /*metaOnly*/)
-    { return ERROR_UNSUPPORTED; }
-    virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
-    virtual const char* extractMetadata(int /*keyCode*/) { return NULL; }
-};
-
 }; // namespace android
 
 #endif // ANDROID_MEDIAMETADATARETRIEVERINTERFACE_H
diff --git a/media/libmedia/include/media/MediaProfiles.h b/media/libmedia/include/media/MediaProfiles.h
index 6975581..0feb4f3 100644
--- a/media/libmedia/include/media/MediaProfiles.h
+++ b/media/libmedia/include/media/MediaProfiles.h
@@ -83,6 +83,7 @@
      * successful only when validation is successful.
      */
     static constexpr char const * const xmlFiles[] = {
+            "odm/etc/media_profiles_V1_0.xml",
             "vendor/etc/media_profiles_V1_0.xml",
             "system/etc/media_profiles.xml"
             };
diff --git a/media/libmedia/include/media/MediaResource.h b/media/libmedia/include/media/MediaResource.h
index 1957a45..e1fdb9b 100644
--- a/media/libmedia/include/media/MediaResource.h
+++ b/media/libmedia/include/media/MediaResource.h
@@ -29,7 +29,8 @@
         kUnspecified = 0,
         kSecureCodec,
         kNonSecureCodec,
-        kGraphicMemory
+        kGraphicMemory,
+        kCpuBoost,
     };
 
     enum SubType {
diff --git a/media/libmedia/include/media/PatchBuilder.h b/media/libmedia/include/media/PatchBuilder.h
new file mode 100644
index 0000000..f2722a6
--- /dev/null
+++ b/media/libmedia/include/media/PatchBuilder.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PATCH_BUILDER_H
+#define ANDROID_PATCH_BUILDER_H
+
+#include <functional>
+#include <utility>
+
+#include <system/audio.h>
+#include <utils/StrongPointer.h>
+
+// This is a header-only utility.
+
+namespace android {
+
+class PatchBuilder {
+  public:
+    using mix_usecase_t = decltype(audio_port_config_mix_ext::usecase);
+
+    PatchBuilder() = default;
+
+    // All existing methods operating on audio patches take a pointer to const.
+    // It's OK to construct a temporary PatchBuilder while preparing a parameter
+    // to such a function because the Builder will be kept alive until the code
+    // execution reaches the function call statement semicolon.
+    const struct audio_patch* patch() const { return &mPatch; }
+
+    template<typename T, typename... S>
+    PatchBuilder& addSink(T&& t, S&&... s) {
+        sinks().add(std::forward<T>(t), std::forward<S>(s)...);
+        return *this;
+    }
+    // Explicit type of the second parameter allows clients to provide the struct inline.
+    template<typename T>
+    PatchBuilder& addSink(T&& t, const mix_usecase_t& update) {
+        sinks().add(std::forward<T>(t), update);
+        return *this;
+    }
+    template<typename T, typename... S>
+    PatchBuilder& addSource(T&& t, S&&... s) {
+        sources().add(std::forward<T>(t), std::forward<S>(s)...);
+        return *this;
+    }
+    // Explicit type of the second parameter allows clients to provide the struct inline.
+    template<typename T>
+    PatchBuilder& addSource(T&& t, const mix_usecase_t& update) {
+        sources().add(std::forward<T>(t), update);
+        return *this;
+    }
+
+  private:
+    struct PortCfgs {
+        PortCfgs(unsigned int *countPtr, struct audio_port_config *portCfgs)
+                : mCountPtr(countPtr), mPortCfgs(portCfgs) {}
+        audio_port_config& add(const audio_port_config& portCfg) {
+            return *advance() = portCfg;
+        }
+        template<typename T>
+        audio_port_config& add(const sp<T>& entity) {
+            audio_port_config* added = advance();
+            entity->toAudioPortConfig(added);
+            return *added;
+        }
+        template<typename T>
+        void add(const sp<T>& entity, const mix_usecase_t& usecaseUpdate) {
+            add(entity).ext.mix.usecase = usecaseUpdate;
+        }
+        template<typename T>
+        void add(const sp<T>& entity,
+                std::function<mix_usecase_t(const mix_usecase_t&)> usecaseUpdater) {
+            mix_usecase_t* usecase = &add(entity).ext.mix.usecase;
+            *usecase = usecaseUpdater(*usecase);
+        }
+        struct audio_port_config* advance() {
+            return &mPortCfgs[(*mCountPtr)++];
+        }
+        unsigned int *mCountPtr;
+        struct audio_port_config *mPortCfgs;
+    };
+
+    PortCfgs sinks() { return PortCfgs(&mPatch.num_sinks, mPatch.sinks); }
+    PortCfgs sources() { return PortCfgs(&mPatch.num_sources, mPatch.sources); }
+
+    struct audio_patch mPatch = {};
+};
+
+}  // namespace android
+
+#endif  // ANDROID_PATCH_BUILDER_H
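
PatchBuilder is consumed by call sites that pass sp<> objects implementing toAudioPortConfig(); a hypothetical usage sketch (FakePort, usePatch and buildExamplePatch are stand-ins, not part of the patch):

    #include <media/PatchBuilder.h>
    #include <utils/RefBase.h>

    using namespace android;

    // Stand-in for a real port object (e.g. a device or mix-port wrapper).
    struct FakePort : public RefBase {
        audio_port_config config = {};
        void toAudioPortConfig(audio_port_config* dst) const { *dst = config; }
    };

    static void usePatch(const struct audio_patch* patch) { (void)patch; }

    static void buildExamplePatch() {
        sp<FakePort> source = new FakePort();
        sp<FakePort> sink = new FakePort();
        // The temporary builder lives until the end of the full statement, so
        // handing patch() to a function taking const audio_patch* is safe here.
        usePatch(PatchBuilder().addSource(source).addSink(sink).patch());
    }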
diff --git a/media/libmedia/include/media/SingleStateQueue.h b/media/libmedia/include/media/SingleStateQueue.h
index d423962..c2761cb 100644
--- a/media/libmedia/include/media/SingleStateQueue.h
+++ b/media/libmedia/include/media/SingleStateQueue.h
@@ -99,6 +99,13 @@
             return mShared->mAck - sequence >= 0;
         }
 
+        // returns the last value written (or the contents of the shared buffer after initialization
+        // if no value was written).
+        T last() const
+        {   // assume no sequence check required - we are the writer.
+            return mShared->mValue;
+        }
+
     private:
         int32_t     mSequence;
         Shared * const mShared;
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index b41da80..cdef637 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -90,7 +90,9 @@
     sp<IMemory> getFrameAtTime(int64_t timeUs, int option,
             int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
     sp<IMemory> getImageAtIndex(int index,
-            int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+            int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
+    sp<IMemory> getImageRectAtIndex(
+            int index, int colorFormat, int left, int top, int right, int bottom);
     status_t getFrameAtIndex(
             std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
             int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 6a4204b..e61b04d 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -155,15 +155,28 @@
 }
 
 sp<IMemory> MediaMetadataRetriever::getImageAtIndex(
-        int index, int colorFormat, bool metaOnly) {
-    ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d)",
-            index, colorFormat, metaOnly);
+        int index, int colorFormat, bool metaOnly, bool thumbnail) {
+    ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+            index, colorFormat, metaOnly, thumbnail);
     Mutex::Autolock _l(mLock);
     if (mRetriever == 0) {
         ALOGE("retriever is not initialized");
         return NULL;
     }
-    return mRetriever->getImageAtIndex(index, colorFormat, metaOnly);
+    return mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+}
+
+sp<IMemory> MediaMetadataRetriever::getImageRectAtIndex(
+        int index, int colorFormat, int left, int top, int right, int bottom) {
+    ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+            index, colorFormat, left, top, right, bottom);
+    Mutex::Autolock _l(mLock);
+    if (mRetriever == 0) {
+        ALOGE("retriever is not initialized");
+        return NULL;
+    }
+    return mRetriever->getImageRectAtIndex(
+            index, colorFormat, left, top, right, bottom);
 }
 
 status_t MediaMetadataRetriever::getFrameAtIndex(
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
index 39f8d6e..d197b3f 100644
--- a/media/libmediaextractor/MediaBuffer.cpp
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -39,7 +39,7 @@
       mRangeOffset(0),
       mRangeLength(size),
       mOwnsData(false),
-      mMetaData(new MetaData),
+      mMetaData(new MetaDataBase),
       mOriginal(NULL) {
 }
 
@@ -51,7 +51,7 @@
       mRangeOffset(0),
       mRangeLength(size),
       mOwnsData(true),
-      mMetaData(new MetaData),
+      mMetaData(new MetaDataBase),
       mOriginal(NULL) {
     if (size < kSharedMemThreshold
             || std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
@@ -84,7 +84,7 @@
       mRangeLength(mSize),
       mBuffer(buffer),
       mOwnsData(false),
-      mMetaData(new MetaData),
+      mMetaData(new MetaDataBase),
       mOriginal(NULL) {
 }
 
@@ -96,7 +96,7 @@
         return;
     }
 
-    int prevCount = __sync_fetch_and_sub(&mRefCount, 1);
+    int prevCount = mRefCount.fetch_sub(1);
     if (prevCount == 1) {
         if (mObserver == NULL) {
             delete this;
@@ -110,13 +110,13 @@
 
 void MediaBuffer::claim() {
     CHECK(mObserver != NULL);
-    CHECK_EQ(mRefCount, 1);
+    CHECK_EQ(mRefCount.load(std::memory_order_relaxed), 1);
 
-    mRefCount = 0;
+    mRefCount.store(0, std::memory_order_relaxed);
 }
 
 void MediaBuffer::add_ref() {
-    (void) __sync_fetch_and_add(&mRefCount, 1);
+    (void) mRefCount.fetch_add(1);
 }
 
 void *MediaBuffer::data() const {
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
index f944d51..5a25965 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -86,12 +86,14 @@
     virtual MediaBufferBase *clone();
 
     // sum of localRefcount() and remoteRefcount()
+    // Result should be treated as approximate unless the result precludes concurrent accesses.
     virtual int refcount() const {
         return localRefcount() + remoteRefcount();
     }
 
+    // Result should be treated as approximate unless the result precludes concurrent accesses.
     virtual int localRefcount() const {
-        return mRefCount;
+        return mRefCount.load(std::memory_order_relaxed);
     }
 
     virtual int remoteRefcount() const {
@@ -146,7 +148,7 @@
     void claim();
 
     MediaBufferObserver *mObserver;
-    int mRefCount;
+    std::atomic<int> mRefCount;
 
     void *mData;
     size_t mSize, mRangeOffset, mRangeLength;
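
The refcount change above replaces the GCC __sync builtins with std::atomic; release() relies on fetch_sub() returning the value held before the decrement, so the last releaser (previous count 1) is the one that frees or signals the observer. A stand-alone illustration of that contract:

    #include <atomic>
    #include <cassert>

    int main() {
        std::atomic<int> refCount(1);          // constructed with one reference
        refCount.fetch_add(1);                 // add_ref(): 1 -> 2
        int prev = refCount.fetch_sub(1);      // release(): returns the old value
        assert(prev == 2);
        prev = refCount.fetch_sub(1);          // last release: old value is 1
        assert(prev == 1 && refCount.load(std::memory_order_relaxed) == 0);
        return 0;
    }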
diff --git a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
index f1b7806..dfe34e8 100644
--- a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
@@ -70,6 +70,7 @@
     kKeyWantsNALFragments = 'NALf',
     kKeyIsSyncFrame       = 'sync',  // int32_t (bool)
     kKeyIsCodecConfig     = 'conf',  // int32_t (bool)
+    kKeyIsMuxerData       = 'muxd',  // int32_t (bool)
     kKeyTime              = 'time',  // int64_t (usecs)
     kKeyDecodingTime      = 'decT',  // int64_t (decoding timestamp in usecs)
     kKeyNTPTime           = 'ntpT',  // uint64_t (ntp-timestamp)
@@ -181,6 +182,9 @@
     kKeyCASystemID        = 'caid',  // int32_t
     kKeyCASessionID       = 'seid',  // raw data
 
+    kKeyEncryptedByteBlock = 'cblk',  // uint8_t
+    kKeySkipByteBlock     = 'sblk',  // uint8_t
+
     // Please see MediaFormat.KEY_IS_AUTOSELECT.
     kKeyTrackIsAutoselect = 'auto', // bool (int32_t)
     // Please see MediaFormat.KEY_IS_DEFAULT.
@@ -220,6 +224,7 @@
     kKeyFrameCount       = 'nfrm', // int32_t, total number of frame in video track
     kKeyExifOffset       = 'exof', // int64_t, Exif data offset
     kKeyExifSize         = 'exsz', // int64_t, Exif data size
+    kKeyIsExif           = 'exif', // bool (int32_t) buffer contains exif data block
 };
 
 enum {
@@ -229,6 +234,12 @@
     kTypeD263        = 'd263',
 };
 
+enum {
+    kCryptoModeUnencrypted = 0,
+    kCryptoModeAesCtr      = 1,
+    kCryptoModeAesCbc      = 2,
+};
+
 class Parcel;
 
 class MetaDataBase {
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index dc2bec8..135c9b6 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -248,12 +248,17 @@
 }
 
 void MediaAnalyticsItem::Prop::setName(const char *name, size_t len) {
-    mNameLen = len;
+    free((void *)mName);
     mName = (const char *) malloc(len+1);
+    LOG_ALWAYS_FATAL_IF(mName == NULL,
+                        "failed malloc() for property '%s' (len %zu)",
+                        name, len);
     memcpy ((void *)mName, name, len+1);
+    mNameLen = len;
 }
 
-// used only as part of a storing operation
+// consider this "find-or-allocate".
+// caller validates type and uses clearPropValue() accordingly
 MediaAnalyticsItem::Prop *MediaAnalyticsItem::allocateProp(const char *name) {
     size_t len = strlen(name);
     size_t i = findPropIndex(name, len);
@@ -271,7 +276,6 @@
         i = mPropCount++;
         prop = &mProps[i];
         prop->setName(name, len);
-        prop->mType = kTypeNone;        // make caller set type info
     }
 
     return prop;
@@ -299,6 +303,7 @@
 void MediaAnalyticsItem::setInt32(MediaAnalyticsItem::Attr name, int32_t value) {
     Prop *prop = allocateProp(name);
     if (prop != NULL) {
+        clearPropValue(prop);
         prop->mType = kTypeInt32;
         prop->u.int32Value = value;
     }
@@ -307,6 +312,7 @@
 void MediaAnalyticsItem::setInt64(MediaAnalyticsItem::Attr name, int64_t value) {
     Prop *prop = allocateProp(name);
     if (prop != NULL) {
+        clearPropValue(prop);
         prop->mType = kTypeInt64;
         prop->u.int64Value = value;
     }
@@ -315,6 +321,7 @@
 void MediaAnalyticsItem::setDouble(MediaAnalyticsItem::Attr name, double value) {
     Prop *prop = allocateProp(name);
     if (prop != NULL) {
+        clearPropValue(prop);
         prop->mType = kTypeDouble;
         prop->u.doubleValue = value;
     }
@@ -325,6 +332,7 @@
     Prop *prop = allocateProp(name);
     // any old value will be gone
     if (prop != NULL) {
+        clearPropValue(prop);
         prop->mType = kTypeCString;
         prop->u.CStringValue = strdup(value);
     }
@@ -333,6 +341,7 @@
 void MediaAnalyticsItem::setRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
     Prop *prop = allocateProp(name);
     if (prop != NULL) {
+        clearPropValue(prop);
         prop->mType = kTypeRate;
         prop->u.rate.count = count;
         prop->u.rate.duration = duration;
@@ -585,6 +594,9 @@
     // fix any pointers that we blindly copied, so we have our own copies
     if (dst->mName) {
         void *p =  malloc(dst->mNameLen + 1);
+        LOG_ALWAYS_FATAL_IF(p == NULL,
+                            "failed malloc() duping property '%s' (len %zu)",
+                            dst->mName, dst->mNameLen);
         memcpy (p, src->mName, dst->mNameLen + 1);
         dst->mName = (const char *) p;
     }
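
With allocateProp() now acting as find-or-allocate, each setter clears the previous value before assigning a new type; without that, re-typing a kTypeCString property would leak its strdup()'d string when the union is overwritten. A reduced, hypothetical sketch of the clear-then-set pattern (types and names illustrative, not the real Prop):

    #include <cstdint>
    #include <cstdlib>

    enum Type { kTypeNone, kTypeInt32, kTypeCString };

    struct Prop {
        Type type = kTypeNone;
        union { int32_t int32Value; char *stringValue; } u;
    };

    // Free any owned value before the union is reused for a different type.
    static void clearPropValue(Prop *prop) {
        if (prop->type == kTypeCString && prop->u.stringValue != nullptr) {
            free(prop->u.stringValue);
            prop->u.stringValue = nullptr;
        }
        prop->type = kTypeNone;
    }

    static void setInt32(Prop *prop, int32_t value) {
        clearPropValue(prop);       // clear first, then set type and value
        prop->type = kTypeInt32;
        prop->u.int32Value = value;
    }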
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 1fa8789..0fb5abc 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -9,6 +9,7 @@
 
     srcs: [
         "JAudioTrack.cpp",
+        "JavaVMHelper.cpp",
         "MediaPlayer2AudioOutput.cpp",
         "mediaplayer2.cpp",
     ],
@@ -49,6 +50,10 @@
         "media_plugin_headers",
     ],
 
+    include_dirs: [
+        "frameworks/base/core/jni",
+    ],
+
     static_libs: [
         "libmedia_helper",
         "libstagefright_nuplayer2",
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index ac0cc57..778ae1b 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -21,7 +21,7 @@
 #include "mediaplayer2/JAudioTrack.h"
 
 #include <android_media_AudioErrors.h>
-#include <android_runtime/AndroidRuntime.h>
+#include <mediaplayer2/JavaVMHelper.h>
 
 namespace android {
 
@@ -39,7 +39,7 @@
         const audio_attributes_t* pAttributes,        // AudioAttributes
         float maxRequiredSpeed) {                     // bufferSizeInBytes
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
     mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
 
@@ -116,19 +116,19 @@
 }
 
 JAudioTrack::~JAudioTrack() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     env->DeleteGlobalRef(mAudioTrackCls);
 }
 
 size_t JAudioTrack::frameCount() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetBufferSizeInFrames = env->GetMethodID(
             mAudioTrackCls, "getBufferSizeInFrames", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
 }
 
 size_t JAudioTrack::channelCount() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
 }
@@ -143,7 +143,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
             mAudioTrackCls, "getPlaybackHeadPosition", "()I");
     *position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
@@ -152,7 +152,7 @@
 }
 
 bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
     jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
@@ -189,7 +189,7 @@
 status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
     // TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
     // Should we do the same thing?
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
     jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
@@ -224,7 +224,7 @@
 }
 
 const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jmethodID jGetPlaybackParams = env->GetMethodID(
             mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
@@ -266,7 +266,7 @@
         return media::VolumeShaper::Status(BAD_VALUE);
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
             "(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
@@ -282,7 +282,7 @@
 }
 
 status_t JAudioTrack::setAuxEffectSendLevel(float level) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
             mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
@@ -290,14 +290,14 @@
 }
 
 status_t JAudioTrack::attachAuxEffect(int effectId) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
     int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
     return javaToNativeStatus(result);
 }
 
 status_t JAudioTrack::setVolume(float left, float right) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     // TODO: Java setStereoVolume is deprecated. Do we really need this method?
     jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
@@ -305,14 +305,14 @@
 }
 
 status_t JAudioTrack::setVolume(float volume) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
     return javaToNativeStatus(result);
 }
 
 status_t JAudioTrack::start() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
     // TODO: Should we catch the Java IllegalStateException from play()?
     env->CallVoidMethod(mAudioTrackObj, jPlay);
@@ -324,7 +324,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jbyteArray jAudioData = env->NewByteArray(size);
     env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
 
@@ -353,7 +353,7 @@
 }
 
 void JAudioTrack::stop() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
     env->CallVoidMethod(mAudioTrackObj, jStop);
     // TODO: Should we catch IllegalStateException?
@@ -365,20 +365,20 @@
 }
 
 void JAudioTrack::flush() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
     env->CallVoidMethod(mAudioTrackObj, jFlush);
 }
 
 void JAudioTrack::pause() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
     env->CallVoidMethod(mAudioTrackObj, jPause);
     // TODO: Should we catch IllegalStateException?
 }
 
 bool JAudioTrack::isPlaying() const {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
     int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
 
@@ -393,7 +393,7 @@
 }
 
 uint32_t JAudioTrack::getSampleRate() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
 }
@@ -403,7 +403,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetBufferSizeInFrames = env->GetMethodID(
             mAudioTrackCls, "getBufferSizeInFrames", "()I");
     int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
@@ -417,7 +417,7 @@
 }
 
 audio_format_t JAudioTrack::format() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
     int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
     return audioFormatToNative(javaFormat);
@@ -454,7 +454,7 @@
 }
 
 audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
             "()Landroid/media/AudioDeviceInfo;");
     jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
@@ -469,14 +469,14 @@
 }
 
 audio_session_t JAudioTrack::getAudioSessionId() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
     jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
     return (audio_session_t) sessionId;
 }
 
 status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
     jmethodID jSetAudioOutputDeviceById = env->GetMethodID(
             jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
@@ -550,7 +550,7 @@
         return NULL;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     // Referenced "android_media_VolumeShaper.h".
     jfloatArray xarray = nullptr;
@@ -595,7 +595,7 @@
 jobject JAudioTrack::createVolumeShaperOperationObj(
         const sp<media::VolumeShaper::Operation>& operation) {
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
     jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
@@ -647,7 +647,7 @@
 }
 
 jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2Impl$StreamEventCallback");
     jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
     jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor, this, cbf, user);
@@ -655,7 +655,7 @@
 }
 
 jobject JAudioTrack::createCallbackExecutor() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
     jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
             "newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
diff --git a/media/libmediaplayer2/JavaVMHelper.cpp b/media/libmediaplayer2/JavaVMHelper.cpp
new file mode 100644
index 0000000..90aaa7f
--- /dev/null
+++ b/media/libmediaplayer2/JavaVMHelper.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JavaVMHelper"
+
+#include "mediaplayer2/JavaVMHelper.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <stdlib.h>
+
+namespace android {
+
+// static
+std::atomic<JavaVM *> JavaVMHelper::sJavaVM(NULL);
+
+// static
+JNIEnv *JavaVMHelper::getJNIEnv() {
+    JNIEnv *env;
+    JavaVM *vm = sJavaVM.load();
+    CHECK(vm != NULL);
+
+    if (vm->GetEnv((void **)&env, JNI_VERSION_1_4) != JNI_OK) {
+        return NULL;
+    }
+
+    return env;
+}
+
+// static
+void JavaVMHelper::setJavaVM(JavaVM *vm) {
+    sJavaVM.store(vm);
+}
+
+}  // namespace android
diff --git a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
new file mode 100644
index 0000000..35091b7
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JAVA_VM_HELPER_H_
+
+#define JAVA_VM_HELPER_H_
+
+#include "jni.h"
+
+#include <atomic>
+
+namespace android {
+
+struct JavaVMHelper {
+    static JNIEnv *getJNIEnv();
+    static void setJavaVM(JavaVM *vm);
+
+private:
+    // Once a valid JavaVM has been set, it should never be reset or changed.
+    // However, as it may be accessed from multiple threads, access needs to be
+    // synchronized.
+    static std::atomic<JavaVM *> sJavaVM;
+};
+
+}  // namespace android
+
+#endif  // JAVA_VM_HELPER_H_
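
The two new files above publish the process JavaVM through a set-once std::atomic so that any thread can fetch a JNIEnv without going through AndroidRuntime. A rough standalone sketch of that set-once, read-from-many-threads pattern (plain C++, not the AOSP code; Context and ContextHelper are made-up stand-ins for JavaVM and JavaVMHelper):

// Standalone sketch of a set-once, read-many global handle guarded by std::atomic.
// "Context" stands in for JavaVM; get() stands in for JavaVMHelper::getJNIEnv().
#include <atomic>
#include <cassert>
#include <cstdio>

struct Context { int id; };

class ContextHelper {
public:
    static Context *get() {
        Context *ctx = sContext.load(std::memory_order_acquire);
        assert(ctx != nullptr);   // mirrors CHECK(vm != NULL)
        return ctx;
    }
    static void set(Context *ctx) {
        // Expected to be called exactly once, early in process start-up.
        sContext.store(ctx, std::memory_order_release);
    }
private:
    static std::atomic<Context *> sContext;
};

std::atomic<Context *> ContextHelper::sContext{nullptr};

int main() {
    static Context ctx{42};
    ContextHelper::set(&ctx);
    std::printf("context id = %d\n", ContextHelper::get()->id);
    return 0;
}

The atomic is what makes the single store in setJavaVM visible to getJNIEnv callers on other threads without taking a lock on every lookup.
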
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
index 3905b55..2fb5a2c 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
@@ -150,11 +150,11 @@
 
 // Do not change these values without updating their counterparts in MediaPlayer2.java
 enum mediaplayer2_states {
-    MEDIAPLAYER2_STATE_IDLE         = 1,
-    MEDIAPLAYER2_STATE_PREPARED     = 2,
-    MEDIAPLAYER2_STATE_PLAYING      = 3,
-    MEDIAPLAYER2_STATE_PAUSED       = 4,
-    MEDIAPLAYER2_STATE_ERROR        = 5,
+    MEDIAPLAYER2_STATE_IDLE         = 1001,
+    MEDIAPLAYER2_STATE_PREPARED     = 1002,
+    MEDIAPLAYER2_STATE_PLAYING      = 1003,
+    MEDIAPLAYER2_STATE_PAUSED       = 1004,
+    MEDIAPLAYER2_STATE_ERROR        = 1005,
 };
 
 enum media_player2_internal_states {
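
The public mediaplayer2_states values move from 1..5 to 1001..1005 and, per the comment above, must stay in lock-step with the constants in MediaPlayer2.java. As a hedged illustration of how a bit-flag internal state can be mapped onto the 1001-based public enum (the internal flag values below are hypothetical stand-ins, not the real MEDIA_PLAYER2_* constants):

// Sketch: map internal bit-flag states to the public 1001-based enum.
#include <cstdio>

enum mediaplayer2_states {
    MEDIAPLAYER2_STATE_IDLE     = 1001,
    MEDIAPLAYER2_STATE_PREPARED = 1002,
    MEDIAPLAYER2_STATE_PLAYING  = 1003,
    MEDIAPLAYER2_STATE_PAUSED   = 1004,
    MEDIAPLAYER2_STATE_ERROR    = 1005,
};

enum {  // hypothetical internal bit flags, for illustration only
    STATE_ERROR    = 1 << 0,
    STATE_PREPARED = 1 << 1,
    STATE_STARTED  = 1 << 2,
    STATE_PAUSED   = 1 << 3,
};

mediaplayer2_states toPublicState(unsigned internalState) {
    if (internalState & STATE_ERROR)    return MEDIAPLAYER2_STATE_ERROR;
    if (internalState & STATE_STARTED)  return MEDIAPLAYER2_STATE_PLAYING;
    if (internalState & STATE_PAUSED)   return MEDIAPLAYER2_STATE_PAUSED;
    if (internalState & STATE_PREPARED) return MEDIAPLAYER2_STATE_PREPARED;
    return MEDIAPLAYER2_STATE_IDLE;
}

int main() {
    std::printf("%d\n", toPublicState(STATE_STARTED));  // prints 1003
    return 0;
}
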
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index d586192..3af212e 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -68,7 +68,7 @@
             status_t        stop();
             status_t        pause();
             bool            isPlaying();
-            mediaplayer2_states getMediaPlayer2State();
+            mediaplayer2_states getState();
             status_t        setPlaybackSettings(const AudioPlaybackRate& rate);
             status_t        getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
             status_t        setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index e5567dc..4fb47b8 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -860,7 +860,7 @@
     return false;
 }
 
-mediaplayer2_states MediaPlayer2::getMediaPlayer2State() {
+mediaplayer2_states MediaPlayer2::getState() {
     Mutex::Autolock _l(mLock);
     if (mCurrentState & MEDIA_PLAYER2_STATE_ERROR) {
         return MEDIAPLAYER2_STATE_ERROR;
diff --git a/media/libmediaplayer2/nuplayer2/Android.bp b/media/libmediaplayer2/nuplayer2/Android.bp
index c40b361..1634f35 100644
--- a/media/libmediaplayer2/nuplayer2/Android.bp
+++ b/media/libmediaplayer2/nuplayer2/Android.bp
@@ -58,8 +58,6 @@
 
     name: "libstagefright_nuplayer2",
 
-    tags: ["eng"],
-
     sanitize: {
         cfi: true,
         diag: {
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 16ed530..40b17bf 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -48,7 +48,6 @@
 {
     ALOGV("MetadataRetrieverClient constructor pid(%d)", pid);
     mPid = pid;
-    mThumbnail = NULL;
     mAlbumArt = NULL;
     mRetriever = NULL;
 }
@@ -77,7 +76,6 @@
     ALOGV("disconnect from pid %d", mPid);
     Mutex::Autolock lock(mLock);
     mRetriever.clear();
-    mThumbnail.clear();
     mAlbumArt.clear();
     IPCThreadState::self()->flushCommands();
 }
@@ -194,25 +192,6 @@
 
 Mutex MetadataRetrieverClient::sLock;
 
-static sp<IMemory> getThumbnail(VideoFrame* frame) {
-    std::unique_ptr<VideoFrame> frameDeleter(frame);
-
-    size_t size = frame->getFlattenedSize();
-    sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
-    if (heap == NULL) {
-        ALOGE("failed to create MemoryDealer");
-        return NULL;
-    }
-    sp<IMemory> thrumbnail = new MemoryBase(heap, 0, size);
-    if (thrumbnail == NULL) {
-        ALOGE("not enough memory for VideoFrame size=%zu", size);
-        return NULL;
-    }
-    VideoFrame *frameCopy = static_cast<VideoFrame *>(thrumbnail->pointer());
-    frameCopy->copyFlattened(*frame);
-    return thrumbnail;
-}
-
 sp<IMemory> MetadataRetrieverClient::getFrameAtTime(
         int64_t timeUs, int option, int colorFormat, bool metaOnly)
 {
@@ -220,36 +199,53 @@
             (long long)timeUs, option, colorFormat, metaOnly);
     Mutex::Autolock lock(mLock);
     Mutex::Autolock glock(sLock);
-    mThumbnail.clear();
     if (mRetriever == NULL) {
         ALOGE("retriever is not initialized");
         return NULL;
     }
-    VideoFrame *frame = mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
+    sp<IMemory> frame = mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
     if (frame == NULL) {
         ALOGE("failed to capture a video frame");
         return NULL;
     }
-    return getThumbnail(frame);
+    return frame;
 }
 
 sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
-        int index, int colorFormat, bool metaOnly) {
-    ALOGV("getFrameAtTime: index(%d) colorFormat(%d), metaOnly(%d)",
-            index, colorFormat, metaOnly);
+        int index, int colorFormat, bool metaOnly, bool thumbnail) {
+    ALOGV("getImageAtIndex: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
+            index, colorFormat, metaOnly, thumbnail);
     Mutex::Autolock lock(mLock);
     Mutex::Autolock glock(sLock);
-    mThumbnail.clear();
     if (mRetriever == NULL) {
         ALOGE("retriever is not initialized");
         return NULL;
     }
-    VideoFrame *frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly);
+    sp<IMemory> frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
     if (frame == NULL) {
         ALOGE("failed to extract image");
         return NULL;
     }
-    return getThumbnail(frame);
+    return frame;
+}
+
+sp<IMemory> MetadataRetrieverClient::getImageRectAtIndex(
+        int index, int colorFormat, int left, int top, int right, int bottom) {
+    ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d), rect {%d, %d, %d, %d}",
+            index, colorFormat, left, top, right, bottom);
+    Mutex::Autolock lock(mLock);
+    Mutex::Autolock glock(sLock);
+    if (mRetriever == NULL) {
+        ALOGE("retriever is not initialized");
+        return NULL;
+    }
+    sp<IMemory> frame = mRetriever->getImageRectAtIndex(
+            index, colorFormat, left, top, right, bottom);
+    if (frame == NULL) {
+        ALOGE("failed to extract image");
+        return NULL;
+    }
+    return frame;
 }
 
 status_t MetadataRetrieverClient::getFrameAtIndex(
@@ -264,15 +260,12 @@
         return INVALID_OPERATION;
     }
 
-    std::vector<VideoFrame*> videoFrames;
     status_t err = mRetriever->getFrameAtIndex(
-            &videoFrames, frameIndex, numFrames, colorFormat, metaOnly);
+            frames, frameIndex, numFrames, colorFormat, metaOnly);
     if (err != OK) {
+        frames->clear();
         return err;
     }
-    for (size_t i = 0; i < videoFrames.size(); i++) {
-        frames->push_back(getThumbnail(videoFrames[i]));
-    }
     return OK;
 }
 
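
The retriever now hands back an sp<IMemory> directly, so the client simply forwards it instead of re-flattening every VideoFrame into freshly allocated shared memory and caching it in mThumbnail. A rough standalone sketch of the copy step that the removed getThumbnail() helper used to perform (std::vector stands in for MemoryHeapBase/MemoryBase, and Frame is a toy type):

// Sketch of the old copy step: flatten a frame header plus pixels into one
// contiguous buffer suitable for IPC.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Frame {
    uint32_t width;
    uint32_t height;
    std::vector<uint8_t> pixels;
    std::size_t flattenedSize() const {
        return sizeof(width) + sizeof(height) + pixels.size();
    }
};

std::vector<uint8_t> flattenForIpc(const Frame &frame) {
    std::vector<uint8_t> mem(frame.flattenedSize());
    uint8_t *p = mem.data();
    std::memcpy(p, &frame.width, sizeof(frame.width));   p += sizeof(frame.width);
    std::memcpy(p, &frame.height, sizeof(frame.height)); p += sizeof(frame.height);
    std::memcpy(p, frame.pixels.data(), frame.pixels.size());
    return mem;  // with the new API, the retriever returns such a buffer itself
}

int main() {
    Frame f{2, 2, std::vector<uint8_t>(2 * 2 * 4, 0xff)};
    return flattenForIpc(f).size() == f.flattenedSize() ? 0 : 1;
}
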
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index f71891a..272d093 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -53,7 +53,9 @@
     virtual sp<IMemory>             getFrameAtTime(
             int64_t timeUs, int option, int colorFormat, bool metaOnly);
     virtual sp<IMemory>             getImageAtIndex(
-            int index, int colorFormat, bool metaOnly);
+            int index, int colorFormat, bool metaOnly, bool thumbnail);
+    virtual sp<IMemory>             getImageRectAtIndex(
+            int index, int colorFormat, int left, int top, int right, int bottom);
     virtual status_t getFrameAtIndex(
                 std::vector<sp<IMemory> > *frames,
                 int frameIndex, int numFrames, int colorFormat, bool metaOnly);
@@ -73,9 +75,8 @@
     sp<MediaMetadataRetrieverBase>         mRetriever;
     pid_t                                  mPid;
 
-    // Keep the shared memory copy of album art and capture frame (for thumbnail)
+    // Keep the shared memory copy of album art
     sp<IMemory>                            mAlbumArt;
-    sp<IMemory>                            mThumbnail;
 };
 
 }; // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/Android.bp b/media/libmediaplayerservice/nuplayer/Android.bp
index 645bb7a..a4da564 100644
--- a/media/libmediaplayerservice/nuplayer/Android.bp
+++ b/media/libmediaplayerservice/nuplayer/Android.bp
@@ -54,8 +54,6 @@
 
     name: "libstagefright_nuplayer",
 
-    tags: ["eng"],
-
     sanitize: {
         cfi: true,
         diag: {
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index cbc3015..23d66bb 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -934,7 +934,11 @@
     sp<MetaData> meta = mSources.itemAt(trackIndex)->getFormat();
     if (meta == NULL) {
         ALOGE("no metadata for track %zu", trackIndex);
-        return NULL;
+        format->setInt32("type", MEDIA_TRACK_TYPE_UNKNOWN);
+        format->setString("mime", "application/octet-stream");
+        format->setString("language", "und");
+
+        return format;
     }
 
     const char *mime;
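
When a track has no metadata, getTrackInfo now fills in an "unknown" descriptor (MEDIA_TRACK_TYPE_UNKNOWN, application/octet-stream, language "und") instead of returning NULL, so callers never have to special-case a missing entry. A minimal standalone sketch of that fallback pattern (std::map stands in for AMessage; the helper name is made up):

// Sketch: always hand back a usable track descriptor, falling back to
// "unknown" values when the real metadata is missing.
#include <map>
#include <string>

using TrackFormat = std::map<std::string, std::string>;

TrackFormat describeTrack(const TrackFormat *meta) {
    if (meta == nullptr) {
        TrackFormat format;
        format["type"] = "unknown";
        format["mime"] = "application/octet-stream";
        format["language"] = "und";
        return format;  // still a valid descriptor, never NULL
    }
    return *meta;
}

int main() {
    TrackFormat fallback = describeTrack(nullptr);
    return fallback.at("mime") == "application/octet-stream" ? 0 : 1;
}
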
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 0a1bdfe..a5f5fc6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1447,6 +1447,29 @@
             break;
         }
 
+        case kWhatGetStats:
+        {
+            ALOGV("kWhatGetStats");
+
+            Vector<sp<AMessage>> *trackStats;
+            CHECK(msg->findPointer("trackstats", (void**)&trackStats));
+
+            trackStats->clear();
+            if (mVideoDecoder != NULL) {
+                trackStats->push_back(mVideoDecoder->getStats());
+            }
+            if (mAudioDecoder != NULL) {
+                trackStats->push_back(mAudioDecoder->getStats());
+            }
+
+            // respond for synchronization
+            sp<AMessage> response = new AMessage;
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
+            break;
+        }
+
         default:
             TRESPASS();
             break;
@@ -2210,16 +2233,16 @@
     return renderer->getCurrentPosition(mediaUs);
 }
 
-void NuPlayer::getStats(Vector<sp<AMessage> > *mTrackStats) {
-    CHECK(mTrackStats != NULL);
+void NuPlayer::getStats(Vector<sp<AMessage> > *trackStats) {
+    CHECK(trackStats != NULL);
 
-    mTrackStats->clear();
-    if (mVideoDecoder != NULL) {
-        mTrackStats->push_back(mVideoDecoder->getStats());
-    }
-    if (mAudioDecoder != NULL) {
-        mTrackStats->push_back(mAudioDecoder->getStats());
-    }
+    ALOGV("NuPlayer::getStats()");
+    sp<AMessage> msg = new AMessage(kWhatGetStats, this);
+    msg->setPointer("trackstats", trackStats);
+
+    sp<AMessage> response;
+    (void) msg->postAndAwaitResponse(&response);
+    // response is for synchronization, ignore contents
 }
 
 sp<MetaData> NuPlayer::getFileMeta() {
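
getStats() no longer reads mVideoDecoder/mAudioDecoder on the caller's thread; it posts kWhatGetStats to the player's looper and blocks until the handler has filled the vector, so the decoder pointers are only touched on the thread that owns them. A standalone sketch of the same post-and-await shape using a toy looper plus std::promise (all names here are illustrative; this is not the AMessage machinery):

// Sketch: marshal a query onto the owning thread and block for the reply,
// so shared state is only touched by one thread.
#include <condition_variable>
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class Looper {
public:
    void post(std::function<void()> task) {
        { std::lock_guard<std::mutex> lock(mMutex); mQueue.push(std::move(task)); }
        mCond.notify_one();
    }
    void run() {  // owning thread: drains tasks until an empty task arrives
        for (;;) {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex> lock(mMutex);
                mCond.wait(lock, [this] { return !mQueue.empty(); });
                task = std::move(mQueue.front());
                mQueue.pop();
            }
            if (!task) return;
            task();
        }
    }
private:
    std::mutex mMutex;
    std::condition_variable mCond;
    std::queue<std::function<void()>> mQueue;
};

int main() {
    Looper looper;
    std::thread owner([&] { looper.run(); });

    std::vector<int> stats;                 // only written on the owner thread
    std::promise<void> done;
    looper.post([&] { stats = {1, 2, 3}; done.set_value(); });  // kWhatGetStats analogue
    done.get_future().wait();               // postAndAwaitResponse analogue

    looper.post(nullptr);                   // stop the looper
    owner.join();
    return stats.size() == 3 ? 0 : 1;
}
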
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 3a7ef4e..e400d16 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -88,7 +88,7 @@
     status_t getSelectedTrack(int32_t type, Parcel* reply) const;
     status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
     status_t getCurrentPosition(int64_t *mediaUs);
-    void getStats(Vector<sp<AMessage> > *mTrackStats);
+    void getStats(Vector<sp<AMessage> > *trackStats);
 
     sp<MetaData> getFileMeta();
     float getFrameRate();
@@ -159,6 +159,7 @@
         kWhatPrepareDrm                 = 'pDrm',
         kWhatReleaseDrm                 = 'rDrm',
         kWhatMediaClockNotify           = 'mckN',
+        kWhatGetStats                   = 'gSts',
     };
 
     wp<NuPlayerDriver> mDriver;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 0402fca..fb12360 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -155,7 +155,9 @@
             break;
         default:
             ALOGE("Unknown track type: %d", track.mTrackType);
-            return NULL;
+            format->setInt32("type", MEDIA_TRACK_TYPE_UNKNOWN);
+            format->setString("mime", "application/octet-stream");
+            return format;
     }
 
     // For CEA-608 CC1, field 0 channel 0
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 2a08f62..69cd82e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -745,6 +745,7 @@
     sp<AMessage> reply = new AMessage(kWhatRenderBuffer, this);
     reply->setSize("buffer-ix", index);
     reply->setInt32("generation", mBufferGeneration);
+    reply->setSize("size", size);
 
     if (eos) {
         ALOGI("[%s] saw output EOS", mIsAudio ? "audio" : "video");
@@ -1127,6 +1128,7 @@
     int32_t render;
     size_t bufferIx;
     int32_t eos;
+    size_t size;
     CHECK(msg->findSize("buffer-ix", &bufferIx));
 
     if (!mIsAudio) {
@@ -1146,7 +1148,10 @@
         CHECK(msg->findInt64("timestampNs", &timestampNs));
         err = mCodec->renderOutputBufferAndRelease(bufferIx, timestampNs);
     } else {
-        mNumOutputFramesDropped += !mIsAudio;
+        if (!msg->findInt32("eos", &eos) || !eos ||
+                !msg->findSize("size", &size) || size) {
+            mNumOutputFramesDropped += !mIsAudio;
+        }
         err = mCodec->releaseOutputBuffer(bufferIx);
     }
     if (err != OK) {
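
The render reply now carries the output buffer size, so a buffer that is released without rendering only counts as a dropped video frame when it is not the empty EOS buffer. A tiny standalone predicate capturing that rule (names are illustrative):

// Sketch: count a released-without-render buffer as "dropped" only if it is
// a real video frame, i.e. not an empty end-of-stream buffer.
#include <cstddef>

bool countsAsDroppedFrame(bool isAudio, bool eos, std::size_t size) {
    if (isAudio) return false;            // only video frames are tracked
    if (eos && size == 0) return false;   // empty EOS buffer carries no picture
    return true;
}

int main() {
    // An empty EOS buffer is not a drop; a real skipped video frame is.
    return (!countsAsDroppedFrame(false, true, 0) &&
            countsAsDroppedFrame(false, false, 1024)) ? 0 : 1;
}
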
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 63c887b..3e5bdd6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -669,6 +669,11 @@
         notifyListener_l(MEDIA_STOPPED);
     }
 
+    if (property_get_bool("persist.debug.sf.stats", false)) {
+        Vector<String16> args;
+        dump(-1, args);
+    }
+
     mState = STATE_RESET_IN_PROGRESS;
     mPlayer->resetAsync();
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index a762e76..57a0198 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -358,8 +358,12 @@
 
         // AudioSink has rendered some frames.
         int64_t nowUs = ALooper::GetNowUs();
-        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
-                + mAudioFirstAnchorTimeMediaUs;
+        int64_t playedOutDurationUs = mAudioSink->getPlayedOutDurationUs(nowUs);
+        if (playedOutDurationUs == 0) {
+            *mediaUs = mAudioFirstAnchorTimeMediaUs;
+            return OK;
+        }
+        int64_t nowMediaUs = playedOutDurationUs + mAudioFirstAnchorTimeMediaUs;
         mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
     }
 
@@ -1703,6 +1707,8 @@
     ++mAudioDrainGeneration;
     if (mAudioRenderingStartGeneration != -1) {
         prepareForMediaRenderingStart_l();
+        // PauseTimeout is applied to offload mode only. Cancel pending timer.
+        cancelAudioOffloadPauseTimeout();
     }
 }
 
@@ -1805,6 +1811,12 @@
     if (mAudioTornDown) {
         return;
     }
+
+    // TimeoutWhenPaused is only for offload mode.
+    if (reason == kDueToTimeout && !offloadingAudio()) {
+        return;
+    }
+
     mAudioTornDown = true;
 
     int64_t currentPositionUs;
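
The position query above no longer moves the media-clock anchor until the AudioSink reports that some audio has actually been played out; before that it just reports the first anchor time. A small standalone sketch of that anchoring rule (the MediaClock stand-in and names are illustrative):

// Sketch: derive the current media time from audio playback progress, but do
// not update the clock anchor until some audio has really been played out.
#include <cstdint>
#include <cstdio>

struct MediaClock {
    int64_t anchorMediaUs = -1;
    int64_t anchorRealUs = -1;
    void updateAnchor(int64_t mediaUs, int64_t nowUs) {
        anchorMediaUs = mediaUs;
        anchorRealUs = nowUs;
    }
};

int64_t currentMediaUs(MediaClock &clock,
                       int64_t playedOutDurationUs,   // from the audio sink
                       int64_t firstAnchorMediaUs,    // pts of the first audio sample
                       int64_t nowUs) {
    if (playedOutDurationUs == 0) {
        // Nothing rendered yet: report the first anchor, leave the clock alone.
        return firstAnchorMediaUs;
    }
    int64_t nowMediaUs = playedOutDurationUs + firstAnchorMediaUs;
    clock.updateAnchor(nowMediaUs, nowUs);
    return nowMediaUs;
}

int main() {
    MediaClock clock;
    std::printf("%lld\n", (long long)currentMediaUs(clock, 0, 500000, 1000));      // 500000
    std::printf("%lld\n", (long long)currentMediaUs(clock, 20000, 500000, 21000)); // 520000
    return 0;
}
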
diff --git a/media/libmediaplayerservice/tests/Android.bp b/media/libmediaplayerservice/tests/Android.bp
index e936bdd..e86b68a 100644
--- a/media/libmediaplayerservice/tests/Android.bp
+++ b/media/libmediaplayerservice/tests/Android.bp
@@ -2,8 +2,6 @@
 
     name: "DrmSessionManager_test",
 
-    tags: ["tests"],
-
     srcs: ["DrmSessionManager_test.cpp"],
 
     shared_libs: [
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index 2486b76..35a43d8 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -26,7 +26,7 @@
 
 PipeReader::PipeReader(Pipe& pipe) :
         NBAIO_Source(pipe.mFormat),
-        mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, true /*flush*/),
+        mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, false /*flush*/),
         mFramesOverrun(0),
         mOverruns(0)
 {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3bbba49..7f39d10 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -567,7 +567,7 @@
       mMetadataBuffersToSubmit(0),
       mNumUndequeuedBuffers(0),
       mRepeatFrameDelayUs(-1ll),
-      mMaxPtsGapUs(-1ll),
+      mMaxPtsGapUs(0ll),
       mMaxFps(-1),
       mFps(-1.0),
       mCaptureFps(-1.0),
@@ -1823,16 +1823,21 @@
 
         // only allow 32-bit value, since we pass it as U32 to OMX.
         if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
-            mMaxPtsGapUs = -1ll;
-        } else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < 0) {
+            mMaxPtsGapUs = 0ll;
+        } else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < INT32_MIN) {
             ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
-            mMaxPtsGapUs = -1ll;
+            mMaxPtsGapUs = 0ll;
         }
 
         if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
             mMaxFps = -1;
         }
 
+        // notify GraphicBufferSource to allow backward frames
+        if (mMaxPtsGapUs < 0ll) {
+            mMaxFps = -1;
+        }
+
         if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
             mCaptureFps = -1.0;
         }
@@ -2144,6 +2149,10 @@
                 // value is unknown
                 drc.targetRefLevel = -1;
             }
+            if (!msg->findInt32("aac-drc-effect-type", &drc.effectType)) {
+                // value is unknown
+                drc.effectType = -2; // valid values are -1 and above
+            }
 
             err = setupAACCodec(
                     encoder, numChannels, sampleRate, bitrate, aacProfile,
@@ -2778,7 +2787,7 @@
             ? OMX_AUDIO_AACStreamFormatMP4ADTS
             : OMX_AUDIO_AACStreamFormatMP4FF;
 
-    OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation;
+    OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE presentation;
     InitOMXParams(&presentation);
     presentation.nMaxOutputChannels = maxOutputChannelCount;
     presentation.nDrcCut = drc.drcCut;
@@ -2787,14 +2796,29 @@
     presentation.nTargetReferenceLevel = drc.targetRefLevel;
     presentation.nEncodedTargetLevel = drc.encodedTargetLevel;
     presentation.nPCMLimiterEnable = pcmLimiterEnable;
+    presentation.nDrcEffectType = drc.effectType;
 
     status_t res = mOMXNode->setParameter(
             OMX_IndexParamAudioAac, &profile, sizeof(profile));
     if (res == OK) {
         // optional parameters, will not cause configuration failure
-        mOMXNode->setParameter(
+        if (mOMXNode->setParameter(
+                (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
+                &presentation, sizeof(presentation)) == ERROR_UNSUPPORTED) {
+            // prior to 9.0 we used a different config structure and index
+            OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation8;
+            InitOMXParams(&presentation8);
+            presentation8.nMaxOutputChannels = presentation.nMaxOutputChannels;
+            presentation8.nDrcCut = presentation.nDrcCut;
+            presentation8.nDrcBoost = presentation.nDrcBoost;
+            presentation8.nHeavyCompression = presentation.nHeavyCompression;
+            presentation8.nTargetReferenceLevel = presentation.nTargetReferenceLevel;
+            presentation8.nEncodedTargetLevel = presentation.nEncodedTargetLevel;
+            presentation8.nPCMLimiterEnable = presentation.nPCMLimiterEnable;
+            (void)mOMXNode->setParameter(
                 (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
-                &presentation, sizeof(presentation));
+                &presentation8, sizeof(presentation8));
+        }
     } else {
         ALOGW("did not set AudioAndroidAacPresentation due to error %d when setting AudioAac", res);
     }
@@ -5303,13 +5327,13 @@
     convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
 
     // if some aspects are unspecified, use dataspace fields
-    if (range != 0) {
+    if (range == 0) {
         range = (dataSpace & HAL_DATASPACE_RANGE_MASK) >> HAL_DATASPACE_RANGE_SHIFT;
     }
-    if (standard != 0) {
+    if (standard == 0) {
         standard = (dataSpace & HAL_DATASPACE_STANDARD_MASK) >> HAL_DATASPACE_STANDARD_SHIFT;
     }
-    if (transfer != 0) {
+    if (transfer == 0) {
         transfer = (dataSpace & HAL_DATASPACE_TRANSFER_MASK) >> HAL_DATASPACE_TRANSFER_SHIFT;
     }
 
@@ -6667,11 +6691,11 @@
         }
     }
 
-    if (mCodec->mMaxPtsGapUs > 0ll) {
+    if (mCodec->mMaxPtsGapUs != 0ll) {
         OMX_PARAM_U32TYPE maxPtsGapParams;
         InitOMXParams(&maxPtsGapParams);
         maxPtsGapParams.nPortIndex = kPortIndexInput;
-        maxPtsGapParams.nU32 = (uint32_t) mCodec->mMaxPtsGapUs;
+        maxPtsGapParams.nU32 = (uint32_t)mCodec->mMaxPtsGapUs;
 
         err = mCodec->mOMXNode->setParameter(
                 (OMX_INDEXTYPE)OMX_IndexParamMaxFrameDurationForBitrateControl,
@@ -6684,7 +6708,7 @@
         }
     }
 
-    if (mCodec->mMaxFps > 0) {
+    if (mCodec->mMaxFps > 0 || mCodec->mMaxPtsGapUs < 0) {
         err = statusFromBinderStatus(
                 mCodec->mGraphicBufferSource->setMaxFps(mCodec->mMaxFps));
 
@@ -6777,9 +6801,14 @@
 
     sp<RefBase> obj;
     CHECK(msg->findObject("input-surface", &obj));
+    if (obj == NULL) {
+        ALOGE("[%s] NULL input surface", mCodec->mComponentName.c_str());
+        mCodec->mCallback->onInputSurfaceDeclined(BAD_VALUE);
+        return;
+    }
+
     sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
     mCodec->mGraphicBufferSource = surface->getBufferSource();
-
     status_t err = setupInputSurface();
 
     if (err == OK) {
@@ -7610,8 +7639,10 @@
                 config->param[paramIndex].bSet =
                     (OMX_BOOL)params->findString(existingKey->second.c_str(), &value);
                 if (config->param[paramIndex].bSet) {
-                    strncpy((char *)config->param[paramIndex].cString, value.c_str(),
-                            sizeof(OMX_CONFIG_ANDROID_VENDOR_PARAMTYPE::cString));
+                    size_t dstSize = sizeof(config->param[paramIndex].cString);
+                    strncpy((char *)config->param[paramIndex].cString, value.c_str(), dstSize - 1);
+                    // explicitly null-terminate the value
+                    config->param[paramIndex].cString[dstSize - 1] = '\0';
                 }
                 break;
             }
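
Among the ACodec changes, the vendor-parameter hunk replaces a strncpy that could leave cString unterminated when the value exactly filled the field with a bounded copy plus an explicit terminator. A standalone illustration of that pattern (the struct below is a toy, not OMX_CONFIG_ANDROID_VENDOR_PARAMTYPE):

// Sketch: bounded string copy into a fixed-size field with a guaranteed
// NUL terminator.
#include <cstddef>
#include <cstdio>
#include <cstring>

struct Param {
    char cString[8];
};

void setStringParam(Param *param, const char *value) {
    const std::size_t dstSize = sizeof(param->cString);
    std::strncpy(param->cString, value, dstSize - 1);  // copy at most dstSize-1 bytes
    param->cString[dstSize - 1] = '\0';                // always terminate
}

int main() {
    Param p;
    setStringParam(&p, "exactly-eight-or-more");  // longer than the field
    std::printf("%s\n", p.cString);               // prints "exactly" (7 chars + NUL)
    return 0;
}
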
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 22b1e59..48e351b 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -162,7 +162,6 @@
         "libmedia_helper",
         "libstagefright_codecbase",
         "libstagefright_foundation",
-        "libstagefright_omx",
         "libstagefright_omx_utils",
         "libstagefright_xmlparser",
         "libRScpp",
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 3d0aad1..3370df1 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -17,12 +17,11 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "FrameDecoder"
 
-#include <inttypes.h>
-
-#include <utils/Log.h>
-#include <gui/Surface.h>
-
 #include "include/FrameDecoder.h"
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <gui/Surface.h>
+#include <inttypes.h>
 #include <media/ICrypto.h>
 #include <media/IMediaSource.h>
 #include <media/MediaCodecBuffer.h>
@@ -36,36 +35,37 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
 #include <private/media/VideoFrame.h>
+#include <utils/Log.h>
 
 namespace android {
 
-static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
-static const size_t kRetryCount = 20; // must be >0
+static const int64_t kBufferTimeOutUs = 10000ll; // 10 msec
+static const size_t kRetryCount = 50; // must be >0
 
-VideoFrame *FrameDecoder::allocVideoFrame(
-        int32_t width, int32_t height, bool metaOnly) {
+sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
+        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+        int32_t dstBpp, bool metaOnly = false) {
     int32_t rotationAngle;
-    if (!mTrackMeta->findInt32(kKeyRotation, &rotationAngle)) {
+    if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
         rotationAngle = 0;  // By default, no rotation
     }
-
     uint32_t type;
     const void *iccData;
     size_t iccSize;
-    if (!mTrackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
+    if (!trackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
         iccData = NULL;
         iccSize = 0;
     }
 
     int32_t sarWidth, sarHeight;
     int32_t displayWidth, displayHeight;
-    if (mTrackMeta->findInt32(kKeySARWidth, &sarWidth)
-            && mTrackMeta->findInt32(kKeySARHeight, &sarHeight)
+    if (trackMeta->findInt32(kKeySARWidth, &sarWidth)
+            && trackMeta->findInt32(kKeySARHeight, &sarHeight)
             && sarHeight != 0) {
         displayWidth = (width * sarWidth) / sarHeight;
         displayHeight = height;
-    } else if (mTrackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
-                && mTrackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
+    } else if (trackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
+                && trackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
                 && displayWidth > 0 && displayHeight > 0
                 && width > 0 && height > 0) {
         ALOGV("found display size %dx%d", displayWidth, displayHeight);
@@ -74,28 +74,67 @@
         displayHeight = height;
     }
 
-    return new VideoFrame(width, height, displayWidth, displayHeight,
-            rotationAngle, mDstBpp, !metaOnly, iccData, iccSize);
+    VideoFrame frame(width, height, displayWidth, displayHeight,
+            tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
+
+    size_t size = frame.getFlattenedSize();
+    sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
+    if (heap == NULL) {
+        ALOGE("failed to create MemoryDealer");
+        return NULL;
+    }
+    sp<IMemory> frameMem = new MemoryBase(heap, 0, size);
+    if (frameMem == NULL) {
+        ALOGE("not enough memory for VideoFrame size=%zu", size);
+        return NULL;
+    }
+    VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->pointer());
+    frameCopy->init(frame, iccData, iccSize);
+
+    return frameMem;
 }
 
-bool FrameDecoder::setDstColorFormat(android_pixel_format_t colorFormat) {
+bool findThumbnailInfo(
+        const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
+        uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
+    uint32_t dummyType;
+    const void *dummyData;
+    size_t dummySize;
+    return trackMeta->findInt32(kKeyThumbnailWidth, width)
+        && trackMeta->findInt32(kKeyThumbnailHeight, height)
+        && trackMeta->findData(kKeyThumbnailHVCC,
+                type ?: &dummyType, data ?: &dummyData, size ?: &dummySize);
+}
+
+bool findGridInfo(const sp<MetaData> &trackMeta,
+        int32_t *tileWidth, int32_t *tileHeight, int32_t *gridRows, int32_t *gridCols) {
+    return trackMeta->findInt32(kKeyTileWidth, tileWidth) && (*tileWidth > 0)
+        && trackMeta->findInt32(kKeyTileHeight, tileHeight) && (*tileHeight > 0)
+        && trackMeta->findInt32(kKeyGridRows, gridRows) && (*gridRows > 0)
+        && trackMeta->findInt32(kKeyGridCols, gridCols) && (*gridCols > 0);
+}
+
+bool getDstColorFormat(
+        android_pixel_format_t colorFormat,
+        OMX_COLOR_FORMATTYPE *dstFormat,
+        int32_t *dstBpp) {
     switch (colorFormat) {
         case HAL_PIXEL_FORMAT_RGB_565:
         {
-            mDstFormat = OMX_COLOR_Format16bitRGB565;
-            mDstBpp = 2;
+            *dstFormat = OMX_COLOR_Format16bitRGB565;
+            *dstBpp = 2;
             return true;
         }
         case HAL_PIXEL_FORMAT_RGBA_8888:
         {
-            mDstFormat = OMX_COLOR_Format32BitRGBA8888;
-            mDstBpp = 4;
+            *dstFormat = OMX_COLOR_Format32BitRGBA8888;
+            *dstBpp = 4;
             return true;
         }
         case HAL_PIXEL_FORMAT_BGRA_8888:
         {
-            mDstFormat = OMX_COLOR_Format32bitBGRA8888;
-            mDstBpp = 4;
+            *dstFormat = OMX_COLOR_Format32bitBGRA8888;
+            *dstBpp = 4;
             return true;
         }
         default:
@@ -107,51 +146,63 @@
     return false;
 }
 
-VideoFrame* FrameDecoder::extractFrame(
-        int64_t frameTimeUs, int option, int colorFormat, bool metaOnly) {
-    if (!setDstColorFormat((android_pixel_format_t)colorFormat)) {
+// static
+sp<IMemory> FrameDecoder::getMetadataOnly(
+        const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
+    OMX_COLOR_FORMATTYPE dstFormat;
+    int32_t dstBpp;
+    if (!getDstColorFormat(
+            (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
         return NULL;
     }
 
-    if (metaOnly) {
-        int32_t width, height;
-        CHECK(trackMeta()->findInt32(kKeyWidth, &width));
-        CHECK(trackMeta()->findInt32(kKeyHeight, &height));
-        return allocVideoFrame(width, height, true);
-    }
+    int32_t width, height, tileWidth = 0, tileHeight = 0;
+    if (thumbnail) {
+        if (!findThumbnailInfo(trackMeta, &width, &height)) {
+            return NULL;
+        }
+    } else {
+        CHECK(trackMeta->findInt32(kKeyWidth, &width));
+        CHECK(trackMeta->findInt32(kKeyHeight, &height));
 
-    status_t err = extractInternal(frameTimeUs, 1, option);
-    if (err != OK) {
-        return NULL;
+        int32_t gridRows, gridCols;
+        if (!findGridInfo(trackMeta, &tileWidth, &tileHeight, &gridRows, &gridCols)) {
+            tileWidth = tileHeight = 0;
+        }
     }
-
-    return mFrames.size() > 0 ? mFrames[0].release() : NULL;
+    return allocVideoFrame(trackMeta,
+            width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
 }
 
-status_t FrameDecoder::extractFrames(
-        int64_t frameTimeUs, size_t numFrames, int option, int colorFormat,
-        std::vector<VideoFrame*>* frames) {
-    if (!setDstColorFormat((android_pixel_format_t)colorFormat)) {
+FrameDecoder::FrameDecoder(
+        const AString &componentName,
+        const sp<MetaData> &trackMeta,
+        const sp<IMediaSource> &source)
+    : mComponentName(componentName),
+      mTrackMeta(trackMeta),
+      mSource(source),
+      mDstFormat(OMX_COLOR_Format16bitRGB565),
+      mDstBpp(2),
+      mHaveMoreInputs(true),
+      mFirstSample(true) {
+}
+
+FrameDecoder::~FrameDecoder() {
+    if (mDecoder != NULL) {
+        mDecoder->release();
+        mSource->stop();
+    }
+}
+
+status_t FrameDecoder::init(
+        int64_t frameTimeUs, size_t numFrames, int option, int colorFormat) {
+    if (!getDstColorFormat(
+            (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
         return ERROR_UNSUPPORTED;
     }
 
-    status_t err = extractInternal(frameTimeUs, numFrames, option);
-    if (err != OK) {
-        return err;
-    }
-
-    for (size_t i = 0; i < mFrames.size(); i++) {
-        frames->push_back(mFrames[i].release());
-    }
-    return OK;
-}
-
-status_t FrameDecoder::extractInternal(
-        int64_t frameTimeUs, size_t numFrames, int option) {
-
-    MediaSource::ReadOptions options;
     sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
-            frameTimeUs, numFrames, option, &options);
+            frameTimeUs, numFrames, option, &mReadOptions);
     if (videoFormat == NULL) {
         ALOGE("video format or seek mode not supported");
         return ERROR_UNSUPPORTED;
@@ -167,7 +218,8 @@
         return (decoder.get() == NULL) ? NO_MEMORY : err;
     }
 
-    err = decoder->configure(videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+    err = decoder->configure(
+            videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
     if (err != OK) {
         ALOGW("configure returned error %d (%s)", err, asString(err));
         decoder->release();
@@ -187,58 +239,75 @@
         decoder->release();
         return err;
     }
+    mDecoder = decoder;
 
-    Vector<sp<MediaCodecBuffer> > inputBuffers;
-    err = decoder->getInputBuffers(&inputBuffers);
+    return OK;
+}
+
+sp<IMemory> FrameDecoder::extractFrame(FrameRect *rect) {
+    status_t err = onExtractRect(rect);
+    if (err == OK) {
+        err = extractInternal();
+    }
     if (err != OK) {
-        ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
-        decoder->release();
-        mSource->stop();
+        return NULL;
+    }
+
+    return mFrames.size() > 0 ? mFrames[0] : NULL;
+}
+
+status_t FrameDecoder::extractFrames(std::vector<sp<IMemory> >* frames) {
+    status_t err = extractInternal();
+    if (err != OK) {
         return err;
     }
 
-    Vector<sp<MediaCodecBuffer> > outputBuffers;
-    err = decoder->getOutputBuffers(&outputBuffers);
-    if (err != OK) {
-        ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
-        decoder->release();
-        mSource->stop();
-        return err;
+    for (size_t i = 0; i < mFrames.size(); i++) {
+        frames->push_back(mFrames[i]);
     }
+    return OK;
+}
 
-    sp<AMessage> outputFormat = NULL;
-    bool haveMoreInputs = true;
-    size_t index, offset, size;
-    int64_t timeUs;
-    size_t retriesLeft = kRetryCount;
+status_t FrameDecoder::extractInternal() {
+    status_t err = OK;
     bool done = false;
-    bool firstSample = true;
+    size_t retriesLeft = kRetryCount;
     do {
-        size_t inputIndex = -1;
+        size_t index;
         int64_t ptsUs = 0ll;
         uint32_t flags = 0;
-        sp<MediaCodecBuffer> codecBuffer = NULL;
 
-        while (haveMoreInputs) {
-            err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
+        // Queue as many inputs as we possibly can, then block on dequeuing
+        // outputs. After getting each output, come back and queue the inputs
+        // again to keep the decoder busy.
+        while (mHaveMoreInputs) {
+            err = mDecoder->dequeueInputBuffer(&index, 0);
             if (err != OK) {
-                ALOGW("Timed out waiting for input");
+                ALOGV("Timed out waiting for input");
                 if (retriesLeft) {
                     err = OK;
                 }
                 break;
             }
-            codecBuffer = inputBuffers[inputIndex];
+            sp<MediaCodecBuffer> codecBuffer;
+            err = mDecoder->getInputBuffer(index, &codecBuffer);
+            if (err != OK) {
+                ALOGE("failed to get input buffer %zu", index);
+                break;
+            }
 
             MediaBufferBase *mediaBuffer = NULL;
 
-            err = mSource->read(&mediaBuffer, &options);
-            options.clearSeekTo();
+            err = mSource->read(&mediaBuffer, &mReadOptions);
+            mReadOptions.clearSeekTo();
             if (err != OK) {
-                ALOGW("Input Error or EOS");
-                haveMoreInputs = false;
-                if (!firstSample && err == ERROR_END_OF_STREAM) {
+                mHaveMoreInputs = false;
+                if (!mFirstSample && err == ERROR_END_OF_STREAM) {
+                    (void)mDecoder->queueInputBuffer(
+                            index, 0, 0, 0, MediaCodec::BUFFER_FLAG_EOS);
                     err = OK;
+                } else {
+                    ALOGW("Input Error: err=%d", err);
                 }
                 break;
             }
@@ -246,7 +315,7 @@
             if (mediaBuffer->range_length() > codecBuffer->capacity()) {
                 ALOGE("buffer size (%zu) too large for codec input size (%zu)",
                         mediaBuffer->range_length(), codecBuffer->capacity());
-                haveMoreInputs = false;
+                mHaveMoreInputs = false;
                 err = BAD_VALUE;
             } else {
                 codecBuffer->setRange(0, mediaBuffer->range_length());
@@ -256,51 +325,46 @@
                         (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
                         mediaBuffer->range_length());
 
-                onInputReceived(codecBuffer, mediaBuffer->meta_data(), firstSample, &flags);
-                firstSample = false;
+                onInputReceived(codecBuffer, mediaBuffer->meta_data(), mFirstSample, &flags);
+                mFirstSample = false;
             }
 
             mediaBuffer->release();
-            break;
-        }
 
-        if (haveMoreInputs && inputIndex < inputBuffers.size()) {
-            ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
-                    codecBuffer->size(), ptsUs, flags);
+            if (mHaveMoreInputs) {
+                ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
+                        codecBuffer->size(), ptsUs, flags);
 
-            err = decoder->queueInputBuffer(
-                    inputIndex,
-                    codecBuffer->offset(),
-                    codecBuffer->size(),
-                    ptsUs,
-                    flags);
+                err = mDecoder->queueInputBuffer(
+                        index,
+                        codecBuffer->offset(),
+                        codecBuffer->size(),
+                        ptsUs,
+                        flags);
 
-            if (flags & MediaCodec::BUFFER_FLAG_EOS) {
-                haveMoreInputs = false;
-            }
-
-            // we don't expect an output from codec config buffer
-            if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
-                continue;
+                if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+                    mHaveMoreInputs = false;
+                }
             }
         }
 
         while (err == OK) {
+            size_t offset, size;
             // wait for a decoded buffer
-            err = decoder->dequeueOutputBuffer(
+            err = mDecoder->dequeueOutputBuffer(
                     &index,
                     &offset,
                     &size,
-                    &timeUs,
+                    &ptsUs,
                     &flags,
                     kBufferTimeOutUs);
 
             if (err == INFO_FORMAT_CHANGED) {
                 ALOGV("Received format change");
-                err = decoder->getOutputFormat(&outputFormat);
+                err = mDecoder->getOutputFormat(&mOutputFormat);
             } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
                 ALOGV("Output buffers changed");
-                err = decoder->getOutputBuffers(&outputBuffers);
+                err = OK;
             } else {
                 if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
                     ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
@@ -308,12 +372,15 @@
                 } else if (err == OK) {
                     // If we're seeking with CLOSEST option and obtained a valid targetTimeUs
                     // from the extractor, decode to the specified frame. Otherwise we're done.
-                    ALOGV("Received an output buffer, timeUs=%lld", (long long)timeUs);
-                    sp<MediaCodecBuffer> videoFrameBuffer = outputBuffers.itemAt(index);
-
-                    err = onOutputReceived(videoFrameBuffer, outputFormat, timeUs, &done);
-
-                    decoder->releaseOutputBuffer(index);
+                    ALOGV("Received an output buffer, timeUs=%lld", (long long)ptsUs);
+                    sp<MediaCodecBuffer> videoFrameBuffer;
+                    err = mDecoder->getOutputBuffer(index, &videoFrameBuffer);
+                    if (err != OK) {
+                        ALOGE("failed to get output buffer %zu", index);
+                        break;
+                    }
+                    err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+                    mDecoder->releaseOutputBuffer(index);
                 } else {
                     ALOGW("Received error %d (%s) instead of output", err, asString(err));
                     done = true;
@@ -323,9 +390,6 @@
         }
     } while (err == OK && !done);
 
-    mSource->stop();
-    decoder->release();
-
     if (err != OK) {
         ALOGE("failed to get video frame (err %d)", err);
     }
@@ -333,6 +397,20 @@
     return err;
 }
 
+//////////////////////////////////////////////////////////////////////
+
+VideoFrameDecoder::VideoFrameDecoder(
+        const AString &componentName,
+        const sp<MetaData> &trackMeta,
+        const sp<IMediaSource> &source)
+    : FrameDecoder(componentName, trackMeta, source),
+      mIsAvcOrHevc(false),
+      mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
+      mTargetTimeUs(-1ll),
+      mNumFrames(0),
+      mNumFramesDecoded(0) {
+}
+
 sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
         int64_t frameTimeUs, size_t numFrames, int seekMode, MediaSource::ReadOptions *options) {
     mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
@@ -423,9 +501,10 @@
         return ERROR_MALFORMED;
     }
 
-    int32_t width, height;
+    int32_t width, height, stride;
     CHECK(outputFormat->findInt32("width", &width));
     CHECK(outputFormat->findInt32("height", &height));
+    CHECK(outputFormat->findInt32("stride", &stride));
 
     int32_t crop_left, crop_top, crop_right, crop_bottom;
     if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
@@ -434,11 +513,15 @@
         crop_bottom = height - 1;
     }
 
-    VideoFrame *frame = allocVideoFrame(
+    sp<IMemory> frameMem = allocVideoFrame(
+            trackMeta(),
             (crop_right - crop_left + 1),
             (crop_bottom - crop_top + 1),
-            false /*metaOnly*/);
-    addFrame(frame);
+            0,
+            0,
+            dstBpp());
+    addFrame(frameMem);
+    VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
 
     int32_t srcFormat;
     CHECK(outputFormat->findInt32("color-format", &srcFormat));
@@ -448,11 +531,10 @@
     if (converter.isValid()) {
         converter.convert(
                 (const uint8_t *)videoFrameBuffer->data(),
-                width, height,
+                width, height, stride,
                 crop_left, crop_top, crop_right, crop_bottom,
-                frame->mData,
-                frame->mWidth,
-                frame->mHeight,
+                frame->getFlattenedData(),
+                frame->mWidth, frame->mHeight, frame->mRowBytes,
                 crop_left, crop_top, crop_right, crop_bottom);
         return OK;
     }
@@ -462,6 +544,24 @@
     return ERROR_UNSUPPORTED;
 }
 
+////////////////////////////////////////////////////////////////////////
+
+ImageDecoder::ImageDecoder(
+        const AString &componentName,
+        const sp<MetaData> &trackMeta,
+        const sp<IMediaSource> &source)
+    : FrameDecoder(componentName, trackMeta, source),
+      mFrame(NULL),
+      mWidth(0),
+      mHeight(0),
+      mGridRows(1),
+      mGridCols(1),
+      mTileWidth(0),
+      mTileHeight(0),
+      mTilesDecoded(0),
+      mTargetTiles(0) {
+}
+
 sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
         int64_t frameTimeUs, size_t /*numFrames*/,
         int /*seekMode*/, MediaSource::ReadOptions *options) {
@@ -470,25 +570,24 @@
         uint32_t type;
         const void *data;
         size_t size;
-        int64_t thumbNailTime = 0;
-        int32_t thumbnailWidth, thumbnailHeight;
 
         // if we have a stand-alone thumbnail, set up the override meta,
         // and set seekTo time to -1.
-        if (trackMeta()->findInt32(kKeyThumbnailWidth, &thumbnailWidth)
-         && trackMeta()->findInt32(kKeyThumbnailHeight, &thumbnailHeight)
-         && trackMeta()->findData(kKeyThumbnailHVCC, &type, &data, &size)){
-            overrideMeta = new MetaData(*(trackMeta()));
-            overrideMeta->remove(kKeyDisplayWidth);
-            overrideMeta->remove(kKeyDisplayHeight);
-            overrideMeta->setInt32(kKeyWidth, thumbnailWidth);
-            overrideMeta->setInt32(kKeyHeight, thumbnailHeight);
-            overrideMeta->setData(kKeyHVCC, type, data, size);
-            thumbNailTime = -1ll;
-            ALOGV("thumbnail: %dx%d", thumbnailWidth, thumbnailHeight);
+        if (!findThumbnailInfo(trackMeta(), &mWidth, &mHeight, &type, &data, &size)) {
+            ALOGE("Thumbnail not available");
+            return NULL;
         }
-        options->setSeekTo(thumbNailTime);
+        overrideMeta = new MetaData(*(trackMeta()));
+        overrideMeta->remove(kKeyDisplayWidth);
+        overrideMeta->remove(kKeyDisplayHeight);
+        overrideMeta->setInt32(kKeyWidth, mWidth);
+        overrideMeta->setInt32(kKeyHeight, mHeight);
+        overrideMeta->setData(kKeyHVCC, type, data, size);
+        options->setSeekTo(-1);
     } else {
+        CHECK(trackMeta()->findInt32(kKeyWidth, &mWidth));
+        CHECK(trackMeta()->findInt32(kKeyHeight, &mHeight));
+
         options->setSeekTo(frameTimeUs);
     }
 
@@ -496,32 +595,28 @@
     if (overrideMeta == NULL) {
         // check if we're dealing with a tiled heif
         int32_t tileWidth, tileHeight, gridRows, gridCols;
-        if (trackMeta()->findInt32(kKeyTileWidth, &tileWidth) && tileWidth > 0
-         && trackMeta()->findInt32(kKeyTileHeight, &tileHeight) && tileHeight > 0
-         && trackMeta()->findInt32(kKeyGridRows, &gridRows) && gridRows > 0
-         && trackMeta()->findInt32(kKeyGridCols, &gridCols) && gridCols > 0) {
-            int32_t width, height;
-            CHECK(trackMeta()->findInt32(kKeyWidth, &width));
-            CHECK(trackMeta()->findInt32(kKeyHeight, &height));
-
-            if (width <= tileWidth * gridCols && height <= tileHeight * gridRows) {
+        if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
+            if (mWidth <= tileWidth * gridCols && mHeight <= tileHeight * gridRows) {
                 ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
-                        gridCols, gridRows, tileWidth, tileHeight, width, height);
+                        gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
 
                 overrideMeta = new MetaData(*(trackMeta()));
                 overrideMeta->setInt32(kKeyWidth, tileWidth);
                 overrideMeta->setInt32(kKeyHeight, tileHeight);
+                mTileWidth = tileWidth;
+                mTileHeight = tileHeight;
                 mGridCols = gridCols;
                 mGridRows = gridRows;
             } else {
-                ALOGE("bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
-                        gridCols, gridRows, tileWidth, tileHeight, width, height);
+                ALOGW("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
+                        gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
             }
         }
         if (overrideMeta == NULL) {
             overrideMeta = trackMeta();
         }
     }
+    mTargetTiles = mGridCols * mGridRows;
 
     sp<AMessage> videoFormat;
     if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
@@ -540,6 +635,45 @@
     return videoFormat;
 }
 
+status_t ImageDecoder::onExtractRect(FrameRect *rect) {
+    // TODO:
+    // This callback is for verifying whether we can decode the rect,
+    // and if so, set up the internal variables for decoding.
+    // Currently, rect decoding is restricted to sequentially decoding one
+    // row of tiles at a time. We can't decode arbitrary rects, as the image
+    // track doesn't yet support seeking by tiles. So all we do here is to
+    // verify the rect against what we expect.
+    // When seeking by tile is supported, this code should be updated to
+    // set the seek parameters.
+    if (rect == NULL) {
+        if (mTilesDecoded > 0) {
+            return ERROR_UNSUPPORTED;
+        }
+        mTargetTiles = mGridRows * mGridCols;
+        return OK;
+    }
+
+    if (mTileWidth <= 0 || mTileHeight <= 0) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    int32_t row = mTilesDecoded / mGridCols;
+    int32_t expectedTop = row * mTileHeight;
+    int32_t expectedBot = (row + 1) * mTileHeight;
+    if (expectedBot > mHeight) {
+        expectedBot = mHeight;
+    }
+    if (rect->left != 0 || rect->top != expectedTop
+            || rect->right != mWidth || rect->bottom != expectedBot) {
+        ALOGE("currently only support sequential decoding of slices");
+        return ERROR_UNSUPPORTED;
+    }
+
+    // advance one row
+    mTargetTiles = mTilesDecoded + mGridCols;
+    return OK;
+}
+
 status_t ImageDecoder::onOutputReceived(
         const sp<MediaCodecBuffer> &videoFrameBuffer,
         const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
@@ -547,18 +681,17 @@
         return ERROR_MALFORMED;
     }
 
-    int32_t width, height;
+    int32_t width, height, stride;
     CHECK(outputFormat->findInt32("width", &width));
     CHECK(outputFormat->findInt32("height", &height));
-
-    int32_t imageWidth, imageHeight;
-    CHECK(trackMeta()->findInt32(kKeyWidth, &imageWidth));
-    CHECK(trackMeta()->findInt32(kKeyHeight, &imageHeight));
+    CHECK(outputFormat->findInt32("stride", &stride));
 
     if (mFrame == NULL) {
-        mFrame = allocVideoFrame(imageWidth, imageHeight, false /*metaOnly*/);
+        sp<IMemory> frameMem = allocVideoFrame(
+                trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
+        mFrame = static_cast<VideoFrame*>(frameMem->pointer());
 
-        addFrame(mFrame);
+        addFrame(frameMem);
     }
 
     int32_t srcFormat;
@@ -567,8 +700,6 @@
     ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
 
     int32_t dstLeft, dstTop, dstRight, dstBottom;
-    int32_t numTiles = mGridRows * mGridCols;
-
     dstLeft = mTilesDecoded % mGridCols * width;
     dstTop = mTilesDecoded / mGridCols * height;
     dstRight = dstLeft + width - 1;
@@ -583,25 +714,24 @@
 
     // apply crop on bottom-right
     // TODO: need to move this into the color converter itself.
-    if (dstRight >= imageWidth) {
-        crop_right = imageWidth - dstLeft - 1;
+    if (dstRight >= mWidth) {
+        crop_right = mWidth - dstLeft - 1;
         dstRight = dstLeft + crop_right;
     }
-    if (dstBottom >= imageHeight) {
-        crop_bottom = imageHeight - dstTop - 1;
+    if (dstBottom >= mHeight) {
+        crop_bottom = mHeight - dstTop - 1;
         dstBottom = dstTop + crop_bottom;
     }
 
-    *done = (++mTilesDecoded >= numTiles);
+    *done = (++mTilesDecoded >= mTargetTiles);
 
     if (converter.isValid()) {
         converter.convert(
                 (const uint8_t *)videoFrameBuffer->data(),
-                width, height,
+                width, height, stride,
                 crop_left, crop_top, crop_right, crop_bottom,
-                mFrame->mData,
-                mFrame->mWidth,
-                mFrame->mHeight,
+                mFrame->getFlattenedData(),
+                mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
                 dstLeft, dstTop, dstRight, dstBottom);
         return OK;
     }
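
In the tiled image path, each decoded tile lands at (tileIndex % gridCols, tileIndex / gridCols) scaled by the tile size, and the destination rectangle is clipped where the grid overhangs the right and bottom edges of the picture. A standalone sketch of that placement-and-clip arithmetic (plain structs, no codec involved):

// Sketch: compute where tile N of a gridCols x gridRows image lands in the
// full picture, clipping the last column/row to the image bounds.
#include <cstdio>

struct Rect { int left, top, right, bottom; };  // inclusive coordinates

Rect tileDestination(int tileIndex, int gridCols,
                     int tileWidth, int tileHeight,
                     int imageWidth, int imageHeight) {
    Rect dst;
    dst.left = (tileIndex % gridCols) * tileWidth;
    dst.top = (tileIndex / gridCols) * tileHeight;
    dst.right = dst.left + tileWidth - 1;
    dst.bottom = dst.top + tileHeight - 1;
    if (dst.right >= imageWidth) {
        dst.right = imageWidth - 1;   // clip overhang on the right edge
    }
    if (dst.bottom >= imageHeight) {
        dst.bottom = imageHeight - 1; // clip overhang on the bottom edge
    }
    return dst;
}

int main() {
    // 1000x700 image carved into 512x512 tiles => 2 cols x 2 rows.
    Rect r = tileDestination(3, /*gridCols=*/2, 512, 512, 1000, 700);
    std::printf("{%d, %d, %d, %d}\n", r.left, r.top, r.right, r.bottom);
    // prints {512, 512, 999, 699}
    return 0;
}
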
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index a3261d7..6ff3d78 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -83,6 +83,9 @@
 static const char kMetaKey_TemporalLayerCount[] = "com.android.video.temporal_layers_count";
 
 static const int kTimestampDebugCount = 10;
+static const int kItemIdBase = 10000;
+static const char kExifHeader[] = {'E', 'x', 'i', 'f', '\0', '\0'};
+static const int32_t kTiffHeaderOffset = htonl(sizeof(kExifHeader));
 
 static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
     kHevcNalUnitTypeVps,
@@ -112,7 +115,7 @@
 
     int64_t getDurationUs() const;
     int64_t getEstimatedTrackSizeBytes() const;
-    int32_t getMetaSizeIncrease() const;
+    int32_t getMetaSizeIncrease(int32_t angle, int32_t trackCount) const;
     void writeTrackHeader(bool use32BitOffset = true);
     int64_t getMinCttsOffsetTimeUs();
     void bufferChunk(int64_t timestampUs);
@@ -122,8 +125,10 @@
     bool isAudio() const { return mIsAudio; }
     bool isMPEG4() const { return mIsMPEG4; }
     bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic; }
+    bool isExifData(const MediaBufferBase *buffer) const;
     void addChunkOffset(off64_t offset);
-    void addItemOffsetAndSize(off64_t offset, size_t size);
+    void addItemOffsetAndSize(off64_t offset, size_t size, bool isExif);
+    void flushItemRefs();
     int32_t getTrackId() const { return mTrackId; }
     status_t dump(int fd, const Vector<String16>& args) const;
     static const char *getFourCCForMime(const char *mime);
@@ -355,7 +360,9 @@
     int32_t mRotation;
 
     Vector<uint16_t> mProperties;
-    Vector<uint16_t> mDimgRefs;
+    ItemRefs mDimgRefs;
+    ItemRefs mCdscRefs;
+    uint16_t mImageItemId;
     int32_t mIsPrimary;
     int32_t mWidth, mHeight;
     int32_t mTileWidth, mTileHeight;
@@ -496,15 +503,16 @@
     mStreamableFile = false;
     mTimeScale = -1;
     mHasFileLevelMeta = false;
-    mHasMoovBox = false;
     mPrimaryItemId = 0;
     mAssociationEntryCount = 0;
     mNumGrids = 0;
+    mHasRefs = false;
 
     // Following variables only need to be set for the first recording session.
     // And they will stay the same for all the recording sessions.
     if (isFirstSession) {
         mMoovExtraSize = 0;
+        mHasMoovBox = false;
         mMetaKeys = new AMessage();
         addDeviceMeta();
         mLatitudex10000 = 0;
@@ -680,7 +688,12 @@
 #endif
 }
 
-int64_t MPEG4Writer::estimateFileLevelMetaSize() {
+int64_t MPEG4Writer::estimateFileLevelMetaSize(MetaData *params) {
+    int32_t rotation;
+    if (!params || !params->findInt32(kKeyRotation, &rotation)) {
+        rotation = 0;
+    }
+
     // base meta size
     int64_t metaSize =     12  // meta fullbox header
                          + 33  // hdlr box
@@ -695,7 +708,7 @@
     for (List<Track *>::iterator it = mTracks.begin();
          it != mTracks.end(); ++it) {
         if ((*it)->isHeic()) {
-            metaSize += (*it)->getMetaSizeIncrease();
+            metaSize += (*it)->getMetaSizeIncrease(rotation, mTracks.size());
         }
     }
 
@@ -900,7 +913,7 @@
     if (mInMemoryCacheSize == 0) {
         int32_t bitRate = -1;
         if (mHasFileLevelMeta) {
-            mInMemoryCacheSize += estimateFileLevelMetaSize();
+            mInMemoryCacheSize += estimateFileLevelMetaSize(param);
         }
         if (mHasMoovBox) {
             if (param) {
@@ -1344,12 +1357,17 @@
 }
 
 off64_t MPEG4Writer::addSample_l(
-        MediaBuffer *buffer, bool usePrefix, size_t *bytesWritten) {
+        MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten) {
     off64_t old_offset = mOffset;
 
     if (usePrefix) {
         addMultipleLengthPrefixedSamples_l(buffer);
     } else {
+        if (isExif) {
+            ::write(mFd, &kTiffHeaderOffset, 4); // exif_tiff_header_offset field
+            mOffset += 4;
+        }
+
         ::write(mFd,
               (const uint8_t *)buffer->data() + buffer->range_offset(),
               buffer->range_length());
@@ -1767,6 +1785,9 @@
       mReachedEOS(false),
       mStartTimestampUs(-1),
       mRotation(0),
+      mDimgRefs("dimg"),
+      mCdscRefs("cdsc"),
+      mImageItemId(0),
       mIsPrimary(0),
       mWidth(0),
       mHeight(0),
@@ -1933,6 +1954,13 @@
     return OK;
 }
 
+bool MPEG4Writer::Track::isExifData(const MediaBufferBase *buffer) const {
+    return mIsHeic
+            && (buffer->range_length() > sizeof(kExifHeader))
+            && !memcmp((uint8_t *)buffer->data() + buffer->range_offset(),
+                    kExifHeader, sizeof(kExifHeader));
+}
+
 void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
     CHECK(!mIsHeic);
     if (mOwner->use32BitFileOffset()) {
@@ -1943,7 +1971,7 @@
     }
 }
 
-void MPEG4Writer::Track::addItemOffsetAndSize(off64_t offset, size_t size) {
+void MPEG4Writer::Track::addItemOffsetAndSize(off64_t offset, size_t size, bool isExif) {
     CHECK(mIsHeic);
 
     if (offset > UINT32_MAX || size > UINT32_MAX) {
@@ -1954,6 +1982,18 @@
     if (mIsMalformed) {
         return;
     }
+
+    if (isExif) {
+        mCdscRefs.value.push_back(mOwner->addItem_l({
+            .itemType = "Exif",
+            .isPrimary = false,
+            .isHidden = false,
+            .offset = (uint32_t)offset,
+            .size = (uint32_t)size,
+        }));
+        return;
+    }
+
     if (mTileIndex >= mNumTiles) {
         ALOGW("Ignoring excess tiles!");
         return;
@@ -1968,7 +2008,7 @@
         default: break; // don't set if invalid
     }
 
-    bool hasGrid = (mNumTiles > 1);
+    bool hasGrid = (mTileWidth > 0);
 
     if (mProperties.empty()) {
         mProperties.push_back(mOwner->addProperty_l({
@@ -1990,18 +2030,16 @@
         }
     }
 
-    uint16_t itemId = mOwner->addItem_l({
-        .itemType = "hvc1",
-        .isPrimary = hasGrid ? false : (mIsPrimary != 0),
-        .isHidden = hasGrid,
-        .offset = (uint32_t)offset,
-        .size = (uint32_t)size,
-        .properties = mProperties,
-    });
-
     mTileIndex++;
     if (hasGrid) {
-        mDimgRefs.push_back(itemId);
+        mDimgRefs.value.push_back(mOwner->addItem_l({
+            .itemType = "hvc1",
+            .isPrimary = false,
+            .isHidden = true,
+            .offset = (uint32_t)offset,
+            .size = (uint32_t)size,
+            .properties = mProperties,
+        }));
 
         if (mTileIndex == mNumTiles) {
             mProperties.clear();
@@ -2016,7 +2054,7 @@
                     .rotation = heifRotation,
                 }));
             }
-            mOwner->addItem_l({
+            mImageItemId = mOwner->addItem_l({
                 .itemType = "grid",
                 .isPrimary = (mIsPrimary != 0),
                 .isHidden = false,
@@ -2025,9 +2063,31 @@
                 .width = (uint32_t)mWidth,
                 .height = (uint32_t)mHeight,
                 .properties = mProperties,
-                .dimgRefs = mDimgRefs,
             });
         }
+    } else {
+        mImageItemId = mOwner->addItem_l({
+            .itemType = "hvc1",
+            .isPrimary = (mIsPrimary != 0),
+            .isHidden = false,
+            .offset = (uint32_t)offset,
+            .size = (uint32_t)size,
+            .properties = mProperties,
+        });
+    }
+}
+
+// Flush out the item refs for this track. Note that it must be called after the
+// writer thread has stopped, because there might be pending items in the last
+// few chunks written by the writer thread (as opposed to the track). In particular,
+// it affects the 'dimg' refs for tiled images, as we only have the refs after the
+// last tile sample is written.
+void MPEG4Writer::Track::flushItemRefs() {
+    CHECK(mIsHeic);
+
+    if (mImageItemId > 0) {
+        mOwner->addRefs_l(mImageItemId, mDimgRefs);
+        mOwner->addRefs_l(mImageItemId, mCdscRefs);
     }
 }
 
@@ -2174,15 +2234,20 @@
         chunk->mTimeStampUs, chunk->mTrack->getTrackType());
 
     int32_t isFirstSample = true;
-    bool usePrefix = chunk->mTrack->usePrefix();
     while (!chunk->mSamples.empty()) {
         List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
 
+        int32_t isExif;
+        if (!(*it)->meta_data().findInt32(kKeyIsExif, &isExif)) {
+            isExif = 0;
+        }
+        bool usePrefix = chunk->mTrack->usePrefix() && !isExif;
+
         size_t bytesWritten;
-        off64_t offset = addSample_l(*it, usePrefix, &bytesWritten);
+        off64_t offset = addSample_l(*it, usePrefix, isExif, &bytesWritten);
 
         if (chunk->mTrack->isHeic()) {
-            chunk->mTrack->addItemOffsetAndSize(offset, bytesWritten);
+            chunk->mTrack->addItemOffsetAndSize(offset, bytesWritten, isExif);
         } else if (isFirstSample) {
             chunk->mTrack->addChunkOffset(offset);
             isFirstSample = false;
@@ -2904,6 +2969,19 @@
             break;
         }
 
+        bool isExif = false;
+        int32_t isMuxerData;
+        if (buffer->meta_data().findInt32(kKeyIsMuxerData, &isMuxerData) && isMuxerData) {
+            // We only support one type of muxer data, which is Exif data block.
+            isExif = isExifData(buffer);
+            if (!isExif) {
+                ALOGW("Ignoring bad Exif data block");
+                buffer->release();
+                buffer = NULL;
+                continue;
+            }
+        }
+
         ++nActualFrames;
 
         // Make a deep copy of the MediaBuffer and Metadata and release
@@ -2916,10 +2994,15 @@
         buffer->release();
         buffer = NULL;
 
-        if (usePrefix()) StripStartcode(copy);
+        if (isExif) {
+            copy->meta_data().setInt32(kKeyIsExif, 1);
+        }
+        bool usePrefix = this->usePrefix() && !isExif;
+
+        if (usePrefix) StripStartcode(copy);
 
         size_t sampleSize = copy->range_length();
-        if (usePrefix()) {
+        if (usePrefix) {
             if (mOwner->useNalLengthFour()) {
                 sampleSize += 4;
             } else {
@@ -3185,10 +3268,10 @@
         }
         if (!hasMultipleTracks) {
             size_t bytesWritten;
-            off64_t offset = mOwner->addSample_l(copy, usePrefix(), &bytesWritten);
+            off64_t offset = mOwner->addSample_l(copy, usePrefix, isExif, &bytesWritten);
 
             if (mIsHeic) {
-                addItemOffsetAndSize(offset, bytesWritten);
+                addItemOffsetAndSize(offset, bytesWritten, isExif);
             } else {
                 uint32_t count = (mOwner->use32BitFileOffset()
                             ? mStcoTableEntries->count()
@@ -3450,10 +3533,12 @@
     return mEstimatedTrackSizeBytes;
 }
 
-int32_t MPEG4Writer::Track::getMetaSizeIncrease() const {
+int32_t MPEG4Writer::Track::getMetaSizeIncrease(
+        int32_t angle, int32_t trackCount) const {
     CHECK(mIsHeic);
 
-    int32_t grid = (mNumTiles > 1);
+    int32_t grid = (mTileWidth > 0);
+    int32_t rotate = (angle > 0);
 
     // Note that the rotation angle is in the file meta and is only known at
     // start time, when this estimate is made, so 'irot' is counted only if set.
@@ -3461,25 +3546,34 @@
     // increase to ipco
     int32_t increase = 20 * (grid + 1)              // 'ispe' property
                      + (8 + mCodecSpecificDataSize) // 'hvcC' property
-                     + 9;                           // 'irot' property (worst case)
+                     ;
+
+    if (rotate) {
+        increase += 9;                              // 'irot' property (worst case)
+    }
 
     // increase to iref and idat
     if (grid) {
-        increase += (8 + 2 + 2 + mNumTiles * 2)  // 'dimg' in iref
-                  + 12;                          // ImageGrid in 'idat' (worst case)
+        increase += (12 + mNumTiles * 2)            // 'dimg' in iref
+                  + 12;                             // ImageGrid in 'idat' (worst case)
     }
 
-    // increase to iloc, iinf and ipma
-    increase += (16             // increase to 'iloc'
-              + 21              // increase to 'iinf'
-              + (3 + 2 * 2))    // increase to 'ipma' (worst case, 2 props x 2 bytes)
-              * (mNumTiles + grid);
+    increase += (12 + 2);                           // 'cdsc' in iref
 
-    // adjust to ipma:
-    // if rotation is present and only one tile, it could ref 3 properties
-    if (!grid) {
-        increase += 2;
-    }
+    // increase to iloc, iinf
+    increase += (16                                 // increase to 'iloc'
+              + 21)                                 // increase to 'iinf'
+              * (mNumTiles + grid + 1);             // "+1" is for 'Exif'
+
+    // When the total # of properties exceeds 127, property ids become 2 bytes each.
+    // We write at most 4 properties per image track (2x'ispe', 1x'hvcC', 1x'irot'),
+    // so use a conservative track count threshold of 30 before assuming 2-byte ids.
+    int32_t propBytes = trackCount > 30 ? 2 : 1;
+
+    // increase to ipma
+    increase += (3 + 2 * propBytes) * mNumTiles     // 'ispe' + 'hvcC'
+             + grid * (3 + propBytes)               // 'ispe' for grid
+             + rotate * propBytes;                  // 'irot' (either on grid or tile)
 
     return increase;
 }
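
As a worked example of the new accounting (an assumption for illustration: a
4x3 grid, i.e. mNumTiles = 12, a non-zero rotation, and at most 30 tracks so
propBytes = 1), the estimate comes to 20*2 = 40 for 'ispe', 8 + csd for 'hvcC',
9 for 'irot', (12 + 12*2) + 12 = 48 for 'dimg' plus 'idat', 14 for 'cdsc',
(16 + 21) * (12 + 1 + 1) = 518 for 'iloc' and 'iinf', and 5*12 + 4 + 1 = 65 for
'ipma', i.e. 702 bytes plus the codec specific data size.
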
@@ -4239,7 +4333,7 @@
     writeInt16((uint16_t)itemCount);
     for (size_t i = 0; i < itemCount; i++) {
         writeInfeBox(mItems[i].itemId, mItems[i].itemType,
-                mItems[i].isHidden ? 1 : 0);
+                (mItems[i].isImage() && mItems[i].isHidden) ? 1 : 0);
     }
 
     endBox();
@@ -4274,21 +4368,21 @@
     writeInt32(0);          // Version = 0, Flags = 0
     {
         for (size_t i = 0; i < mItems.size(); i++) {
-            if (!mItems[i].isGrid()) {
-                continue;
+            for (size_t r = 0; r < mItems[i].refsList.size(); r++) {
+                const ItemRefs &refs = mItems[i].refsList[r];
+                beginBox(refs.key);
+                writeInt16(mItems[i].itemId);
+                size_t refCount = refs.value.size();
+                if (refCount > 65535) {
+                    ALOGW("too many entries in %s", refs.key);
+                    refCount = 65535;
+                }
+                writeInt16((uint16_t)refCount);
+                for (size_t refIndex = 0; refIndex < refCount; refIndex++) {
+                    writeInt16(refs.value[refIndex]);
+                }
+                endBox();
             }
-            beginBox("dimg");
-            writeInt16(mItems[i].itemId);
-            size_t refCount = mItems[i].dimgRefs.size();
-            if (refCount > 65535) {
-                ALOGW("too many entries in dimg");
-                refCount = 65535;
-            }
-            writeInt16((uint16_t)refCount);
-            for (size_t refIndex = 0; refIndex < refCount; refIndex++) {
-                writeInt16(mItems[i].dimgRefs[refIndex]);
-            }
-            endBox();
         }
     }
     endBox();
@@ -4384,32 +4478,45 @@
 }
 
 void MPEG4Writer::writeFileLevelMetaBox() {
-    if (mItems.empty()) {
-        ALOGE("no valid item was found");
-        return;
-    }
-
     // patch up the mPrimaryItemId and count items with prop associations
     uint16_t firstVisibleItemId = 0;
+    uint16_t firstImageItemId = 0;
     for (size_t index = 0; index < mItems.size(); index++) {
+        if (!mItems[index].isImage()) continue;
+
         if (mItems[index].isPrimary) {
             mPrimaryItemId = mItems[index].itemId;
-        } else if (!firstVisibleItemId && !mItems[index].isHidden) {
+        }
+        if (!firstImageItemId) {
+            firstImageItemId = mItems[index].itemId;
+        }
+        if (!firstVisibleItemId && !mItems[index].isHidden) {
             firstVisibleItemId = mItems[index].itemId;
         }
-
         if (!mItems[index].properties.empty()) {
             mAssociationEntryCount++;
         }
     }
 
+    if (!firstImageItemId) {
+        ALOGE("no valid image was found");
+        return;
+    }
+
     if (mPrimaryItemId == 0) {
         if (firstVisibleItemId > 0) {
-            ALOGW("didn't find primary, using first visible item");
+            ALOGW("didn't find primary, using first visible image");
             mPrimaryItemId = firstVisibleItemId;
         } else {
-            ALOGW("no primary and no visible item, using first item");
-            mPrimaryItemId = mItems[0].itemId;
+            ALOGW("no primary and no visible item, using first image");
+            mPrimaryItemId = firstImageItemId;
+        }
+    }
+
+    for (List<Track *>::iterator it = mTracks.begin();
+        it != mTracks.end(); ++it) {
+        if ((*it)->isHeic()) {
+            (*it)->flushItemRefs();
         }
     }
 
@@ -4422,6 +4529,8 @@
     writeIprpBox();
     if (mNumGrids > 0) {
         writeIdatBox();
+    }
+    if (mHasRefs) {
         writeIrefBox();
     }
     endBox();
@@ -4445,8 +4554,8 @@
     size_t index = mItems.size();
     mItems.push_back(info);
 
-    // make the item id start at 10000
-    mItems.editItemAt(index).itemId = index + 10000;
+    // make the item id start at kItemIdBase
+    mItems.editItemAt(index).itemId = index + kItemIdBase;
 
 #if (LOG_NDEBUG==0)
     if (!info.properties.empty()) {
@@ -4464,6 +4573,20 @@
     return mItems[index].itemId;
 }
 
+void MPEG4Writer::addRefs_l(uint16_t itemId, const ItemRefs &refs) {
+    if (refs.value.empty()) {
+        return;
+    }
+    if (itemId < kItemIdBase) {
+        ALOGW("itemId shouldn't be smaller than kItemIdBase");
+        return;
+    }
+
+    size_t index = itemId - kItemIdBase;
+    mItems.editItemAt(index).refsList.push_back(refs);
+    mHasRefs = true;
+}
+
 /*
  * Geodata is stored according to ISO-6709 standard.
  */
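
For context, a minimal sketch (not part of this change) of how an Exif item
payload is laid out by addSample_l() above: a big-endian 4-byte
exif_tiff_header_offset equal to the length of the "Exif\0\0" identifier,
followed by the client-supplied block, which itself must begin with that
identifier to pass isExifData(). The helper name packExifItem is hypothetical:

    #include <arpa/inet.h>  // htonl
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    static std::vector<uint8_t> packExifItem(const uint8_t* exifBlock, size_t size) {
        static const char kHeader[] = {'E', 'x', 'i', 'f', '\0', '\0'};
        std::vector<uint8_t> out;
        if (size <= sizeof(kHeader) || memcmp(exifBlock, kHeader, sizeof(kHeader)) != 0) {
            return out;  // not a valid Exif block; the writer drops such buffers
        }
        uint32_t tiffHeaderOffset = htonl(sizeof(kHeader));  // offset to the TIFF header
        out.resize(4 + size);
        memcpy(out.data(), &tiffHeaderOffset, 4);
        memcpy(out.data() + 4, exifBlock, size);
        return out;
    }
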
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index f25d1f1..72eff94 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -479,6 +479,13 @@
 
 // static
 sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
+    // allow plugin to create surface
+    sp<PersistentSurface> pluginSurface =
+        StagefrightPluginLoader::GetCCodecInstance()->createInputSurface();
+    if (pluginSurface != nullptr) {
+        return pluginSurface;
+    }
+
     OMXClient client;
     if (client.connect() != OK) {
         ALOGE("Failed to connect to OMX to create persistent input surface.");
@@ -523,6 +530,7 @@
       mDequeueOutputReplyID(0),
       mHaveInputSurface(false),
       mHavePendingInputBuffers(false),
+      mCpuBoostRequested(false),
       mLatencyUnknown(0) {
     if (uid == kNoUid) {
         mUid = IPCThreadState::self()->getCallingUid();
@@ -763,7 +771,7 @@
 
     // ignore stuff with no presentation time
     if (presentationUs <= 0) {
-        ALOGD("-- returned buffer has bad timestamp %" PRId64 ", ignore it", presentationUs);
+        ALOGV("-- returned buffer timestamp %" PRId64 " <= 0, ignore it", presentationUs);
         mLatencyUnknown++;
         return;
     }
@@ -853,8 +861,7 @@
 
 //static
 sp<CodecBase> MediaCodec::GetCodecBase(const AString &name) {
-    static bool ccodecEnabled = property_get_bool("debug.stagefright.ccodec", false);
-    if (ccodecEnabled && name.startsWithIgnoreCase("c2.")) {
+    if (name.startsWithIgnoreCase("c2.")) {
         return CreateCCodec();
     } else if (name.startsWithIgnoreCase("omx.")) {
         // at this time only ACodec specifies a mime type.
@@ -1638,6 +1645,31 @@
     msg->post();
 }
 
+void MediaCodec::requestCpuBoostIfNeeded() {
+    if (mCpuBoostRequested) {
+        return;
+    }
+    int32_t colorFormat;
+    if (mSoftRenderer != NULL
+            && mOutputFormat->contains("hdr-static-info")
+            && mOutputFormat->findInt32("color-format", &colorFormat)
+            && (colorFormat == OMX_COLOR_FormatYUV420Planar16)) {
+        int32_t left, top, right, bottom, width, height;
+        int64_t totalPixel = 0;
+        if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+            totalPixel = (right - left + 1) * (bottom - top + 1);
+        } else if (mOutputFormat->findInt32("width", &width)
+                && mOutputFormat->findInt32("height", &height)) {
+            totalPixel = width * height;
+        }
+        if (totalPixel >= 1920 * 1080) {
+            addResource(MediaResource::kCpuBoost,
+                    MediaResource::kUnspecifiedSubType, 1);
+            mCpuBoostRequested = true;
+        }
+    }
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 void MediaCodec::cancelPendingDequeueOperations() {
@@ -2160,6 +2192,8 @@
                             }
                         }
 
+                        requestCpuBoostIfNeeded();
+
                         if (mFlags & kFlagIsEncoder) {
                             // Before we announce the format change we should
                             // collect codec specific data and amend the output
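
A small standalone sketch (illustrative, not part of this change) of the check
that gates requestCpuBoostIfNeeded() above: the boost is only requested for
software-rendered HDR output (YUV420Planar16 with static HDR info) at 1080p or
larger, preferring the crop rectangle and falling back to width x height:

    #include <cstdint>

    static bool needsCpuBoost(bool softRendererWithHdr10Output,
                              bool hasCrop, int32_t left, int32_t top,
                              int32_t right, int32_t bottom,
                              int32_t width, int32_t height) {
        if (!softRendererWithHdr10Output) {
            return false;
        }
        int64_t totalPixels = hasCrop
                ? (int64_t)(right - left + 1) * (bottom - top + 1)
                : (int64_t)width * height;
        return totalPixels >= 1920 * 1080;
    }
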
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index cd091a6..eaff283 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -92,10 +92,15 @@
 }
 
 std::vector<MediaCodecListBuilderBase *> GetBuilders() {
-    std::vector<MediaCodecListBuilderBase *> builders {&sOmxInfoBuilder};
-    if (property_get_bool("debug.stagefright.ccodec", false)) {
-        builders.push_back(GetCodec2InfoBuilder());
+    std::vector<MediaCodecListBuilderBase *> builders;
+    // If the plugin provides the input surface, we cannot use OMX video encoders.
+    // In that case, rely on the plugin to provide the list of OMX codecs that are usable.
+    sp<PersistentSurface> surfaceTest =
+        StagefrightPluginLoader::GetCCodecInstance()->createInputSurface();
+    if (surfaceTest == nullptr) {
+        builders.push_back(&sOmxInfoBuilder);
     }
+    builders.push_back(GetCodec2InfoBuilder());
     return builders;
 }
 
@@ -287,7 +292,9 @@
 //static
 bool MediaCodecList::isSoftwareCodec(const AString &componentName) {
     return componentName.startsWithIgnoreCase("OMX.google.")
-        || !componentName.startsWithIgnoreCase("OMX.");
+            || componentName.startsWithIgnoreCase("c2.android.")
+            || (!componentName.startsWithIgnoreCase("OMX.")
+                    && !componentName.startsWithIgnoreCase("c2."));
 }
 
 static int compareSoftwareCodecsFirst(const AString *name1, const AString *name2) {
@@ -298,7 +305,14 @@
         return isSoftwareCodec2 - isSoftwareCodec1;
     }
 
-    // sort order 2: OMX codecs are first (lower)
+    // sort order 2: Codec 2.0 codecs are first (lower)
+    bool isC2_1 = name1->startsWithIgnoreCase("c2.");
+    bool isC2_2 = name2->startsWithIgnoreCase("c2.");
+    if (isC2_1 != isC2_2) {
+        return isC2_2 - isC2_1;
+    }
+
+    // sort order 3: OMX codecs are first (lower)
     bool isOMX1 = name1->startsWithIgnoreCase("OMX.");
     bool isOMX2 = name2->startsWithIgnoreCase("OMX.");
     return isOMX2 - isOMX1;
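
To illustrate the updated ordering (software codecs first, then Codec 2.0, then
OMX within each group), a hypothetical input list would sort as follows; the
component names are examples only:

    c2.android.avc.decoder       (software, Codec 2.0)
    OMX.google.h264.decoder      (software, OMX)
    c2.vendor.avc.decoder        (non-software, Codec 2.0)
    OMX.qcom.video.decoder.avc   (non-software, OMX)
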
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 6920e51..cac53f4 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -222,7 +222,7 @@
     AString supportMultipleSecureCodecs = "true";
     for (const auto& info : infos) {
         AString name = info->getCodecName();
-        if (name.startsWith("OMX.google.") ||
+        if (name.startsWith("OMX.google.") || name.startsWith("c2.android.") ||
                 // TODO: reenable below codecs once fixed
                 name == "OMX.Intel.VideoDecoder.VP9.hybrid") {
             continue;
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index f6c61a0..2d4bd39 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -123,7 +123,7 @@
 };
 
 Mutex MediaExtractorFactory::gPluginMutex;
-std::shared_ptr<List<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
+std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
 bool MediaExtractorFactory::gPluginsRegistered = false;
 
 // static
@@ -133,7 +133,7 @@
     *confidence = 0.0f;
     *meta = nullptr;
 
-    std::shared_ptr<List<sp<ExtractorPlugin>>> plugins;
+    std::shared_ptr<std::list<sp<ExtractorPlugin>>> plugins;
     {
         Mutex::Autolock autoLock(gPluginMutex);
         if (!gPluginsRegistered) {
@@ -145,6 +145,7 @@
     MediaExtractor::CreatorFunc curCreator = NULL;
     MediaExtractor::CreatorFunc bestCreator = NULL;
     for (auto it = plugins->begin(); it != plugins->end(); ++it) {
+        ALOGV("sniffing %s", (*it)->def.extractor_name);
         float newConfidence;
         void *newMeta = nullptr;
         MediaExtractor::FreeMetaFunc newFreeMeta = nullptr;
@@ -171,7 +172,7 @@
 
 // static
 void MediaExtractorFactory::RegisterExtractor(const sp<ExtractorPlugin> &plugin,
-        List<sp<ExtractorPlugin>> &pluginList) {
+        std::list<sp<ExtractorPlugin>> &pluginList) {
     // sanity check: struct version, uuid, name
     if (plugin->def.def_version == 0
             || plugin->def.def_version > MediaExtractor::EXTRACTORDEF_VERSION) {
@@ -213,7 +214,7 @@
 
 //static
 void MediaExtractorFactory::RegisterExtractorsInApk(
-        const char *apkPath, List<sp<ExtractorPlugin>> &pluginList) {
+        const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList) {
     ALOGV("search for plugins at %s", apkPath);
     ZipArchiveHandle zipHandle;
     int32_t ret = OpenArchive(apkPath, &zipHandle);
@@ -261,7 +262,7 @@
 
 //static
 void MediaExtractorFactory::RegisterExtractorsInSystem(
-        const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList) {
+        const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
     ALOGV("search for plugins at %s", libDirPath);
     DIR *libDir = opendir(libDirPath);
     if (libDir) {
@@ -291,6 +292,10 @@
     }
 }
 
+static bool compareFunc(const sp<ExtractorPlugin>& first, const sp<ExtractorPlugin>& second) {
+    return strcmp(first->def.extractor_name, second->def.extractor_name) < 0;
+}
+
 // static
 void MediaExtractorFactory::UpdateExtractors(const char *newUpdateApkPath) {
     Mutex::Autolock autoLock(gPluginMutex);
@@ -301,7 +306,7 @@
         return;
     }
 
-    std::shared_ptr<List<sp<ExtractorPlugin>>> newList(new List<sp<ExtractorPlugin>>());
+    std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
 
     RegisterExtractorsInSystem("/system/lib"
 #ifdef __LP64__
@@ -319,6 +324,7 @@
         RegisterExtractorsInApk(newUpdateApkPath, *newList);
     }
 
+    newList->sort(compareFunc);
     gPlugins = newList;
     gPluginsRegistered = true;
 }
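
Sorting the plugin list makes the sniffing order deterministic regardless of
directory iteration order. A trimmed-down illustration of the same pattern,
using plain strings in place of ExtractorPlugin (names are examples only):

    #include <cstring>
    #include <list>
    #include <string>

    int main() {
        std::list<std::string> names = {"MP3Extractor", "AACExtractor", "MPEG4Extractor"};
        names.sort([](const std::string& a, const std::string& b) {
            return strcmp(a.c_str(), b.c_str()) < 0;  // same rule as compareFunc above
        });
        // names is now: AACExtractor, MP3Extractor, MPEG4Extractor
        return 0;
    }
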
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 23e543d..98f59b5 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -190,6 +190,10 @@
         sampleMetaData.setInt32(kKeyIsSyncFrame, true);
     }
 
+    if (flags & MediaCodec::BUFFER_FLAG_MUXER_DATA) {
+        sampleMetaData.setInt32(kKeyIsMuxerData, 1);
+    }
+
     sp<MediaAdapter> currentTrack = mTrackList[trackIndex];
     // This pushBuffer will wait until the mediaBuffer is consumed.
     return currentTrack->pushBuffer(mediaBuffer);
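
A rough usage sketch of the new flag (not part of this change; 'muxer',
'imageTrackIndex', 'exifData' and 'exifSize' are assumptions, and the muxer is
assumed to be writing a HEIF file): the Exif block is queued like a sample but
tagged as muxer data, and must start with "Exif\0\0" or MPEG4Writer drops it.

    sp<ABuffer> exifBuffer = new ABuffer(exifSize);
    memcpy(exifBuffer->data(), exifData, exifSize);
    muxer->writeSampleData(exifBuffer, imageTrackIndex, 0 /*timeUs*/,
            MediaCodec::BUFFER_FLAG_MUXER_DATA);
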
diff --git a/media/libstagefright/OmxInfoBuilder.cpp b/media/libstagefright/OmxInfoBuilder.cpp
index fe141ab..96b896b 100644
--- a/media/libstagefright/OmxInfoBuilder.cpp
+++ b/media/libstagefright/OmxInfoBuilder.cpp
@@ -108,24 +108,6 @@
     if (!transStatus.isOk()) {
         ALOGE("Fail to obtain codec roles from IOmxStore.");
         return NO_INIT;
-    } else if (roles.size() == 0) {
-        ALOGW("IOmxStore has empty implementation. "
-                "Creating a local default instance...");
-        omxStore = new implementation::OmxStore();
-        if (omxStore == nullptr) {
-            ALOGE("Cannot create a local default instance.");
-            return NO_INIT;
-        }
-        ALOGI("IOmxStore local default instance created.");
-        transStatus = omxStore->listRoles(
-                [&roles] (
-                const hidl_vec<IOmxStore::RoleInfo>& inRoleList) {
-                    roles = inRoleList;
-                });
-        if (!transStatus.isOk()) {
-            ALOGE("Fail to obtain codec roles from local IOmxStore.");
-            return NO_INIT;
-        }
     }
 
     hidl_vec<IOmxStore::ServiceAttribute> serviceAttributes;
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 5ae5644..e80ec3b 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -40,7 +40,8 @@
 
 StagefrightMetadataRetriever::StagefrightMetadataRetriever()
     : mParsedMetaData(false),
-      mAlbumArt(NULL) {
+      mAlbumArt(NULL),
+      mLastImageIndex(-1) {
     ALOGV("StagefrightMetadataRetriever()");
 }
 
@@ -124,11 +125,31 @@
     return OK;
 }
 
-VideoFrame* StagefrightMetadataRetriever::getImageAtIndex(
-        int index, int colorFormat, bool metaOnly) {
+sp<IMemory> StagefrightMetadataRetriever::getImageAtIndex(
+        int index, int colorFormat, bool metaOnly, bool thumbnail) {
+    ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+            index, colorFormat, metaOnly, thumbnail);
 
-    ALOGV("getImageAtIndex: index: %d colorFormat: %d, metaOnly: %d",
-            index, colorFormat, metaOnly);
+    return getImageInternal(index, colorFormat, metaOnly, thumbnail, NULL);
+}
+
+sp<IMemory> StagefrightMetadataRetriever::getImageRectAtIndex(
+        int index, int colorFormat, int left, int top, int right, int bottom) {
+    ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+            index, colorFormat, left, top, right, bottom);
+
+    FrameRect rect = {left, top, right, bottom};
+
+    if (mImageDecoder != NULL && index == mLastImageIndex) {
+        return mImageDecoder->extractFrame(&rect);
+    }
+
+    return getImageInternal(
+            index, colorFormat, false /*metaOnly*/, false /*thumbnail*/, &rect);
+}
+
+sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
+        int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
 
     if (mExtractor.get() == NULL) {
         ALOGE("no extractor.");
@@ -163,6 +184,10 @@
 
     sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
 
+    if (metaOnly) {
+        return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
+    }
+
     sp<IMediaSource> source = mExtractor->getTrack(i);
 
     if (source.get() == NULL) {
@@ -188,12 +213,19 @@
 
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
         const AString &componentName = matchingCodecs[i];
-        ImageDecoder decoder(componentName, trackMeta, source);
-        VideoFrame* frame = decoder.extractFrame(
-                0 /*frameTimeUs*/, 0 /*seekMode*/, colorFormat, metaOnly);
+        sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
+        int64_t frameTimeUs = thumbnail ? -1 : 0;
+        if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
+            sp<IMemory> frame = decoder->extractFrame(rect);
 
-        if (frame != NULL) {
-            return frame;
+            if (frame != NULL) {
+                if (rect != NULL) {
+                    // keep the decoder if slice decoding
+                    mImageDecoder = decoder;
+                    mLastImageIndex = index;
+                }
+                return frame;
+            }
         }
         ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
     }
@@ -201,19 +233,19 @@
     return NULL;
 }
 
-VideoFrame* StagefrightMetadataRetriever::getFrameAtTime(
+sp<IMemory> StagefrightMetadataRetriever::getFrameAtTime(
         int64_t timeUs, int option, int colorFormat, bool metaOnly) {
     ALOGV("getFrameAtTime: %" PRId64 " us option: %d colorFormat: %d, metaOnly: %d",
             timeUs, option, colorFormat, metaOnly);
 
-    VideoFrame *frame;
+    sp<IMemory> frame;
     status_t err = getFrameInternal(
             timeUs, 1, option, colorFormat, metaOnly, &frame, NULL /*outFrames*/);
     return (err == OK) ? frame : NULL;
 }
 
 status_t StagefrightMetadataRetriever::getFrameAtIndex(
-        std::vector<VideoFrame*>* frames,
+        std::vector<sp<IMemory> >* frames,
         int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
     ALOGV("getFrameAtIndex: frameIndex %d, numFrames %d, colorFormat: %d, metaOnly: %d",
             frameIndex, numFrames, colorFormat, metaOnly);
@@ -225,7 +257,7 @@
 
 status_t StagefrightMetadataRetriever::getFrameInternal(
         int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-        VideoFrame **outFrame, std::vector<VideoFrame*>* outFrames) {
+        sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames) {
     if (mExtractor.get() == NULL) {
         ALOGE("no extractor.");
         return NO_INIT;
@@ -265,6 +297,16 @@
     sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
             i, MediaExtractor::kIncludeExtensiveMetaData);
 
+    if (metaOnly) {
+        if (outFrame != NULL) {
+            *outFrame = FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
+            if (*outFrame != NULL) {
+                return OK;
+            }
+        }
+        return UNKNOWN_ERROR;
+    }
+
     sp<IMediaSource> source = mExtractor->getTrack(i);
 
     if (source.get() == NULL) {
@@ -293,17 +335,17 @@
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
         const AString &componentName = matchingCodecs[i];
         VideoFrameDecoder decoder(componentName, trackMeta, source);
-        if (outFrame != NULL) {
-            *outFrame = decoder.extractFrame(
-                    timeUs, option, colorFormat, metaOnly);
-            if (*outFrame != NULL) {
-                return OK;
-            }
-        } else if (outFrames != NULL) {
-            status_t err = decoder.extractFrames(
-                    timeUs, numFrames, option, colorFormat, outFrames);
-            if (err == OK) {
-                return OK;
+        if (decoder.init(timeUs, numFrames, option, colorFormat) == OK) {
+            if (outFrame != NULL) {
+                *outFrame = decoder.extractFrame();
+                if (*outFrame != NULL) {
+                    return OK;
+                }
+            } else if (outFrames != NULL) {
+                status_t err = decoder.extractFrames(outFrames);
+                if (err == OK) {
+                    return OK;
+                }
             }
         }
         ALOGV("%s failed to extract frame, trying next decoder.", componentName.c_str());
diff --git a/media/libstagefright/StagefrightPluginLoader.cpp b/media/libstagefright/StagefrightPluginLoader.cpp
index 7f13f87..519e870 100644
--- a/media/libstagefright/StagefrightPluginLoader.cpp
+++ b/media/libstagefright/StagefrightPluginLoader.cpp
@@ -44,6 +44,11 @@
     if (mCreateBuilder == nullptr) {
         ALOGD("Failed to find symbol: CreateBuilder (%s)", dlerror());
     }
+    mCreateInputSurface = (CodecBase::CreateInputSurfaceFunc)dlsym(
+            mLibHandle, "CreateInputSurface");
+    if (mCreateInputSurface == nullptr) {
+        ALOGD("Failed to find symbol: CreateInputSurface (%s)", dlerror());
+    }
 }
 
 StagefrightPluginLoader::~StagefrightPluginLoader() {
@@ -69,6 +74,14 @@
     return mCreateBuilder();
 }
 
+PersistentSurface *StagefrightPluginLoader::createInputSurface() {
+    if (mLibHandle == nullptr || mCreateInputSurface == nullptr) {
+        ALOGD("Handle or CreateInputSurface symbol is null");
+        return nullptr;
+    }
+    return mCreateInputSurface();
+}
+
 //static
 const std::unique_ptr<StagefrightPluginLoader> &StagefrightPluginLoader::GetCCodecInstance() {
     Mutex::Autolock _l(sMutex);
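
For reference, the generic shape of the dlsym lookup pattern used by the plugin
loader, as a standalone sketch (library and symbol names are illustrative); the
key point is that the pointer checked is the one that was just looked up:

    #include <dlfcn.h>
    #include <cstdio>

    typedef void* (*FactoryFunc)();

    int main() {
        void* handle = dlopen("libsample_plugin.so", RTLD_NOW | RTLD_LOCAL);
        if (handle == nullptr) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }
        FactoryFunc create = (FactoryFunc)dlsym(handle, "CreateInputSurface");
        if (create == nullptr) {
            fprintf(stderr, "dlsym failed: %s\n", dlerror());
        }
        dlclose(handle);
        return 0;
    }
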
diff --git a/media/libstagefright/StagefrightPluginLoader.h b/media/libstagefright/StagefrightPluginLoader.h
index 2746756..999d30c 100644
--- a/media/libstagefright/StagefrightPluginLoader.h
+++ b/media/libstagefright/StagefrightPluginLoader.h
@@ -20,6 +20,7 @@
 
 #include <media/stagefright/CodecBase.h>
 #include <media/stagefright/MediaCodecListWriter.h>
+#include <media/stagefright/PersistentSurface.h>
 #include <utils/Mutex.h>
 
 namespace android {
@@ -31,6 +32,8 @@
 
     CodecBase *createCodec();
     MediaCodecListBuilderBase *createBuilder();
+    PersistentSurface *createInputSurface();
+
 private:
     explicit StagefrightPluginLoader(const char *libPath);
 
@@ -40,6 +43,7 @@
     void *mLibHandle;
     CodecBase::CreateCodecFunc mCreateCodec;
     MediaCodecListBuilderBase::CreateBuilderFunc mCreateBuilder;
+    CodecBase::CreateInputSurfaceFunc mCreateInputSurface;
 };
 
 }  // namespace android
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 6457300..ea778a4 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -154,6 +154,7 @@
         { 23, OMX_AUDIO_AACObjectLD       },
         { 29, OMX_AUDIO_AACObjectHE_PS    },
         { 39, OMX_AUDIO_AACObjectELD      },
+        { 42, OMX_AUDIO_AACObjectXHE      },
     };
 
     OMX_AUDIO_AACPROFILETYPE profile;
@@ -1611,6 +1612,7 @@
     { OMX_AUDIO_AACObjectLD,          AUDIO_FORMAT_AAC_LD},
     { OMX_AUDIO_AACObjectHE_PS,       AUDIO_FORMAT_AAC_HE_V2},
     { OMX_AUDIO_AACObjectELD,         AUDIO_FORMAT_AAC_ELD},
+    { OMX_AUDIO_AACObjectXHE,         AUDIO_FORMAT_AAC_XHE},
     { OMX_AUDIO_AACObjectNull,        AUDIO_FORMAT_AAC},
 };
 
diff --git a/media/libstagefright/bqhelper/FrameDropper.cpp b/media/libstagefright/bqhelper/FrameDropper.cpp
index 7afe837..d2a2473 100644
--- a/media/libstagefright/bqhelper/FrameDropper.cpp
+++ b/media/libstagefright/bqhelper/FrameDropper.cpp
@@ -34,7 +34,12 @@
 }
 
 status_t FrameDropper::setMaxFrameRate(float maxFrameRate) {
-    if (maxFrameRate <= 0) {
+    if (maxFrameRate < 0) {
+        mMinIntervalUs = -1ll;
+        return OK;
+    }
+
+    if (maxFrameRate == 0) {
         ALOGE("framerate should be positive but got %f.", maxFrameRate);
         return BAD_VALUE;
     }
diff --git a/media/libstagefright/bqhelper/GraphicBufferSource.cpp b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
index 68ae8ec..dd03d38 100644
--- a/media/libstagefright/bqhelper/GraphicBufferSource.cpp
+++ b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
@@ -786,10 +786,16 @@
                 static_cast<long long>(mPrevFrameUs));
     } else {
         if (timeUs <= mPrevFrameUs) {
-            // Drop the frame if it's going backward in time. Bad timestamp
-            // could disrupt encoder's rate control completely.
-            ALOGW("Dropping frame that's going backward in time");
-            return false;
+            if (mFrameDropper != NULL && mFrameDropper->disabled()) {
+                // Warn only; the client has disabled frame drop logic, possibly for image
+                // encoding cases where the camera's ZSL mode could send out-of-order frames.
+                ALOGW("Received frame that's going backward in time");
+            } else {
+                // Drop the frame if it's going backward in time. Bad timestamp
+                // could disrupt encoder's rate control completely.
+                ALOGW("Dropping frame that's going backward in time");
+                return false;
+            }
         }
 
         mPrevFrameUs = timeUs;
@@ -1110,6 +1116,7 @@
         mEndOfStream = false;
         mEndOfStreamSent = false;
         mSkipFramesBeforeNs = -1ll;
+        mFrameDropper.clear();
         mFrameRepeatIntervalUs = -1ll;
         mRepeatLastFrameGeneration = 0;
         mOutstandingFrameRepeatCount = 0;
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
index c5a6d4b..4e83059 100644
--- a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
@@ -35,6 +35,9 @@
     // Returns false if max frame rate has not been set via setMaxFrameRate.
     bool shouldDrop(int64_t timeUs);
 
+    // Returns true if all frame drop logic should be disabled.
+    bool disabled() { return (mMinIntervalUs == -1ll); }
+
 protected:
     virtual ~FrameDropper();
 
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
index 5af9556..abc8910 100644
--- a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
@@ -149,7 +149,21 @@
     // When set, the sample's timestamp will be adjusted with the timeOffsetUs.
     status_t setTimeOffsetUs(int64_t timeOffsetUs);
 
-    // When set, the max frame rate fed to the encoder will be capped at maxFps.
+    /*
+     * Set the maximum frame rate on the source.
+     *
+     * When maxFps is a positive number, it indicates the maximum rate at which
+     * the buffers from this source will be sent to the encoder. Excessive
+     * frames will be dropped to meet the frame rate requirement.
+     *
+     * When maxFps is a negative number, any frame drop logic will be disabled
+     * and all frames from this source will be sent to the encoder, even when
+     * the timestamp goes backwards. Note that some components may still drop
+     * out-of-order frames silently, so this usually has to be used in
+     * conjunction with the OMXNodeInstance::setMaxPtsGapUs() workaround.
+     *
+     * When maxFps is 0, this call will fail with BAD_VALUE.
+     */
     status_t setMaxFps(float maxFps);
 
     // Sets the time lapse (or slow motion) parameters.
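
A standalone sketch of the three regimes described above, expressed as the
minimum inter-frame interval the frame dropper would enforce (the 1e6 / maxFps
conversion is an assumption for illustration; the -1 sentinel matches
FrameDropper::disabled()):

    #include <cstdint>

    // maxFps > 0  -> drop frames arriving closer than 1e6 / maxFps microseconds
    // maxFps < 0  -> dropping disabled; every frame is passed to the encoder
    // maxFps == 0 -> invalid (BAD_VALUE)
    static bool resolveMinIntervalUs(float maxFps, int64_t* minIntervalUs) {
        if (maxFps < 0) {
            *minIntervalUs = -1;
            return true;
        }
        if (maxFps == 0) {
            return false;
        }
        *minIntervalUs = (int64_t)(1000000.0f / maxFps);
        return true;
    }
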
diff --git a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
index 129ad65..95d3724 100644
--- a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
+++ b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
@@ -24,7 +24,7 @@
 //#define DRC_PRES_MODE_WRAP_DEBUG
 
 #define GPM_ENCODER_TARGET_LEVEL 64
-#define MAX_TARGET_LEVEL 64
+#define MAX_TARGET_LEVEL 40
 
 CDrcPresModeWrapper::CDrcPresModeWrapper()
 {
@@ -164,7 +164,7 @@
     if (mDataUpdate) {
         // sanity check
         if (mDesTarget < MAX_TARGET_LEVEL){
-            mDesTarget = MAX_TARGET_LEVEL;  // limit target level to -16 dB or below
+            mDesTarget = MAX_TARGET_LEVEL;  // limit target level to -10 dB or below
             newTarget = MAX_TARGET_LEVEL;
         }
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index e0c0c32..bc0a69f 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -36,6 +36,7 @@
 #define DRC_DEFAULT_MOBILE_DRC_CUT   127 /* maximum compression of dynamic range for mobile conf */
 #define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
 #define DRC_DEFAULT_MOBILE_DRC_HEAVY 1   /* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_EFFECT 3  /* MPEG-D DRC effect type; 3 => Limited playback range */
 #define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
 #define MAX_CHANNEL_COUNT            8  /* maximum number of audio channels that can be decoded */
 // names of properties that can be used to override the default DRC settings
@@ -44,6 +45,7 @@
 #define PROP_DRC_OVERRIDE_BOOST      "aac_drc_boost"
 #define PROP_DRC_OVERRIDE_HEAVY      "aac_drc_heavy"
 #define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+#define PROP_DRC_OVERRIDE_EFFECT     "ro.aac_drc_effect_type"
 
 namespace android {
 
@@ -63,6 +65,7 @@
     OMX_AUDIO_AACObjectLD,
     OMX_AUDIO_AACObjectELD,
     OMX_AUDIO_AACObjectER_Scalable,
+    OMX_AUDIO_AACObjectXHE,
 };
 
 SoftAAC2::SoftAAC2(
@@ -207,6 +210,15 @@
     } else {
         mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, DRC_DEFAULT_MOBILE_ENC_LEVEL);
     }
+    // AAC_UNIDRC_SET_EFFECT
+    int32_t effectType =
+            property_get_int32(PROP_DRC_OVERRIDE_EFFECT, DRC_DEFAULT_MOBILE_DRC_EFFECT);
+    if (effectType < -1 || effectType > 8) {
+        effectType = DRC_DEFAULT_MOBILE_DRC_EFFECT;
+    }
+    ALOGV("AAC decoder using MPEG-D DRC effect type %d (default=%d)",
+            effectType, DRC_DEFAULT_MOBILE_DRC_EFFECT);
+    aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_SET_EFFECT, effectType);
 
     // By default, the decoder creates a 5.1 channel downmix signal.
     // For seven and eight channel input streams, enable 6.1 and 7.1 channel output
@@ -414,10 +426,10 @@
             return OMX_ErrorNone;
         }
 
-        case OMX_IndexParamAudioAndroidAacPresentation:
+        case OMX_IndexParamAudioAndroidAacDrcPresentation:
         {
-            const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *aacPresParams =
-                    (const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *)params;
+            const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *aacPresParams =
+                    (const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *)params;
 
             if (!isValidOMXParam(aacPresParams)) {
                 return OMX_ErrorBadParameter;
@@ -443,6 +455,10 @@
                 ALOGV("set nMaxOutputChannels=%d", max);
                 aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, max);
             }
+            if (aacPresParams->nDrcEffectType >= -1) {
+                ALOGV("set nDrcEffectType=%d", aacPresParams->nDrcEffectType);
+                aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_SET_EFFECT, aacPresParams->nDrcEffectType);
+            }
             bool updateDrcWrapper = false;
             if (aacPresParams->nDrcBoost >= 0) {
                 ALOGV("set nDrcBoost=%d", aacPresParams->nDrcBoost);
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
index 8d5d071..75ca846 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
@@ -107,6 +107,7 @@
 #else
         if ((size_t)nLayers > SIZE_MAX / sizeof(Vol *)) {
             status = PV_FALSE;
+            oscl_free(video);
             goto fail;
         }
 
@@ -115,7 +116,8 @@
         if (video->vol == NULL) status = PV_FALSE;
         video->memoryUsage += nLayers * sizeof(Vol *);
 
-
+        /* be sure not to leak any previous state */
+        PVCleanUpVideoDecoder(decCtrl);
         /* we need to setup this pointer for the application to */
         /*    pass it around.                                   */
         decCtrl->videoDecoderData = (void *) video;
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index 813004b..942f850 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -431,7 +431,7 @@
             }
 
             if (mInputBufferCount == 0) {
-                CHECK(mHeader == NULL);
+                delete mHeader;
                 mHeader = new OpusHeader();
                 memset(mHeader, 0, sizeof(*mHeader));
                 if (!ParseOpusHeader(data, size, mHeader)) {
@@ -452,6 +452,9 @@
                 }
 
                 int status = OPUS_INVALID_STATE;
+                if (mDecoder != NULL) {
+                    opus_multistream_decoder_destroy(mDecoder);
+                }
                 mDecoder = opus_multistream_decoder_create(kRate,
                                                            mHeader->channels,
                                                            mHeader->num_streams,
diff --git a/media/libstagefright/codecs/xaacdec/Android.bp b/media/libstagefright/codecs/xaacdec/Android.bp
new file mode 100644
index 0000000..7392f1e
--- /dev/null
+++ b/media/libstagefright/codecs/xaacdec/Android.bp
@@ -0,0 +1,36 @@
+cc_library_shared {
+    name: "libstagefright_soft_xaacdec",
+    vendor_available: true,
+
+    srcs: [
+        "SoftXAAC.cpp",
+    ],
+
+    include_dirs: [
+        "frameworks/av/media/libstagefright/include",
+        "frameworks/native/include/media/openmax",
+    ],
+
+    cflags: [
+        "-Werror",
+        "-DENABLE_MPEG_D_DRC"
+    ],
+
+    sanitize: {
+        // integer_overflow: true,
+        misc_undefined: [ "signed-integer-overflow", "unsigned-integer-overflow", ],
+        cfi: true,
+    },
+
+    static_libs: ["libxaacdec"],
+
+    shared_libs: [
+        "libstagefright_omx",
+        "libstagefright_foundation",
+        "libutils",
+        "libcutils",
+        "liblog",
+    ],
+
+    compile_multilib: "32",
+}
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
new file mode 100644
index 0000000..f173e0f
--- /dev/null
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -0,0 +1,1687 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftXAAC"
+#include <utils/Log.h>
+
+#include "SoftXAAC.h"
+
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
+#include <cutils/properties.h>
+#include <math.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <utils/misc.h>
+
+/* 64*-0.25dB = -16 dB below full scale for mobile conf */
+#define DRC_DEFAULT_MOBILE_REF_LEVEL 64
+/* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_CUT 127
+/* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_BOOST 127
+/* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1
+/* encoder target level; -1 => the value is unknown,
+ * otherwise dB step value (e.g. 64 for -16 dB) */
+#define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1)
+
+/* Default Effect type is "Limited playback" */
+#define DRC_KEY_AAC_DRC_EFFECT_TYPE (3)
+
+/* REF_LEVEL of 64 pairs well with EFFECT_TYPE of 3. */
+/* Default loudness value for MPEG-D DRC */
+#define DRC_DEFAULT_MOBILE_LOUDNESS_LEVEL (64)
+
+#define PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level"
+#define PROP_DRC_OVERRIDE_CUT "aac_drc_cut"
+#define PROP_DRC_OVERRIDE_BOOST "aac_drc_boost"
+#define PROP_DRC_OVERRIDE_HEAVY "aac_drc_heavy"
+#define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+#define PROP_DRC_OVERRIDE_EFFECT_TYPE "ro.aac_drc_effect_type"
+
+/* maximum number of audio channels that can be decoded */
+#define MAX_CHANNEL_COUNT 8
+
+#define RETURN_IF_FATAL(retval, str)                       \
+    if (retval & IA_FATAL_ERROR) {                         \
+        ALOGE("Error in %s: Returned: %d", str, retval);   \
+        return retval;                                     \
+    } else if (retval != IA_NO_ERROR) {                    \
+        ALOGW("Warning in %s: Returned: %d", str, retval); \
+    }
+
+namespace android {
+
+template <class T>
+static void InitOMXParams(T* params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+static const OMX_U32 kSupportedProfiles[] = {
+    OMX_AUDIO_AACObjectLC, OMX_AUDIO_AACObjectHE,  OMX_AUDIO_AACObjectHE_PS,
+    OMX_AUDIO_AACObjectLD, OMX_AUDIO_AACObjectELD,
+};
+
+SoftXAAC::SoftXAAC(const char* name, const OMX_CALLBACKTYPE* callbacks, OMX_PTR appData,
+                   OMX_COMPONENTTYPE** component)
+    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+      mIsADTS(false),
+      mInputBufferCount(0),
+      mOutputBufferCount(0),
+      mSignalledError(false),
+      mLastInHeader(NULL),
+      mPrevTimestamp(0),
+      mCurrentTimestamp(0),
+      mOutputPortSettingsChange(NONE),
+      mXheaacCodecHandle(NULL),
+      mMpegDDrcHandle(NULL),
+      mInputBufferSize(0),
+      mOutputFrameLength(1024),
+      mInputBuffer(NULL),
+      mOutputBuffer(NULL),
+      mSampFreq(0),
+      mNumChannels(0),
+      mPcmWdSz(0),
+      mChannelMask(0),
+      mIsCodecInitialized(false),
+      mIsCodecConfigFlushRequired(false),
+      mMpegDDRCPresent(0),
+      mDRCFlag(0)
+
+{
+    initPorts();
+    CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+SoftXAAC::~SoftXAAC() {
+    int errCode = deInitXAACDecoder();
+    if (0 != errCode) {
+        ALOGE("deInitXAACDecoder() failed %d", errCode);
+    }
+
+    mIsCodecInitialized = false;
+    mIsCodecConfigFlushRequired = false;
+}
+
+void SoftXAAC::initPorts() {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = 0;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = kNumInputBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = 8192;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.audio.cMIMEType = const_cast<char*>("audio/aac");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
+
+    addPort(def);
+
+    def.nPortIndex = 1;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = kNumOutputBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = 4096 * MAX_CHANNEL_COUNT;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.audio.cMIMEType = const_cast<char*>("audio/raw");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+    addPort(def);
+}
+
+status_t SoftXAAC::initDecoder() {
+    status_t status = UNKNOWN_ERROR;
+
+    int ui_drc_val;
+    IA_ERRORCODE err_code = IA_NO_ERROR;
+    int loop = 0;
+
+    err_code = initXAACDecoder();
+    if (err_code != IA_NO_ERROR) {
+        if (NULL == mXheaacCodecHandle) {
+            ALOGE("AAC decoder handle is null");
+        }
+        if (NULL == mMpegDDrcHandle) {
+            ALOGE("MPEG-D DRC decoder handle is null");
+        }
+        for (loop = 1; loop < mMallocCount; loop++) {
+            if (mMemoryArray[loop] == NULL) {
+                ALOGE(" memory allocation error %d\n", loop);
+                break;
+            }
+        }
+        ALOGE("initXAACDecoder Failed");
+
+        for (loop = 0; loop < mMallocCount; loop++) {
+            if (mMemoryArray[loop]) free(mMemoryArray[loop]);
+        }
+        mMallocCount = 0;
+        return status;
+    } else {
+        status = OK;
+    }
+
+    mEndOfInput = false;
+    mEndOfOutput = false;
+
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get(PROP_DRC_OVERRIDE_REF_LEVEL, value, NULL)) {
+        ui_drc_val = atoi(value);
+        ALOGV("AAC decoder using desired DRC target reference level of %d instead of %d",
+              ui_drc_val, DRC_DEFAULT_MOBILE_REF_LEVEL);
+    } else {
+        ui_drc_val = DRC_DEFAULT_MOBILE_REF_LEVEL;
+    }
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL, &ui_drc_val);
+
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL");
+#ifdef ENABLE_MPEG_D_DRC
+    /* Use ui_drc_val from PROP_DRC_OVERRIDE_REF_LEVEL or DRC_DEFAULT_MOBILE_REF_LEVEL
+     * for IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS too */
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS, &ui_drc_val);
+
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS");
+#endif
+
+    if (property_get(PROP_DRC_OVERRIDE_CUT, value, NULL)) {
+        ui_drc_val = atoi(value);
+        ALOGV("AAC decoder using desired DRC attenuation factor of %d instead of %d", ui_drc_val,
+              DRC_DEFAULT_MOBILE_DRC_CUT);
+    } else {
+        ui_drc_val = DRC_DEFAULT_MOBILE_DRC_CUT;
+    }
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT, &ui_drc_val);
+
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT");
+
+    if (property_get(PROP_DRC_OVERRIDE_BOOST, value, NULL)) {
+        ui_drc_val = atoi(value);
+        ALOGV("AAC decoder using desired DRC boost factor of %d instead of %d", ui_drc_val,
+              DRC_DEFAULT_MOBILE_DRC_BOOST);
+    } else {
+        ui_drc_val = DRC_DEFAULT_MOBILE_DRC_BOOST;
+    }
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST, &ui_drc_val);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST");
+
+    if (property_get(PROP_DRC_OVERRIDE_HEAVY, value, NULL)) {
+        ui_drc_val = atoi(value);
+        ALOGV("AAC decoder using desired Heavy compression factor of %d instead of %d", ui_drc_val,
+              DRC_DEFAULT_MOBILE_DRC_HEAVY);
+    } else {
+        ui_drc_val = DRC_DEFAULT_MOBILE_DRC_HEAVY;
+    }
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP, &ui_drc_val);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP");
+
+#ifdef ENABLE_MPEG_D_DRC
+    if (property_get(PROP_DRC_OVERRIDE_EFFECT_TYPE, value, NULL)) {
+        ui_drc_val = atoi(value);
+        ALOGV("AAC decoder using desired DRC effect type of %d instead of %d", ui_drc_val,
+              DRC_KEY_AAC_DRC_EFFECT_TYPE);
+    } else {
+        ui_drc_val = DRC_KEY_AAC_DRC_EFFECT_TYPE;
+    }
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE, &ui_drc_val);
+
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE");
+
+#endif
+    return status;
+}
+
+OMX_ERRORTYPE SoftXAAC::internalGetParameter(OMX_INDEXTYPE index, OMX_PTR params) {
+    switch ((OMX_U32)index) {
+        case OMX_IndexParamAudioPortFormat: {
+            OMX_AUDIO_PARAM_PORTFORMATTYPE* formatParams = (OMX_AUDIO_PARAM_PORTFORMATTYPE*)params;
+
+            if (!isValidOMXParam(formatParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            formatParams->eEncoding =
+                (formatParams->nPortIndex == 0) ? OMX_AUDIO_CodingAAC : OMX_AUDIO_CodingPCM;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAac: {
+            OMX_AUDIO_PARAM_AACPROFILETYPE* aacParams = (OMX_AUDIO_PARAM_AACPROFILETYPE*)params;
+
+            if (!isValidOMXParam(aacParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (aacParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            aacParams->nBitRate = 0;
+            aacParams->nAudioBandWidth = 0;
+            aacParams->nAACtools = 0;
+            aacParams->nAACERtools = 0;
+            aacParams->eAACProfile = OMX_AUDIO_AACObjectMain;
+
+            aacParams->eAACStreamFormat =
+                mIsADTS ? OMX_AUDIO_AACStreamFormatMP4ADTS : OMX_AUDIO_AACStreamFormatMP4FF;
+
+            aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
+
+            if (!isConfigured()) {
+                aacParams->nChannels = 1;
+                aacParams->nSampleRate = 44100;
+                aacParams->nFrameLength = 0;
+            } else {
+                aacParams->nChannels = mNumChannels;
+                aacParams->nSampleRate = mSampFreq;
+                aacParams->nFrameLength = mOutputFrameLength;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm: {
+            OMX_AUDIO_PARAM_PCMMODETYPE* pcmParams = (OMX_AUDIO_PARAM_PCMMODETYPE*)params;
+
+            if (!isValidOMXParam(pcmParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (pcmParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            pcmParams->eNumData = OMX_NumericalDataSigned;
+            pcmParams->eEndian = OMX_EndianBig;
+            pcmParams->bInterleaved = OMX_TRUE;
+            pcmParams->nBitPerSample = 16;
+            pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+            pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+            pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+            pcmParams->eChannelMapping[2] = OMX_AUDIO_ChannelCF;
+            pcmParams->eChannelMapping[3] = OMX_AUDIO_ChannelLFE;
+            pcmParams->eChannelMapping[4] = OMX_AUDIO_ChannelLS;
+            pcmParams->eChannelMapping[5] = OMX_AUDIO_ChannelRS;
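+            // Default 5.1-style channel ordering reported to the client.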
+
+            if (!isConfigured()) {
+                pcmParams->nChannels = 1;
+                pcmParams->nSamplingRate = 44100;
+            } else {
+                pcmParams->nChannels = mNumChannels;
+                pcmParams->nSamplingRate = mSampFreq;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioProfileQuerySupported: {
+            OMX_AUDIO_PARAM_ANDROID_PROFILETYPE* profileParams =
+                (OMX_AUDIO_PARAM_ANDROID_PROFILETYPE*)params;
+
+            if (!isValidOMXParam(profileParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (profileParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (profileParams->nProfileIndex >= NELEM(kSupportedProfiles)) {
+                return OMX_ErrorNoMore;
+            }
+
+            profileParams->eProfile = kSupportedProfiles[profileParams->nProfileIndex];
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftXAAC::internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch ((int)index) {
+        case OMX_IndexParamStandardComponentRole: {
+            const OMX_PARAM_COMPONENTROLETYPE* roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE*)params;
+
+            if (!isValidOMXParam(roleParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (strncmp((const char*)roleParams->cRole, "audio_decoder.aac",
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPortFormat: {
+            const OMX_AUDIO_PARAM_PORTFORMATTYPE* formatParams =
+                (const OMX_AUDIO_PARAM_PORTFORMATTYPE*)params;
+
+            if (!isValidOMXParam(formatParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if ((formatParams->nPortIndex == 0 && formatParams->eEncoding != OMX_AUDIO_CodingAAC) ||
+                (formatParams->nPortIndex == 1 && formatParams->eEncoding != OMX_AUDIO_CodingPCM)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAac: {
+            const OMX_AUDIO_PARAM_AACPROFILETYPE* aacParams =
+                (const OMX_AUDIO_PARAM_AACPROFILETYPE*)params;
+
+            if (!isValidOMXParam(aacParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (aacParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (aacParams->eAACStreamFormat == OMX_AUDIO_AACStreamFormatMP4FF) {
+                mIsADTS = false;
+            } else if (aacParams->eAACStreamFormat == OMX_AUDIO_AACStreamFormatMP4ADTS) {
+                mIsADTS = true;
+            } else {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAndroidAacDrcPresentation: {
+            const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE* aacPresParams =
+                (const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE*)params;
+
+            if (!isValidOMXParam(aacPresParams)) {
+                ALOGE("set OMX_ErrorBadParameter");
+                return OMX_ErrorBadParameter;
+            }
+
+            // For the following parameters of the OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE
+            // structure, a value of -1 means the parameter was not set by the application:
+            //   nMaxOutputChannels     -1 by default
+            //   nDrcCut                uses default platform properties, see initDecoder()
+            //   nDrcBoost                idem
+            //   nHeavyCompression        idem
+            //   nTargetReferenceLevel    idem
+            //   nEncodedTargetLevel      idem
+            if (aacPresParams->nMaxOutputChannels >= 0) {
+                int max;
+                if (aacPresParams->nMaxOutputChannels >= 8) {
+                    max = 8;
+                } else if (aacPresParams->nMaxOutputChannels >= 6) {
+                    max = 6;
+                } else if (aacPresParams->nMaxOutputChannels >= 2) {
+                    max = 2;
+                } else {
+                    // -1 or 0: disable downmix,  1: mono
+                    max = aacPresParams->nMaxOutputChannels;
+                }
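+                // Note: 'max' is computed here but not applied to the decoder in this version.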
+            }
+            /* Apply DRC Changes */
+            IA_ERRORCODE err_code = setXAACDRCInfo(aacPresParams->nDrcCut, aacPresParams->nDrcBoost,
+                                                   aacPresParams->nTargetReferenceLevel,
+                                                   aacPresParams->nHeavyCompression
+#ifdef ENABLE_MPEG_D_DRC
+                                                   ,
+                                                   aacPresParams->nDrcEffectType
+#endif
+            );  // TODO: revert this change
+            if (err_code != IA_NO_ERROR) {
+                ALOGE("Error in OMX_IndexParamAudioAndroidAacDrcPresentation");
+                return OMX_ErrorBadParameter;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm: {
+            const OMX_AUDIO_PARAM_PCMMODETYPE* pcmParams = (OMX_AUDIO_PARAM_PCMMODETYPE*)params;
+
+            if (!isValidOMXParam(pcmParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (pcmParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+bool SoftXAAC::isConfigured() const {
+    return mInputBufferCount > 0;
+}
+
+void SoftXAAC::onQueueFilled(OMX_U32 /* portIndex */) {
+    if (mSignalledError || mOutputPortSettingsChange != NONE) {
+        ALOGE("onQueueFilled do not process %d %d", mSignalledError, mOutputPortSettingsChange);
+        return;
+    }
+
+    uint8_t* inBuffer = NULL;
+    uint32_t inBufferLength = 0;
+
+    List<BufferInfo*>& inQueue = getPortQueue(0);
+    List<BufferInfo*>& outQueue = getPortQueue(1);
+
+    signed int numOutBytes = 0;
+
+    /* If a decoder call fails mid-stream, the last known mOutputFrameLength is used.  */
+    /* Decoded AAC output is 1024 or 2048 samples per channel.                         */
+    /* TODO: for USAC, mOutputFrameLength can go up to 4096.                           */
+    /* Note: the buffer save/retrieve logic currently assumes 2 bytes per sample.      */
+    if (mIsCodecInitialized) {
+        numOutBytes = mOutputFrameLength * (mPcmWdSz / 8) * mNumChannels;
+        if ((mPcmWdSz / 8) != 2) {
+            ALOGE("XAAC assumes 2 bytes per sample! mPcmWdSz %d", mPcmWdSz);
+        }
+    }
+
+    while ((!inQueue.empty() || mEndOfInput) && !outQueue.empty()) {
+        if (!inQueue.empty()) {
+            BufferInfo* inInfo = *inQueue.begin();
+            OMX_BUFFERHEADERTYPE* inHeader = inInfo->mHeader;
+
+            /* No need to check inHeader != NULL, as inQueue is not empty */
+            mEndOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+
+            if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
+                ALOGW("first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
+                inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
+            }
+            if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
+                inBuffer = inHeader->pBuffer + inHeader->nOffset;
+                inBufferLength = inHeader->nFilledLen;
+
+                /* GA header configuration sent to Decoder! */
+                int err_code = configXAACDecoder(inBuffer, inBufferLength);
+                if (0 != err_code) {
+                    ALOGW("configXAACDecoder err_code = %d", err_code);
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
+                    return;
+                }
+                mInputBufferCount++;
+                mOutputBufferCount++;  // fake increase of outputBufferCount to keep the counters
+                                       // aligned
+
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                mLastInHeader = NULL;
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+
+                // Only send out port settings changed event if both sample rate
+                // and mNumChannels are valid.
+                if (mSampFreq && mNumChannels && !mIsCodecConfigFlushRequired) {
+                    ALOGV("Configuring decoder: %d Hz, %d channels", mSampFreq, mNumChannels);
+                    notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+                    mOutputPortSettingsChange = AWAITING_DISABLED;
+                }
+
+                return;
+            }
+
+            if (inHeader->nFilledLen == 0) {
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                mLastInHeader = NULL;
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+                continue;
+            }
+
+            // Restore Offset and Length for Port reconfig case
+            size_t tempOffset = inHeader->nOffset;
+            size_t tempFilledLen = inHeader->nFilledLen;
+            if (mIsADTS) {
+                size_t adtsHeaderSize = 0;
+                // skip 30 bits, aac_frame_length follows.
+                // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
+
+                const uint8_t* adtsHeader = inHeader->pBuffer + inHeader->nOffset;
+
+                bool signalError = false;
+                if (inHeader->nFilledLen < 7) {
+                    ALOGE(
+                        "Audio data too short to contain even the ADTS header. "
+                        "Got %d bytes.",
+                        inHeader->nFilledLen);
+                    hexdump(adtsHeader, inHeader->nFilledLen);
+                    signalError = true;
+                } else {
+                    bool protectionAbsent = (adtsHeader[1] & 1);
+
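+                    // aac_frame_length is a 13-bit field giving the full ADTS frame size, header included.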
+                    unsigned aac_frame_length =
+                        ((adtsHeader[3] & 3) << 11) | (adtsHeader[4] << 3) | (adtsHeader[5] >> 5);
+
+                    if (inHeader->nFilledLen < aac_frame_length) {
+                        ALOGE(
+                            "Not enough audio data for the complete frame. "
+                            "Got %d bytes, frame size according to the ADTS "
+                            "header is %u bytes.",
+                            inHeader->nFilledLen, aac_frame_length);
+                        hexdump(adtsHeader, inHeader->nFilledLen);
+                        signalError = true;
+                    } else {
+                        adtsHeaderSize = (protectionAbsent ? 7 : 9);
+                        if (aac_frame_length < adtsHeaderSize) {
+                            signalError = true;
+                        } else {
+                            inBuffer = (uint8_t*)adtsHeader + adtsHeaderSize;
+                            inBufferLength = aac_frame_length - adtsHeaderSize;
+
+                            inHeader->nOffset += adtsHeaderSize;
+                            inHeader->nFilledLen -= adtsHeaderSize;
+                        }
+                    }
+                }
+
+                if (signalError) {
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL);
+                    return;
+                }
+
+                // insert buffer size and time stamp
+                if (mLastInHeader != inHeader) {
+                    mCurrentTimestamp = inHeader->nTimeStamp;
+                    mLastInHeader = inHeader;
+                } else {
+                    mCurrentTimestamp = mPrevTimestamp + mOutputFrameLength * 1000000ll / mSampFreq;
+                }
+            } else {
+                inBuffer = inHeader->pBuffer + inHeader->nOffset;
+                inBufferLength = inHeader->nFilledLen;
+                mLastInHeader = inHeader;
+                mCurrentTimestamp = inHeader->nTimeStamp;
+            }
+
+            int numLoops = 0;
+            signed int prevSampleRate = mSampFreq;
+            signed int prevNumChannels = mNumChannels;
+
+            /* XAAC decoder expects first frame to be fed via configXAACDecoder API */
+            /* which should initialize the codec. Once this state is reached, call the  */
+            /* decodeXAACStream API with same frame to decode!                        */
+            if (!mIsCodecInitialized) {
+                int err_code = configXAACDecoder(inBuffer, inBufferLength);
+                if (0 != err_code) {
+                    ALOGW("configXAACDecoder Failed 2 err_code = %d", err_code);
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
+                    return;
+                }
+                mIsCodecConfigFlushRequired = true;
+            }
+
+            if (!mSampFreq || !mNumChannels) {
+                if ((mInputBufferCount > 2) && (mOutputBufferCount <= 1)) {
+                    ALOGW("Invalid AAC stream");
+                    ALOGW("mSampFreq %d mNumChannels %d ", mSampFreq, mNumChannels);
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                    return;
+                }
+            } else if ((mSampFreq != prevSampleRate) || (mNumChannels != prevNumChannels)) {
+                ALOGV("Reconfiguring decoder: %d->%d Hz, %d->%d channels", prevSampleRate,
+                      mSampFreq, prevNumChannels, mNumChannels);
+                inHeader->nOffset = tempOffset;
+                inHeader->nFilledLen = tempFilledLen;
+                notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+                mOutputPortSettingsChange = AWAITING_DISABLED;
+                return;
+            }
+
+            signed int bytesConsumed = 0;
+            int errorCode = 0;
+            if (mIsCodecInitialized) {
+                errorCode =
+                    decodeXAACStream(inBuffer, inBufferLength, &bytesConsumed, &numOutBytes);
+            } else {
+                ALOGW("Assumption that first frame after header initializes decoder failed!");
+            }
+            inHeader->nFilledLen -= bytesConsumed;
+            inHeader->nOffset += bytesConsumed;
+
+            if (inHeader->nFilledLen != 0) {
+                ALOGE("All data not consumed");
+            }
+
+            /* In case of error, decoder would have given out empty buffer */
+            if ((0 != errorCode) && (0 == numOutBytes) && mIsCodecInitialized) {
+                numOutBytes = mOutputFrameLength * (mPcmWdSz / 8) * mNumChannels;
+            }
+            numLoops++;
+
+            if (0 == bytesConsumed) {
+                ALOGW("bytesConsumed is zero");
+            }
+
+            if (errorCode) {
+                /* Clear buffer for output buffer is done inside XAAC codec */
+                /* TODO - Check if below memset is on top of reset inside codec */
+                memset(mOutputBuffer, 0, numOutBytes);  // TODO: check for overflow, ASAN
+                // Discard input buffer.
+                inHeader->nFilledLen = 0;
+                // fall through
+            }
+
+            if (inHeader->nFilledLen == 0) {
+                inInfo->mOwnedByUs = false;
+                mInputBufferCount++;
+                inQueue.erase(inQueue.begin());
+                mLastInHeader = NULL;
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+            } else {
+                ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen);
+            }
+
+            if (!outQueue.empty() && numOutBytes) {
+                BufferInfo* outInfo = *outQueue.begin();
+                OMX_BUFFERHEADERTYPE* outHeader = outInfo->mHeader;
+
+                if (outHeader->nOffset != 0) {
+                    ALOGE("outHeader->nOffset != 0 is not handled");
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                    return;
+                }
+
+                signed short* outBuffer =
+                    reinterpret_cast<signed short*>(outHeader->pBuffer + outHeader->nOffset);
+                int samplesize = mNumChannels * sizeof(int16_t);
+                if (outHeader->nOffset + mOutputFrameLength * samplesize > outHeader->nAllocLen) {
+                    ALOGE("buffer overflow");
+                    mSignalledError = true;
+                    notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                    return;
+                }
+                memcpy(outBuffer, mOutputBuffer, numOutBytes);
+                outHeader->nFilledLen = numOutBytes;
+
+                if (mEndOfInput && !outQueue.empty()) {
+                    outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                    mEndOfOutput = true;
+                } else {
+                    outHeader->nFlags = 0;
+                }
+                outHeader->nTimeStamp = mCurrentTimestamp;
+                mPrevTimestamp = mCurrentTimestamp;
+
+                mOutputBufferCount++;
+                outInfo->mOwnedByUs = false;
+                outQueue.erase(outQueue.begin());
+                outInfo = NULL;
+                notifyFillBufferDone(outHeader);
+                outHeader = NULL;
+            }
+        }
+
+        if (mEndOfInput) {
+            if (!outQueue.empty()) {
+                if (!mEndOfOutput) {
+                    ALOGV(" empty block signaling EOS");
+                    // send partial or empty block signaling EOS
+                    mEndOfOutput = true;
+                    BufferInfo* outInfo = *outQueue.begin();
+                    OMX_BUFFERHEADERTYPE* outHeader = outInfo->mHeader;
+
+                    outHeader->nFilledLen = 0;
+                    outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                    outHeader->nTimeStamp = mPrevTimestamp;
+
+                    mOutputBufferCount++;
+                    outInfo->mOwnedByUs = false;
+                    outQueue.erase(outQueue.begin());
+                    outInfo = NULL;
+                    notifyFillBufferDone(outHeader);
+                    outHeader = NULL;
+                }
+                break;  // outQueue is not empty, but there is no more output to produce
+            }
+        }
+    }
+}
+
+void SoftXAAC::onPortFlushCompleted(OMX_U32 portIndex) {
+    if (portIndex == 0) {
+        // Make sure that the next buffer output does not still
+        // depend on fragments from the last one decoded.
+        // drain all existing data
+        if (mIsCodecInitialized) {
+            IA_ERRORCODE err_code = configflushDecode();
+            if (err_code != IA_NO_ERROR) {
+                ALOGE("Error in configflushDecode: Error %d", err_code);
+            }
+        }
+        drainDecoder();
+        mLastInHeader = NULL;
+        mEndOfInput = false;
+    } else {
+        mEndOfOutput = false;
+    }
+}
+
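+// Flushes the decoder's internal memory (IA_CMD_TYPE_FLUSH_MEM) and, once re-initialization
+// completes, re-reads the stream info so decoding can resume cleanly after a port flush.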
+int SoftXAAC::configflushDecode() {
+    IA_ERRORCODE err_code;
+    UWORD32 ui_init_done;
+    uint32_t inBufferLength = 8203;
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT, IA_CMD_TYPE_FLUSH_MEM, NULL);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_FLUSH_MEM");
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_INPUT_BYTES, 0, &inBufferLength);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT, IA_CMD_TYPE_FLUSH_MEM, NULL);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_FLUSH_MEM");
+
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT, IA_CMD_TYPE_INIT_DONE_QUERY,
+                                &ui_init_done);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_DONE_QUERY");
+
+    if (ui_init_done) {
+        err_code = getXAACStreamInfo();
+        RETURN_IF_FATAL(err_code, "getXAACStreamInfo");
+
+        ALOGV(
+            "Stream config: sampFreq %d, numChannels %d, pcmWdSz %d, channelMask %d, "
+            "outputFrameLength %d",
+            mSampFreq, mNumChannels, mPcmWdSz, mChannelMask, mOutputFrameLength);
+        if (mNumChannels > MAX_CHANNEL_COUNT) {
+            ALOGE(" No of channels are more than max channels\n");
+            mIsCodecInitialized = false;
+        } else
+            mIsCodecInitialized = true;
+    }
+    return err_code;
+}
+
+int SoftXAAC::drainDecoder() {
+    return 0;
+}
+
+void SoftXAAC::onReset() {
+    drainDecoder();
+
+    // reset the "configured" state
+    mInputBufferCount = 0;
+    mOutputBufferCount = 0;
+    mEndOfInput = false;
+    mEndOfOutput = false;
+    mLastInHeader = NULL;
+
+    mSignalledError = false;
+    mOutputPortSettingsChange = NONE;
+}
+
+void SoftXAAC::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
+    if (portIndex != 1) {
+        return;
+    }
+
+    switch (mOutputPortSettingsChange) {
+        case NONE:
+            break;
+
+        case AWAITING_DISABLED: {
+            CHECK(!enabled);
+            mOutputPortSettingsChange = AWAITING_ENABLED;
+            break;
+        }
+
+        default: {
+            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
+            CHECK(enabled);
+            mOutputPortSettingsChange = NONE;
+            break;
+        }
+    }
+}
+
+int SoftXAAC::initXAACDecoder() {
+    LOOPIDX i;
+
+    /* Error code */
+    IA_ERRORCODE err_code = IA_NO_ERROR;
+
+    /* First part                                        */
+    /* Error Handler Init                                */
+    /* Get Library Name, Library Version and API Version */
+    /* Initialize API structure + Default config set     */
+    /* Set config params from user                       */
+    /* Initialize memory tables                          */
+    /* Get memory information and allocate memory        */
+
+    /* Memory variables */
+    UWORD32 ui_proc_mem_tabs_size;
+    /* API size */
+    UWORD32 pui_api_size;
+
+    mInputBufferSize = 0;
+    mInputBuffer = 0;
+    mOutputBuffer = 0;
+    mMallocCount = 0;
+
+    /* End of process-struct initialization */
+    /* ******************************************************************/
+    /* Initialize API structure and set config params to default        */
+    /* ******************************************************************/
+
+    /* Get the API size */
+    err_code = ixheaacd_dec_api(NULL, IA_API_CMD_GET_API_SIZE, 0, &pui_api_size);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_API_SIZE");
+
+    if (mMallocCount == MAX_MEM_ALLOCS) {
+        ALOGE("mMemoryArray is full");
+        return IA_FATAL_ERROR;
+    }
+
+    /* Allocate memory for API */
+    mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
+    if (mMemoryArray[mMallocCount] == NULL) {
+        ALOGE("malloc for pui_api_size + 4 >> %d Failed", pui_api_size + 4);
+        return IA_FATAL_ERROR;
+    }
+    /* Set API object with the memory allocated */
+    mXheaacCodecHandle = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
+    mMallocCount++;
+
+    /* Set the config params to default values */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT,
+                                IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS, NULL);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS");
+#ifdef ENABLE_MPEG_D_DRC
+    /* Get the API size */
+    err_code = ia_drc_dec_api(NULL, IA_API_CMD_GET_API_SIZE, 0, &pui_api_size);
+
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_API_SIZE");
+
+    if (mMallocCount == MAX_MEM_ALLOCS) {
+        ALOGE("mMemoryArray is full");
+        return IA_FATAL_ERROR;
+    }
+
+    /* Allocate memory for API */
+    mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
+
+    if (mMemoryArray[mMallocCount] == NULL) {
+        ALOGE("malloc for drc api structure Failed");
+        return IA_FATAL_ERROR;
+    }
+    memset(mMemoryArray[mMallocCount], 0, pui_api_size);
+
+    /* Set API object with the memory allocated */
+    mMpegDDrcHandle = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
+    mMallocCount++;
+
+    /* Set the config params to default values */
+    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                              IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS, NULL);
+
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS");
+#endif
+
+    /* ******************************************************************/
+    /* Set config parameters                                            */
+    /* ******************************************************************/
+    UWORD32 ui_mp4_flag = 1;
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_ISMP4, &ui_mp4_flag);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_ISMP4");
+
+    /* ******************************************************************/
+    /* Initialize Memory info tables                                    */
+    /* ******************************************************************/
+
+    /* Get memory info tables size */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_MEMTABS_SIZE, 0,
+                                &ui_proc_mem_tabs_size);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+    if (mMallocCount == MAX_MEM_ALLOCS) {
+        ALOGE("mMemoryArray is full");
+        return IA_FATAL_ERROR;
+    }
+
+    mMemoryArray[mMallocCount] = memalign(4, ui_proc_mem_tabs_size);
+    if (mMemoryArray[mMallocCount] == NULL) {
+        ALOGE("Malloc for size (ui_proc_mem_tabs_size + 4) = %d failed!",
+              ui_proc_mem_tabs_size + 4);
+        return IA_FATAL_ERROR;
+    }
+    mMallocCount++;
+    /* Set pointer for process memory tables    */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
+                                (pVOID)((WORD8*)mMemoryArray[mMallocCount - 1]));
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
+    /* initialize the API, post config, fill memory tables  */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT,
+                                IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, NULL);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS");
+
+    /* ******************************************************************/
+    /* Allocate Memory with info from library                           */
+    /* ******************************************************************/
+    /* Four different types of memory need to be allocated: */
+    /* persistent, scratch, input and output                */
+    for (i = 0; i < 4; i++) {
+        int ui_size = 0, ui_alignment = 0, ui_type = 0;
+        pVOID pv_alloc_ptr;
+
+        /* Get memory size */
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_MEM_INFO_SIZE, i, &ui_size);
+        RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
+
+        /* Get memory alignment */
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_MEM_INFO_ALIGNMENT, i,
+                                    &ui_alignment);
+        RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
+
+        /* Get memory type */
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_MEM_INFO_TYPE, i, &ui_type);
+        RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
+
+        if (mMallocCount == MAX_MEM_ALLOCS) {
+            ALOGE("mMemoryArray is full");
+            return IA_FATAL_ERROR;
+        }
+        mMemoryArray[mMallocCount] = memalign(ui_alignment, ui_size);
+        if (mMemoryArray[mMallocCount] == NULL) {
+            ALOGE("Malloc for size (ui_size + ui_alignment) = %d failed!", ui_size + ui_alignment);
+            return IA_FATAL_ERROR;
+        }
+        pv_alloc_ptr = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
+        mMallocCount++;
+
+        /* Set the buffer pointer */
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_MEM_PTR, i, pv_alloc_ptr);
+        RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+        if (ui_type == IA_MEMTYPE_INPUT) {
+            mInputBuffer = (pWORD8)pv_alloc_ptr;
+            mInputBufferSize = ui_size;
+        }
+
+        if (ui_type == IA_MEMTYPE_OUTPUT) {
+            mOutputBuffer = (pWORD8)pv_alloc_ptr;
+        }
+    }
+    /* End first part */
+
+    return IA_NO_ERROR;
+}
+
+int SoftXAAC::configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength) {
+    UWORD32 ui_init_done;
+    int32_t i_bytes_consumed;
+
+    if (mInputBufferSize < inBufferLength) {
+        ALOGE("Cannot config AAC, input buffer size %d < inBufferLength %d", mInputBufferSize,
+              inBufferLength);
+        // Return a non-zero error code so the caller does not treat this as success.
+        return IA_FATAL_ERROR;
+    }
+
+    /* Copy the buffer passed by Android plugin to codec input buffer */
+    memcpy(mInputBuffer, inBuffer, inBufferLength);
+
+    /* Set number of bytes to be processed */
+    IA_ERRORCODE err_code =
+        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_INPUT_BYTES, 0, &inBufferLength);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+    if (mIsCodecConfigFlushRequired) {
+        /* If codec is already initialized, then GA header is passed again */
+        /* Need to call the Flush API instead of INIT_PROCESS */
+        mIsCodecInitialized = false; /* Codec needs to be Reinitialized after flush */
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT, IA_CMD_TYPE_GA_HDR, NULL);
+        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_GA_HDR");
+    } else {
+        /* Initialize the process */
+        err_code =
+            ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT, IA_CMD_TYPE_INIT_PROCESS, NULL);
+        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_PROCESS");
+    }
+
+    /* Checking for end of initialization */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INIT, IA_CMD_TYPE_INIT_DONE_QUERY,
+                                &ui_init_done);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_DONE_QUERY");
+
+    /* How much buffer is used in input buffers */
+    err_code =
+        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF, 0, &i_bytes_consumed);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+
+    if (ui_init_done) {
+        err_code = getXAACStreamInfo();
+        RETURN_IF_FATAL(err_code, "getXAACStreamInfo");
+
+        ALOGI(
+            "Stream config: sampFreq %d, numChannels %d, pcmWdSz %d, channelMask %d, "
+            "outputFrameLength %d",
+            mSampFreq, mNumChannels, mPcmWdSz, mChannelMask, mOutputFrameLength);
+        mIsCodecInitialized = true;
+
+#ifdef ENABLE_MPEG_D_DRC
+        err_code = configMPEGDDrc();
+        RETURN_IF_FATAL(err_code, "configMPEGDDrc");
+#endif
+    }
+
+    return IA_NO_ERROR;
+}
+
+int SoftXAAC::configMPEGDDrc() {
+    IA_ERRORCODE err_code = IA_NO_ERROR;
+    int i_effect_type;
+    int i_loud_norm;
+    int i_target_loudness;
+    unsigned int i_sbr_mode;
+    int n_mems;
+    int i;
+
+#ifdef ENABLE_MPEG_D_DRC
+    {
+        /* Sampling Frequency */
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                  IA_DRC_DEC_CONFIG_PARAM_SAMP_FREQ, &mSampFreq);
+        RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_SAMP_FREQ");
+        /* Total Number of Channels */
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                  IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS, &mNumChannels);
+        RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS");
+
+        /* PCM word size  */
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                  IA_DRC_DEC_CONFIG_PARAM_PCM_WDSZ, &mPcmWdSz);
+        RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_PCM_WDSZ");
+
+        /*Set Effect Type*/
+
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE, &i_effect_type);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE");
+
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                  IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE, &i_effect_type);
+        RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE");
+
+        /*Set target loudness */
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS,
+                                    &i_target_loudness);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS");
+
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                  IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS, &i_target_loudness);
+        RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS");
+
+        /*Set loud_norm_flag*/
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM, &i_loud_norm);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM");
+
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                  IA_DRC_DEC_CONFIG_DRC_LOUD_NORM, &i_loud_norm);
+        RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_LOUD_NORM");
+
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                  IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, NULL);
+
+        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS");
+
+        for (i = 0; i < (WORD32)2; i++) {
+            WORD32 ui_size, ui_alignment, ui_type;
+            pVOID pv_alloc_ptr;
+
+            /* Get memory size */
+            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_SIZE, i, &ui_size);
+
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
+
+            /* Get memory alignment */
+            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_ALIGNMENT, i,
+                                      &ui_alignment);
+
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
+
+            /* Get memory type */
+            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEM_INFO_TYPE, i, &ui_type);
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
+            if (mMallocCount == MAX_MEM_ALLOCS) {
+                ALOGE("mMemoryArray is full");
+                return IA_FATAL_ERROR;
+            }
+
+            mMemoryArray[mMallocCount] = memalign(4, ui_size);
+            if (mMemoryArray[mMallocCount] == NULL) {
+                ALOGE(" Cannot create requested memory  %d", ui_size);
+                return IA_FATAL_ERROR;
+            }
+            pv_alloc_ptr = (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
+            mMallocCount++;
+
+            /* Set the buffer pointer */
+            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, i, pv_alloc_ptr);
+
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+        }
+        {
+            WORD32 ui_size;
+            ui_size = 8192 * 2;
+            if (mMallocCount == MAX_MEM_ALLOCS) {
+                ALOGE("mMemoryArray is full");
+                return IA_FATAL_ERROR;
+            }
+
+            mMemoryArray[mMallocCount] = memalign(4, ui_size);
+            if (mMemoryArray[mMallocCount] == NULL) {
+                ALOGE(" Cannot create requested memory  %d", ui_size);
+                return IA_FATAL_ERROR;
+            }
+
+            mDrcInBuf = (int8_t*)mMemoryArray[mMallocCount];
+            mMallocCount++;
+            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, 2,
+                                      /*mOutputBuffer*/ mDrcInBuf);
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+
+            if (mMallocCount == MAX_MEM_ALLOCS) {
+                ALOGE("mMemoryArray is full");
+                return IA_FATAL_ERROR;
+            }
+            mMemoryArray[mMallocCount] = memalign(4, ui_size);
+            if (mMemoryArray[mMallocCount] == NULL) {
+                ALOGE(" Cannot create requested memory  %d", ui_size);
+                return IA_FATAL_ERROR;
+            }
+
+            mDrcOutBuf = (int8_t*)mMemoryArray[mMallocCount];
+            mMallocCount++;
+            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR, 3,
+                                      /*mOutputBuffer*/ mDrcOutBuf);
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+        }
+        /* DRC buffers:
+            buf[0] - extension element payload (loudness related)
+            buf[1] - extension element payload (config extension) */
+        {
+            VOID* p_array[2][16];
+            WORD32 ii;
+            WORD32 buf_sizes[2][16];
+            WORD32 num_elements;
+            WORD32 num_config_ext;
+            WORD32 bit_str_fmt = 1;
+
+            WORD32 uo_num_chan;
+
+            memset(buf_sizes, 0, 32 * sizeof(WORD32));
+
+            err_code =
+                ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                 IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_BUF_SIZES, &buf_sizes[0][0]);
+            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_BUF_SIZES");
+
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                        IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_PTR, &p_array);
+            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_PTR");
+
+            err_code =
+                ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT, IA_CMD_TYPE_INIT_SET_BUFF_PTR, 0);
+            RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_SET_BUFF_PTR");
+
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                        IA_ENHAACPLUS_DEC_CONFIG_NUM_ELE, &num_elements);
+            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_NUM_ELE");
+
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                        IA_ENHAACPLUS_DEC_CONFIG_NUM_CONFIG_EXT, &num_config_ext);
+            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_NUM_CONFIG_EXT");
+
+            for (ii = 0; ii < num_config_ext; ii++) {
+                /*copy loudness bitstream*/
+                if (buf_sizes[0][ii] > 0) {
+                    memcpy(mDrcInBuf, p_array[0][ii], buf_sizes[0][ii]);
+
+                    /*Set bitstream_split_format */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                              IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    /* Set number of bytes to be processed */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_IL_BS, 0,
+                                              &buf_sizes[0][ii]);
+                    RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES_IL_BS");
+
+                    /* Execute process */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                              IA_CMD_TYPE_INIT_CPY_IL_BSF_BUFF, NULL);
+                    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_CPY_IL_BSF_BUFF");
+
+                    mDRCFlag = 1;
+                }
+            }
+
+            for (ii = 0; ii < num_elements; ii++) {
+                /*copy config bitstream*/
+                if (buf_sizes[1][ii] > 0) {
+                    memcpy(mDrcInBuf, p_array[1][ii], buf_sizes[1][ii]);
+                    /* Set number of bytes to be processed */
+
+                    /*Set bitstream_split_format */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                              IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_IC_BS, 0,
+                                              &buf_sizes[1][ii]);
+                    RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES_IC_BS");
+
+                    /* Execute process */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                              IA_CMD_TYPE_INIT_CPY_IC_BSF_BUFF, NULL);
+
+                    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_CPY_IC_BSF_BUFF");
+
+                    mDRCFlag = 1;
+                }
+            }
+
+            if (mDRCFlag == 1) {
+                mMpegDDRCPresent = 1;
+            } else {
+                mMpegDDRCPresent = 0;
+            }
+
+            /*Read interface buffer config file bitstream*/
+            if (mMpegDDRCPresent == 1) {
+                WORD32 interface_is_present = 1;
+                WORD32 frame_length;
+
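+                // Choose the DRC frame size based on the SBR mode reported by the AAC decoder.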
+                if (i_sbr_mode != 0) {
+                    if (i_sbr_mode == 1) {
+                        frame_length = 2048;
+                    } else if (i_sbr_mode == 3) {
+                        frame_length = 4096;
+                    } else {
+                        frame_length = 1024;
+                    }
+                } else {
+                    frame_length = 4096;
+                }
+
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                          IA_DRC_DEC_CONFIG_PARAM_FRAME_SIZE, &frame_length);
+                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_FRAME_SIZE");
+
+                err_code =
+                    ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                   IA_DRC_DEC_CONFIG_PARAM_INT_PRESENT, &interface_is_present);
+                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_INT_PRESENT");
+
+                /* Execute process */
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                          IA_CMD_TYPE_INIT_CPY_IN_BSF_BUFF, NULL);
+                RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_CPY_IN_BSF_BUFF");
+
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                          IA_CMD_TYPE_INIT_PROCESS, NULL);
+                RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_PROCESS");
+
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                          IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS, &uo_num_chan);
+                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS");
+            }
+        }
+    }
+#endif
+
+    return err_code;
+}
+
+int SoftXAAC::decodeXAACStream(uint8_t* inBuffer, uint32_t inBufferLength, int32_t* bytesConsumed,
+                               int32_t* outBytes) {
+    if (mInputBufferSize < inBufferLength) {
+        ALOGE("Cannot config AAC, input buffer size %d < inBufferLength %d", mInputBufferSize,
+              inBufferLength);
+        return -1;
+    }
+
+    /* Copy the buffer passed by Android plugin to codec input buffer */
+    memcpy(mInputBuffer, inBuffer, inBufferLength);
+
+    /* Set number of bytes to be processed */
+    IA_ERRORCODE err_code =
+        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_INPUT_BYTES, 0, &inBufferLength);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+    /* Execute process */
+    err_code =
+        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, NULL);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+
+    UWORD32 ui_exec_done;
+    /* Checking for end of processing */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DONE_QUERY,
+                                &ui_exec_done);
+    RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DONE_QUERY");
+
+#ifdef ENABLE_MPEG_D_DRC
+    {
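+        // While execution is still in progress, fetch any DRC gain payload produced by the
+        // AAC decoder and hand it to the MPEG-D DRC module.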
+        if (ui_exec_done != 1) {
+            VOID* p_array;        // ITTIAM:buffer to handle gain payload
+            WORD32 buf_size = 0;  // ITTIAM:gain payload length
+            WORD32 bit_str_fmt = 1;
+            WORD32 gain_stream_flag = 1;
+
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                        IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
+
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                        IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+
+            if (buf_size > 0) {
+                /*Set bitstream_split_format */
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                          IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                memcpy(mDrcInBuf, p_array, buf_size);
+                /* Set number of bytes to be processed */
+                err_code =
+                    ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
+                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                          IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
+                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                /* Execute process */
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                          IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
+                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                mMpegDDRCPresent = 1;
+            }
+        }
+    }
+#endif
+    /* How much buffer is used in input buffers */
+    err_code =
+        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF, 0, bytesConsumed);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+
+    /* Get the output bytes */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_OUTPUT_BYTES, 0, outBytes);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+#ifdef ENABLE_MPEG_D_DRC
+
+    if (mMpegDDRCPresent == 1) {
+        memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
+        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
+        RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+        err_code =
+            ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, NULL);
+        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+
+        memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+    }
+#endif
+    return err_code;
+}
+
+int SoftXAAC::deInitXAACDecoder() {
+    ALOGI("deInitXAACDecoder");
+
+    /* Tell that the input is over in this buffer */
+    IA_ERRORCODE err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_INPUT_OVER, 0, NULL);
+    RETURN_IF_FATAL(err_code, "IA_API_CMD_INPUT_OVER");
+
+    for (int i = 0; i < mMallocCount; i++) {
+        if (mMemoryArray[i]) free(mMemoryArray[i]);
+    }
+    mMallocCount = 0;
+
+    return err_code;
+}
+
+IA_ERRORCODE SoftXAAC::getXAACStreamInfo() {
+    IA_ERRORCODE err_code = IA_NO_ERROR;
+
+    /* Sampling frequency */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_SAMP_FREQ, &mSampFreq);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SAMP_FREQ");
+
+    /* Total Number of Channels */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS, &mNumChannels);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS");
+
+    /* PCM word size */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ, &mPcmWdSz);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ");
+
+    /* channel mask to tell the arrangement of channels in bit stream */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MASK, &mChannelMask);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MASK");
+
+    /* Channel mode to tell MONO/STEREO/DUAL-MONO/NONE_OF_THESE */
+    UWORD32 ui_channel_mode;
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MODE, &ui_channel_mode);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MODE");
+    if (ui_channel_mode == 0)
+        ALOGV("Channel Mode: MONO_OR_PS\n");
+    else if (ui_channel_mode == 1)
+        ALOGV("Channel Mode: STEREO\n");
+    else if (ui_channel_mode == 2)
+        ALOGV("Channel Mode: DUAL-MONO\n");
+    else
+        ALOGV("Channel Mode: NONE_OF_THESE or MULTICHANNEL\n");
+
+    /* Channel mode to tell SBR PRESENT/NOT_PRESENT */
+    UWORD32 ui_sbr_mode;
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &ui_sbr_mode);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+    if (ui_sbr_mode == 0)
+        ALOGV("SBR Mode: NOT_PRESENT\n");
+    else if (ui_sbr_mode == 1)
+        ALOGV("SBR Mode: PRESENT\n");
+    else
+        ALOGV("SBR Mode: ILLEGAL\n");
+
+    /* mOutputFrameLength = 1024 * (1 + SBR_MODE) for AAC.        */
+    /* For USAC it could be up to 1024 * 3, but support for       */
+    /* querying this from the codec has not been added yet.       */
+    mOutputFrameLength = 1024 * (1 + ui_sbr_mode);
+
+    ALOGI("mOutputFrameLength %d ui_sbr_mode %d", mOutputFrameLength, ui_sbr_mode);
+
+    return IA_NO_ERROR;
+}
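
Reviewer note: the stream-info query above yields sample rate, channel count, PCM word size and output frame length. As a minimal sketch of how those queried values translate into a per-frame output buffer size (helper name and the example values are hypothetical, not part of this patch):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: bytes needed for one decoded frame, given the values
    // obtained via IA_API_CMD_GET_CONFIG_PARAM in getXAACStreamInfo().
    static size_t outputBufferBytes(int32_t frameLength, int32_t numChannels, int32_t pcmWdSz) {
        // pcmWdSz is in bits (typically 16); frameLength is samples per channel.
        return static_cast<size_t>(frameLength) * numChannels * (pcmWdSz / 8);
    }

    int main() {
        // Example: HE-AAC (SBR present) -> 2048 samples/frame, stereo, 16-bit PCM.
        std::printf("%zu bytes\n", outputBufferBytes(1024 * (1 + 1), 2, 16));
        return 0;
    }
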
+
+IA_ERRORCODE SoftXAAC::setXAACDRCInfo(int32_t drcCut, int32_t drcBoost, int32_t drcRefLevel,
+                                      int32_t drcHeavyCompression
+#ifdef ENABLE_MPEG_D_DRC
+                                      ,
+                                      int32_t drEffectType
+#endif
+) {
+    IA_ERRORCODE err_code = IA_NO_ERROR;
+
+    int32_t ui_drc_enable = 1;
+    int32_t i_effect_type, i_target_loudness, i_loud_norm;
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_ENABLE, &ui_drc_enable);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_ENABLE");
+    if (drcCut != -1) {
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT, &drcCut);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT");
+    }
+
+    if (drcBoost != -1) {
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST, &drcBoost);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST");
+    }
+
+    if (drcRefLevel != -1) {
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL, &drcRefLevel);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL");
+    }
+#ifdef ENABLE_MPEG_D_DRC
+    if (drcRefLevel != -1) {
+        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                    IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS, &drcRefLevel);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS");
+    }
+#endif
+    if (drcHeavyCompression != -1) {
+        err_code =
+            ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                             IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP, &drcHeavyCompression);
+        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP");
+    }
+
+#ifdef ENABLE_MPEG_D_DRC
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE, &drEffectType);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE");
+#endif
+
+#ifdef ENABLE_MPEG_D_DRC
+    /* Set effect type */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE, &i_effect_type);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE");
+
+    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                              IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE, &i_effect_type);
+
+    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE");
+
+    /* Set target loudness */
+    err_code =
+        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                         IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS, &i_target_loudness);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS");
+
+    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                              IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS, &i_target_loudness);
+    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS");
+
+    /* Set loud_norm_flag */
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM, &i_loud_norm);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM");
+
+    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                              IA_DRC_DEC_CONFIG_DRC_LOUD_NORM, &i_loud_norm);
+
+    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_LOUD_NORM");
+
+#endif
+
+    return IA_NO_ERROR;
+}
+
+}  // namespace android
+
+android::SoftOMXComponent* createSoftOMXComponent(const char* name,
+                                                  const OMX_CALLBACKTYPE* callbacks,
+                                                  OMX_PTR appData, OMX_COMPONENTTYPE** component) {
+    ALOGI("createSoftOMXComponent for SoftXAACDEC");
+    return new android::SoftXAAC(name, callbacks, appData, component);
+}
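
Reviewer note: every library return code in this file is funneled through RETURN_IF_FATAL. The macro itself is defined earlier in SoftXAAC.cpp and is not shown in this hunk; the sketch below only illustrates the short-circuit pattern with an assumed shape (log the failing command, bail out on a fatal code), not the actual definition.

    #include <cstdint>
    #include <cstdio>

    typedef int32_t IA_ERRORCODE;
    static const IA_ERRORCODE IA_NO_ERROR = 0;

    // Assumed shape only: treat a negative code as fatal, log it, and return.
    // The real macro in SoftXAAC.cpp may differ.
    #define RETURN_IF_FATAL(err, api)                                   \
        do {                                                            \
            if ((err) < 0) {                                            \
                std::fprintf(stderr, "%s failed: %d\n", (api), (err));  \
                return (err);                                           \
            }                                                           \
        } while (0)

    static IA_ERRORCODE fakeApiCall(bool fail) { return fail ? -1 : IA_NO_ERROR; }

    static IA_ERRORCODE runSequence() {
        IA_ERRORCODE err = fakeApiCall(false);
        RETURN_IF_FATAL(err, "FIRST_CALL");
        err = fakeApiCall(true);                  // short-circuits here
        RETURN_IF_FATAL(err, "SECOND_CALL");
        return IA_NO_ERROR;
    }

    int main() { return runSequence() == IA_NO_ERROR ? 0 : 1; }
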
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.h b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
new file mode 100644
index 0000000..6176082
--- /dev/null
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFTXAAC_H_
+#define SOFTXAAC_H_
+
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "ixheaacd_type_def.h"
+#include "ixheaacd_error_standards.h"
+#include "ixheaacd_error_handler.h"
+#include "ixheaacd_apicmd_standards.h"
+#include "ixheaacd_memory_standards.h"
+#include "ixheaacd_aac_config.h"
+
+#include "impd_apicmd_standards.h"
+#include "impd_drc_config_params.h"
+
+#define MAX_MEM_ALLOCS 100
+
+extern "C" IA_ERRORCODE ixheaacd_dec_api(pVOID p_ia_module_obj, WORD32 i_cmd, WORD32 i_idx,
+                                         pVOID pv_value);
+extern "C" IA_ERRORCODE ia_drc_dec_api(pVOID p_ia_module_obj, WORD32 i_cmd, WORD32 i_idx,
+                                       pVOID pv_value);
+extern "C" IA_ERRORCODE ixheaacd_get_config_param(pVOID p_ia_process_api_obj, pWORD32 pi_samp_freq,
+                                                  pWORD32 pi_num_chan, pWORD32 pi_pcm_wd_sz,
+                                                  pWORD32 pi_channel_mask);
+
+namespace android {
+
+struct SoftXAAC : public SimpleSoftOMXComponent {
+    SoftXAAC(const char* name, const OMX_CALLBACKTYPE* callbacks, OMX_PTR appData,
+             OMX_COMPONENTTYPE** component);
+
+   protected:
+    virtual ~SoftXAAC();
+
+    virtual OMX_ERRORTYPE internalGetParameter(OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual void onQueueFilled(OMX_U32 portIndex);
+    virtual void onPortFlushCompleted(OMX_U32 portIndex);
+    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
+
+   private:
+    enum {
+        kNumInputBuffers = 4,
+        kNumOutputBuffers = 4,
+        kNumDelayBlocksMax = 8,
+    };
+
+    bool mIsADTS;
+    size_t mInputBufferCount;
+    size_t mOutputBufferCount;
+    bool mSignalledError;
+    OMX_BUFFERHEADERTYPE* mLastInHeader;
+    int64_t mPrevTimestamp;
+    int64_t mCurrentTimestamp;
+    uint32_t mBufSize;
+
+    enum { NONE, AWAITING_DISABLED, AWAITING_ENABLED } mOutputPortSettingsChange;
+
+    void initPorts();
+    status_t initDecoder();
+    bool isConfigured() const;
+    int drainDecoder();
+    int initXAACDecoder();
+    int deInitXAACDecoder();
+
+    int configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength);
+    int configMPEGDDrc();
+    int decodeXAACStream(uint8_t* inBuffer, uint32_t inBufferLength, int32_t* bytesConsumed,
+                         int32_t* outBytes);
+
+    int configflushDecode();
+    IA_ERRORCODE getXAACStreamInfo();
+    IA_ERRORCODE setXAACDRCInfo(int32_t drcCut, int32_t drcBoost, int32_t drcRefLevel,
+                                int32_t drcHeavyCompression
+#ifdef ENABLE_MPEG_D_DRC
+                                ,
+                                int32_t drEffectType
+#endif
+    );
+
+    bool mEndOfInput;
+    bool mEndOfOutput;
+
+    void* mXheaacCodecHandle;
+    void* mMpegDDrcHandle;
+    uint32_t mInputBufferSize;
+    uint32_t mOutputFrameLength;
+    int8_t* mInputBuffer;
+    int8_t* mOutputBuffer;
+    int32_t mSampFreq;
+    int32_t mNumChannels;
+    int32_t mPcmWdSz;
+    int32_t mChannelMask;
+    bool mIsCodecInitialized;
+    bool mIsCodecConfigFlushRequired;
+    int8_t* mDrcInBuf;
+    int8_t* mDrcOutBuf;
+    int32_t mMpegDDRCPresent;
+    int32_t mDRCFlag;
+
+    void* mMemoryArray[MAX_MEM_ALLOCS];
+    int32_t mMallocCount;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftXAAC);
+};
+
+}  // namespace android
+
+#endif  // SOFTXAAC_H_
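
Reviewer note: the header and the createSoftOMXComponent factory above follow the usual soft-OMX component shape; the host resolves that factory symbol from the component library at runtime. A hedged, self-contained sketch of that resolution step (library name, opaque types and error handling are assumptions for illustration; the real loader lives in the stagefright OMX plugin, not in this patch):

    #include <dlfcn.h>
    #include <cstdio>

    // Signature mirrors the factory defined in SoftXAAC.cpp; the parameter
    // types are kept opaque here only so the sketch stands alone.
    struct SoftOMXComponent;
    typedef SoftOMXComponent *(*CreateSoftOMXComponentFunc)(
            const char *name, const void *callbacks, void *appData, void **component);

    int main() {
        // Library name is an assumption for illustration.
        void *lib = dlopen("libstagefright_soft_xaacdec.so", RTLD_NOW);
        if (lib == nullptr) {
            std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }
        auto create = reinterpret_cast<CreateSoftOMXComponentFunc>(
                dlsym(lib, "createSoftOMXComponent"));
        if (create == nullptr) {
            std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
            dlclose(lib);
            return 1;
        }
        // A real host would pass OMX callbacks and keep the handle alive here.
        dlclose(lib);
        return 0;
    }
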
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 1b38852..c46a40f 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -25,7 +25,7 @@
 
 #include "libyuv/convert_from.h"
 #include "libyuv/video_common.h"
-
+#include <functional>
 #include <sys/time.h>
 
 #define USE_LIBYUV
@@ -58,14 +58,16 @@
 
 bool ColorConverter::isValid() const {
     switch (mSrcFormat) {
+        case OMX_COLOR_FormatYUV420Planar16:
+            if (mDstFormat == OMX_COLOR_FormatYUV444Y410) {
+                return true;
+            }
+            // fall-thru
         case OMX_COLOR_FormatYUV420Planar:
             return mDstFormat == OMX_COLOR_Format16bitRGB565
                     || mDstFormat == OMX_COLOR_Format32BitRGBA8888
                     || mDstFormat == OMX_COLOR_Format32bitBGRA8888;
 
-        case OMX_COLOR_FormatYUV420Planar16:
-            return mDstFormat == OMX_COLOR_FormatYUV444Y410;
-
         case OMX_COLOR_FormatCbYCrY:
         case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
         case OMX_COLOR_FormatYUV420SemiPlanar:
@@ -83,9 +85,15 @@
             || mDstFormat == OMX_COLOR_Format32bitBGRA8888;
 }
 
+/*
+ * If stride is non-zero, the client's stride is used. For planar
+ * or semi-planar YUV formats, the stride must be an even number.
+ * If stride is zero, it is calculated from the width and the bpp
+ * of the format, assuming no padding on the right edge.
+ */
 ColorConverter::BitmapParams::BitmapParams(
         void *bits,
-        size_t width, size_t height,
+        size_t width, size_t height, size_t stride,
         size_t cropLeft, size_t cropTop,
         size_t cropRight, size_t cropBottom,
         OMX_COLOR_FORMATTYPE colorFromat)
@@ -99,6 +107,8 @@
       mCropBottom(cropBottom) {
     switch(mColorFormat) {
     case OMX_COLOR_Format16bitRGB565:
+    case OMX_COLOR_FormatYUV420Planar16:
+    case OMX_COLOR_FormatCbYCrY:
         mBpp = 2;
         mStride = 2 * mWidth;
         break;
@@ -110,13 +120,7 @@
         mStride = 4 * mWidth;
         break;
 
-    case OMX_COLOR_FormatYUV420Planar16:
-        mBpp = 2;
-        mStride = 2 * mWidth;
-        break;
-
     case OMX_COLOR_FormatYUV420Planar:
-    case OMX_COLOR_FormatCbYCrY:
     case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
     case OMX_COLOR_FormatYUV420SemiPlanar:
     case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
@@ -130,6 +134,10 @@
         mStride = mWidth;
         break;
     }
+    // use client's stride if it's specified.
+    if (stride != 0) {
+        mStride = stride;
+    }
 }
 
 size_t ColorConverter::BitmapParams::cropWidth() const {
@@ -142,21 +150,21 @@
 
 status_t ColorConverter::convert(
         const void *srcBits,
-        size_t srcWidth, size_t srcHeight,
+        size_t srcWidth, size_t srcHeight, size_t srcStride,
         size_t srcCropLeft, size_t srcCropTop,
         size_t srcCropRight, size_t srcCropBottom,
         void *dstBits,
-        size_t dstWidth, size_t dstHeight,
+        size_t dstWidth, size_t dstHeight, size_t dstStride,
         size_t dstCropLeft, size_t dstCropTop,
         size_t dstCropRight, size_t dstCropBottom) {
     BitmapParams src(
             const_cast<void *>(srcBits),
-            srcWidth, srcHeight,
+            srcWidth, srcHeight, srcStride,
             srcCropLeft, srcCropTop, srcCropRight, srcCropBottom, mSrcFormat);
 
     BitmapParams dst(
             dstBits,
-            dstWidth, dstHeight,
+            dstWidth, dstHeight, dstStride,
             dstCropLeft, dstCropTop, dstCropRight, dstCropBottom, mDstFormat);
 
     if (!((src.mCropLeft & 1) == 0
@@ -311,84 +319,120 @@
     return OK;
 }
 
-void ColorConverter::writeToDst(
-        void *dst_ptr, uint8_t *kAdjustedClip, bool uncropped,
-        signed r1, signed g1, signed b1,
-        signed r2, signed g2, signed b2) {
-    switch (mDstFormat) {
+std::function<void (void *, void *, void *, size_t,
+                    signed *, signed *, signed *, signed *)>
+getReadFromSrc(OMX_COLOR_FORMATTYPE srcFormat) {
+    switch(srcFormat) {
+    case OMX_COLOR_FormatYUV420Planar:
+        return [](void *src_y, void *src_u, void *src_v, size_t x,
+                  signed *y1, signed *y2, signed *u, signed *v) {
+            *y1 = ((uint8_t*)src_y)[x] - 16;
+            *y2 = ((uint8_t*)src_y)[x + 1] - 16;
+            *u = ((uint8_t*)src_u)[x / 2] - 128;
+            *v = ((uint8_t*)src_v)[x / 2] - 128;
+        };
+    case OMX_COLOR_FormatYUV420Planar16:
+        return [](void *src_y, void *src_u, void *src_v, size_t x,
+                signed *y1, signed *y2, signed *u, signed *v) {
+            *y1 = (signed)(((uint16_t*)src_y)[x] >> 2) - 16;
+            *y2 = (signed)(((uint16_t*)src_y)[x + 1] >> 2) - 16;
+            *u = (signed)(((uint16_t*)src_u)[x / 2] >> 2) - 128;
+            *v = (signed)(((uint16_t*)src_v)[x / 2] >> 2) - 128;
+        };
+    default:
+        TRESPASS();
+    }
+    return nullptr;
+}
+
+std::function<void (void *, bool, signed, signed, signed, signed, signed, signed)>
+getWriteToDst(OMX_COLOR_FORMATTYPE dstFormat, uint8_t *kAdjustedClip) {
+    switch (dstFormat) {
     case OMX_COLOR_Format16bitRGB565:
     {
-        uint32_t rgb1 =
-            ((kAdjustedClip[r1] >> 3) << 11)
-            | ((kAdjustedClip[g1] >> 2) << 5)
-            | (kAdjustedClip[b1] >> 3);
+        return [kAdjustedClip](void *dst_ptr, bool uncropped,
+                               signed r1, signed g1, signed b1,
+                               signed r2, signed g2, signed b2) {
+            uint32_t rgb1 =
+                ((kAdjustedClip[r1] >> 3) << 11)
+                | ((kAdjustedClip[g1] >> 2) << 5)
+                | (kAdjustedClip[b1] >> 3);
 
-        if (uncropped) {
-            uint32_t rgb2 =
-                ((kAdjustedClip[r2] >> 3) << 11)
-                | ((kAdjustedClip[g2] >> 2) << 5)
-                | (kAdjustedClip[b2] >> 3);
+            if (uncropped) {
+                uint32_t rgb2 =
+                    ((kAdjustedClip[r2] >> 3) << 11)
+                    | ((kAdjustedClip[g2] >> 2) << 5)
+                    | (kAdjustedClip[b2] >> 3);
 
-            *(uint32_t *)dst_ptr = (rgb2 << 16) | rgb1;
-        } else {
-            *(uint16_t *)dst_ptr = rgb1;
-        }
-        break;
+                *(uint32_t *)dst_ptr = (rgb2 << 16) | rgb1;
+            } else {
+                *(uint16_t *)dst_ptr = rgb1;
+            }
+        };
     }
     case OMX_COLOR_Format32BitRGBA8888:
     {
-        ((uint32_t *)dst_ptr)[0] =
-                (kAdjustedClip[r1])
-                | (kAdjustedClip[g1] << 8)
-                | (kAdjustedClip[b1] << 16)
-                | (0xFF << 24);
-
-        if (uncropped) {
-            ((uint32_t *)dst_ptr)[1] =
-                    (kAdjustedClip[r2])
-                    | (kAdjustedClip[g2] << 8)
-                    | (kAdjustedClip[b2] << 16)
+        return [kAdjustedClip](void *dst_ptr, bool uncropped,
+                               signed r1, signed g1, signed b1,
+                               signed r2, signed g2, signed b2) {
+            ((uint32_t *)dst_ptr)[0] =
+                    (kAdjustedClip[r1])
+                    | (kAdjustedClip[g1] << 8)
+                    | (kAdjustedClip[b1] << 16)
                     | (0xFF << 24);
-        }
-        break;
+
+            if (uncropped) {
+                ((uint32_t *)dst_ptr)[1] =
+                        (kAdjustedClip[r2])
+                        | (kAdjustedClip[g2] << 8)
+                        | (kAdjustedClip[b2] << 16)
+                        | (0xFF << 24);
+            }
+        };
     }
     case OMX_COLOR_Format32bitBGRA8888:
     {
-        ((uint32_t *)dst_ptr)[0] =
-                (kAdjustedClip[b1])
-                | (kAdjustedClip[g1] << 8)
-                | (kAdjustedClip[r1] << 16)
-                | (0xFF << 24);
-
-        if (uncropped) {
-            ((uint32_t *)dst_ptr)[1] =
-                    (kAdjustedClip[b2])
-                    | (kAdjustedClip[g2] << 8)
-                    | (kAdjustedClip[r2] << 16)
+        return [kAdjustedClip](void *dst_ptr, bool uncropped,
+                               signed r1, signed g1, signed b1,
+                               signed r2, signed g2, signed b2) {
+            ((uint32_t *)dst_ptr)[0] =
+                    (kAdjustedClip[b1])
+                    | (kAdjustedClip[g1] << 8)
+                    | (kAdjustedClip[r1] << 16)
                     | (0xFF << 24);
-        }
-        break;
+
+            if (uncropped) {
+                ((uint32_t *)dst_ptr)[1] =
+                        (kAdjustedClip[b2])
+                        | (kAdjustedClip[g2] << 8)
+                        | (kAdjustedClip[r2] << 16)
+                        | (0xFF << 24);
+            }
+        };
     }
     default:
-        break;
+        TRESPASS();
     }
+    return nullptr;
 }
+
 status_t ColorConverter::convertYUV420Planar(
         const BitmapParams &src, const BitmapParams &dst) {
     uint8_t *kAdjustedClip = initClip();
 
+    auto readFromSrc = getReadFromSrc(mSrcFormat);
+    auto writeToDst = getWriteToDst(mDstFormat, kAdjustedClip);
+
     uint8_t *dst_ptr = (uint8_t *)dst.mBits
-        + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
+            + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
 
-    const uint8_t *src_y =
-        (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
+    uint8_t *src_y = (uint8_t *)src.mBits
+            + src.mCropTop * src.mStride + src.mCropLeft * src.mBpp;
 
-    const uint8_t *src_u =
-        (const uint8_t *)src.mBits + src.mStride * src.mHeight
-        + (src.mCropTop / 2) * (src.mStride / 2) + src.mCropLeft / 2;
+    uint8_t *src_u = (uint8_t *)src.mBits + src.mStride * src.mHeight
+            + (src.mCropTop / 2) * (src.mStride / 2) + src.mCropLeft / 2 * src.mBpp;
 
-    const uint8_t *src_v =
-        src_u + (src.mStride / 2) * (src.mHeight / 2);
+    uint8_t *src_v = src_u + (src.mStride / 2) * (src.mHeight / 2);
 
     for (size_t y = 0; y < src.cropHeight(); ++y) {
         for (size_t x = 0; x < src.cropWidth(); x += 2) {
@@ -410,11 +454,8 @@
 
             // clip range -278 .. 535
 
-            signed y1 = (signed)src_y[x] - 16;
-            signed y2 = (signed)src_y[x + 1] - 16;
-
-            signed u = (signed)src_u[x / 2] - 128;
-            signed v = (signed)src_v[x / 2] - 128;
+            signed y1, y2, u, v;
+            readFromSrc(src_y, src_u, src_v, x, &y1, &y2, &u, &v);
 
             signed u_b = u * 517;
             signed u_g = -u * 100;
@@ -432,8 +473,7 @@
             signed r2 = (tmp2 + v_r) / 256;
 
             bool uncropped = x + 1 < src.cropWidth();
-            (void)writeToDst(dst_ptr + x * dst.mBpp,
-                    kAdjustedClip, uncropped, r1, g1, b1, r2, g2, b2);
+            writeToDst(dst_ptr + x * dst.mBpp, uncropped, r1, g1, b1, r2, g2, b2);
         }
 
         src_y += src.mStride;
@@ -449,6 +489,15 @@
     return OK;
 }
 
+status_t ColorConverter::convertYUV420Planar16(
+        const BitmapParams &src, const BitmapParams &dst) {
+    if (mDstFormat == OMX_COLOR_FormatYUV444Y410) {
+        return convertYUV420Planar16ToY410(src, dst);
+    }
+
+    return convertYUV420Planar(src, dst);
+}
+
 /*
  * Pack 10-bit YUV into RGBA_1010102.
  *
@@ -480,7 +529,7 @@
 
 #if !USE_NEON_Y410
 
-status_t ColorConverter::convertYUV420Planar16(
+status_t ColorConverter::convertYUV420Planar16ToY410(
         const BitmapParams &src, const BitmapParams &dst) {
     uint8_t *dst_ptr = (uint8_t *)dst.mBits
         + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -554,7 +603,7 @@
 
 #else
 
-status_t ColorConverter::convertYUV420Planar16(
+status_t ColorConverter::convertYUV420Planar16ToY410(
         const BitmapParams &src, const BitmapParams &dst) {
     uint8_t *out = (uint8_t *)dst.mBits
         + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -749,15 +798,15 @@
 
     uint8_t *kAdjustedClip = initClip();
 
-    uint16_t *dst_ptr = (uint16_t *)dst.mBits
-        + dst.mCropTop * dst.mWidth + dst.mCropLeft;
+    uint16_t *dst_ptr = (uint16_t *)((uint8_t *)
+            dst.mBits + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp);
 
     const uint8_t *src_y =
-        (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
+        (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
 
     const uint8_t *src_u =
-        (const uint8_t *)src_y + src.mWidth * src.mHeight
-        + src.mCropTop * src.mWidth + src.mCropLeft;
+        (const uint8_t *)src.mBits + src.mHeight * src.mStride +
+        src.mCropTop * src.mStride / 2 + src.mCropLeft;
 
     for (size_t y = 0; y < src.cropHeight(); ++y) {
         for (size_t x = 0; x < src.cropWidth(); x += 2) {
@@ -799,13 +848,13 @@
             }
         }
 
-        src_y += src.mWidth;
+        src_y += src.mStride;
 
         if (y & 1) {
-            src_u += src.mWidth;
+            src_u += src.mStride;
         }
 
-        dst_ptr += dst.mWidth;
+        dst_ptr = (uint16_t*)((uint8_t*)dst_ptr + dst.mStride);
     }
 
     return OK;
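
Reviewer note: the new srcStride/dstStride parameters let callers pass stride-padded buffers straight into ColorConverter::convert. A hypothetical call for a padded YUV420Planar frame converted to RGB565, following the signature and buffer layout visible in this diff (illustrative only, not AOSP test code; the OMX color enums are pulled in via ColorConverter.h):

    #include <cstdint>
    #include <vector>
    #include <media/stagefright/ColorConverter.h>
    #include <utils/Errors.h>

    using android::ColorConverter;

    int main() {
        const size_t width = 320, height = 240;
        const size_t srcStride = 384;              // padded luma stride, must be even
        const size_t dstStride = width * 2;        // RGB565 is 2 bytes per pixel

        // YUV420Planar: stride*height luma plane + two (stride/2)*(height/2) chroma planes.
        std::vector<uint8_t> src(srcStride * height * 3 / 2, 0);
        std::vector<uint8_t> dst(dstStride * height, 0);

        ColorConverter converter(OMX_COLOR_FormatYUV420Planar, OMX_COLOR_Format16bitRGB565);
        if (!converter.isValid()) return 1;

        // Passing 0 as a stride would fall back to the width-derived default,
        // as noted in the BitmapParams comment above.
        return converter.convert(
                src.data(), width, height, srcStride,
                0, 0, width - 1, height - 1,
                dst.data(), width, height, dstStride,
                0, 0, width - 1, height - 1) == android::OK ? 0 : 1;
    }
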
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 838bc5f..359df3d 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -44,6 +44,7 @@
       mNativeWindow(nativeWindow),
       mWidth(0),
       mHeight(0),
+      mStride(0),
       mCropLeft(0),
       mCropTop(0),
       mCropRight(0),
@@ -67,9 +68,10 @@
     int32_t colorFormatNew;
     CHECK(format->findInt32("color-format", &colorFormatNew));
 
-    int32_t widthNew, heightNew;
-    CHECK(format->findInt32("stride", &widthNew));
+    int32_t widthNew, heightNew, strideNew;
+    CHECK(format->findInt32("width", &widthNew));
     CHECK(format->findInt32("slice-height", &heightNew));
+    CHECK(format->findInt32("stride", &strideNew));
 
     int32_t cropLeftNew, cropTopNew, cropRightNew, cropBottomNew;
     if (!format->findRect(
@@ -79,13 +81,26 @@
         cropBottomNew = heightNew - 1;
     }
 
+    // The native window buffer format for high-bitdepth content may
+    // also depend on the dataspace.
+    android_dataspace dataSpace;
+    bool dataSpaceChangedForPlanar16 = false;
+    if (colorFormatNew == OMX_COLOR_FormatYUV420Planar16
+            && format->findInt32("android._dataspace", (int32_t *)&dataSpace)
+            && dataSpace != mDataSpace) {
+        // Do not modify mDataSpace here; it is only updated later,
+        // when native_window_set_buffers_data_space() is called.
+        dataSpaceChangedForPlanar16 = true;
+    }
+
     if (static_cast<int32_t>(mColorFormat) == colorFormatNew &&
         mWidth == widthNew &&
         mHeight == heightNew &&
         mCropLeft == cropLeftNew &&
         mCropTop == cropTopNew &&
         mCropRight == cropRightNew &&
-        mCropBottom == cropBottomNew) {
+        mCropBottom == cropBottomNew &&
+        !dataSpaceChangedForPlanar16) {
         // Nothing changed, no need to reset renderer.
         return;
     }
@@ -93,6 +108,7 @@
     mColorFormat = static_cast<OMX_COLOR_FORMATTYPE>(colorFormatNew);
     mWidth = widthNew;
     mHeight = heightNew;
+    mStride = strideNew;
     mCropLeft = cropLeftNew;
     mCropTop = cropTopNew;
     mCropRight = cropRightNew;
@@ -135,11 +151,16 @@
             }
             case OMX_COLOR_FormatYUV420Planar16:
             {
-                // Here we would convert OMX_COLOR_FormatYUV420Planar16 into
-                // OMX_COLOR_FormatYUV444Y410, and put it inside a buffer with
-                // format HAL_PIXEL_FORMAT_RGBA_1010102. Surfaceflinger will
-                // use render engine to convert it to RGB if needed.
-                halFormat = HAL_PIXEL_FORMAT_RGBA_1010102;
+                if (((dataSpace & HAL_DATASPACE_STANDARD_MASK) == HAL_DATASPACE_STANDARD_BT2020)
+                 && ((dataSpace & HAL_DATASPACE_TRANSFER_MASK) == HAL_DATASPACE_TRANSFER_ST2084)) {
+                    // Here we would convert OMX_COLOR_FormatYUV420Planar16 into
+                    // OMX_COLOR_FormatYUV444Y410, and put it inside a buffer with
+                    // format HAL_PIXEL_FORMAT_RGBA_1010102. Surfaceflinger will
+                    // use render engine to convert it to RGB if needed.
+                    halFormat = HAL_PIXEL_FORMAT_RGBA_1010102;
+                } else {
+                    halFormat = HAL_PIXEL_FORMAT_YV12;
+                }
                 bufWidth = (mCropWidth + 1) & ~1;
                 bufHeight = (mCropHeight + 1) & ~1;
                 break;
@@ -155,7 +176,7 @@
         mConverter = new ColorConverter(
                 mColorFormat, OMX_COLOR_Format16bitRGB565);
         CHECK(mConverter->isValid());
-    } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
+    } else if (halFormat == HAL_PIXEL_FORMAT_RGBA_1010102) {
         mConverter = new ColorConverter(
                 mColorFormat, OMX_COLOR_FormatYUV444Y410);
         CHECK(mConverter->isValid());
@@ -258,20 +279,15 @@
     if (mConverter) {
         mConverter->convert(
                 data,
-                mWidth, mHeight,
+                mWidth, mHeight, mStride,
                 mCropLeft, mCropTop, mCropRight, mCropBottom,
                 dst,
-                buf->stride, buf->height,
+                buf->stride, buf->height, 0,
                 0, 0, mCropWidth - 1, mCropHeight - 1);
     } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
-        const uint8_t *src_y = (const uint8_t *)data;
-        const uint8_t *src_u =
-                (const uint8_t *)data + mWidth * mHeight;
-        const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
-
-        src_y +=mCropLeft + mCropTop * mWidth;
-        src_u +=(mCropLeft + mCropTop * mWidth / 2)/2;
-        src_v +=(mCropLeft + mCropTop * mWidth / 2)/2;
+        const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft;
+        const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+        const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
 
         uint8_t *dst_y = (uint8_t *)dst;
         size_t dst_y_size = buf->stride * buf->height;
@@ -287,7 +303,7 @@
         for (int y = 0; y < mCropHeight; ++y) {
             memcpy(dst_y, src_y, mCropWidth);
 
-            src_y += mWidth;
+            src_y += mStride;
             dst_y += buf->stride;
         }
 
@@ -295,8 +311,44 @@
             memcpy(dst_u, src_u, (mCropWidth + 1) / 2);
             memcpy(dst_v, src_v, (mCropWidth + 1) / 2);
 
-            src_u += mWidth / 2;
-            src_v += mWidth / 2;
+            src_u += mStride / 2;
+            src_v += mStride / 2;
+            dst_u += dst_c_stride;
+            dst_v += dst_c_stride;
+        }
+    } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
+        const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft * 2;
+        const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+        const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
+
+        uint8_t *dst_y = (uint8_t *)dst;
+        size_t dst_y_size = buf->stride * buf->height;
+        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
+        size_t dst_c_size = dst_c_stride * buf->height / 2;
+        uint8_t *dst_v = dst_y + dst_y_size;
+        uint8_t *dst_u = dst_v + dst_c_size;
+
+        dst_y += mCropTop * buf->stride + mCropLeft;
+        dst_v += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
+        dst_u += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
+
+        for (int y = 0; y < mCropHeight; ++y) {
+            for (int x = 0; x < mCropWidth; ++x) {
+                dst_y[x] = (uint8_t)(((uint16_t *)src_y)[x] >> 2);
+            }
+
+            src_y += mStride;
+            dst_y += buf->stride;
+        }
+
+        for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
+            for (int x = 0; x < (mCropWidth + 1) / 2; ++x) {
+                dst_u[x] = (uint8_t)(((uint16_t *)src_u)[x] >> 2);
+                dst_v[x] = (uint8_t)(((uint16_t *)src_v)[x] >> 2);
+            }
+
+            src_u += mStride / 2;
+            src_v += mStride / 2;
             dst_u += dst_c_stride;
             dst_v += dst_c_stride;
         }
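
Reviewer note: when the dataspace is not BT2020/ST2084, the new OMX_COLOR_FormatYUV420Planar16 path above downshifts each 10-bit sample (stored in a uint16_t) by two bits to produce 8-bit YV12 planes. The same per-row operation in isolation, as a self-contained sketch:

    #include <cstdint>
    #include <cstddef>

    // Convert one row of 10-bit samples (stored in the low bits of uint16_t)
    // to 8-bit by dropping the two least significant bits, mirroring the loop
    // added to SoftwareRenderer above.
    static void downshiftRow(const uint16_t *src, uint8_t *dst, size_t count) {
        for (size_t x = 0; x < count; ++x) {
            dst[x] = static_cast<uint8_t>(src[x] >> 2);
        }
    }

    int main() {
        const uint16_t in[4] = {0, 512, 1023, 4};  // 10-bit values
        uint8_t out[4];
        downshiftRow(in, out, 4);
        return out[2] == 255 ? 0 : 1;              // 1023 >> 2 == 255
    }
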
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index b86f4ad..0b554a2 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -16,77 +16,77 @@
 
 <Included>
     <Decoders>
-        <MediaCodec name="c2.google.mp3.decoder" type="audio/mpeg">
+        <MediaCodec name="c2.android.mp3.decoder" type="audio/mpeg">
             <Limit name="channel-count" max="2" />
             <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
             <Limit name="bitrate" range="8000-320000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.amrnb.decoder" type="audio/3gpp">
+        <MediaCodec name="c2.android.amrnb.decoder" type="audio/3gpp">
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000" />
             <Limit name="bitrate" range="4750-12200" />
         </MediaCodec>
-        <MediaCodec name="c2.google.amrwb.decoder" type="audio/amr-wb">
+        <MediaCodec name="c2.android.amrwb.decoder" type="audio/amr-wb">
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="16000" />
             <Limit name="bitrate" range="6600-23850" />
         </MediaCodec>
-        <MediaCodec name="c2.google.aac.decoder" type="audio/mp4a-latm">
+        <MediaCodec name="c2.android.aac.decoder" type="audio/mp4a-latm">
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="7350,8000,11025,12000,16000,22050,24000,32000,44100,48000" />
             <Limit name="bitrate" range="8000-960000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.g711.alaw.decoder" type="audio/g711-alaw">
+        <MediaCodec name="c2.android.g711.alaw.decoder" type="audio/g711-alaw">
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000-48000" />
             <Limit name="bitrate" range="64000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.g711.mlaw.decoder" type="audio/g711-mlaw">
+        <MediaCodec name="c2.android.g711.mlaw.decoder" type="audio/g711-mlaw">
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000-48000" />
             <Limit name="bitrate" range="64000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.vorbis.decoder" type="audio/vorbis">
+        <MediaCodec name="c2.android.vorbis.decoder" type="audio/vorbis">
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="8000-96000" />
             <Limit name="bitrate" range="32000-500000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.opus.decoder" type="audio/opus">
+        <MediaCodec name="c2.android.opus.decoder" type="audio/opus">
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="48000" />
             <Limit name="bitrate" range="6000-510000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.raw.decoder" type="audio/raw">
+        <MediaCodec name="c2.android.raw.decoder" type="audio/raw">
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="8000-96000" />
             <Limit name="bitrate" range="1-10000000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.flac.decoder" type="audio/flac">
+        <MediaCodec name="c2.android.flac.decoder" type="audio/flac">
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="1-655350" />
             <Limit name="bitrate" range="1-21000000" />
         </MediaCodec>
     </Decoders>
     <Encoders>
-        <MediaCodec name="c2.google.aac.encoder" type="audio/mp4a-latm">
+        <MediaCodec name="c2.android.aac.encoder" type="audio/mp4a-latm">
             <Limit name="channel-count" max="6" />
             <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
             <!-- also may support 64000, 88200  and 96000 Hz -->
             <Limit name="bitrate" range="8000-960000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.amrnb.encoder" type="audio/3gpp">
+        <MediaCodec name="c2.android.amrnb.encoder" type="audio/3gpp">
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000" />
             <Limit name="bitrate" range="4750-12200" />
             <Feature name="bitrate-modes" value="CBR" />
         </MediaCodec>
-        <MediaCodec name="c2.google.amrwb.encoder" type="audio/amr-wb">
+        <MediaCodec name="c2.android.amrwb.encoder" type="audio/amr-wb">
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="16000" />
             <Limit name="bitrate" range="6600-23850" />
             <Feature name="bitrate-modes" value="CBR" />
         </MediaCodec>
-        <MediaCodec name="c2.google.flac.encoder" type="audio/flac">
+        <MediaCodec name="c2.android.flac.encoder" type="audio/flac">
             <Limit name="channel-count" max="2" />
             <Limit name="sample-rate" ranges="1-655350" />
             <Limit name="bitrate" range="1-21000000" />
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 593463b..adb45b3 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -16,7 +16,7 @@
 
 <Included>
     <Decoders>
-        <MediaCodec name="c2.google.mpeg4.decoder" type="video/mp4v-es">
+        <MediaCodec name="c2.android.mpeg4.decoder" type="video/mp4v-es">
             <!-- profiles and levels:  ProfileSimple : Level3 -->
             <Limit name="size" min="2x2" max="352x288" />
             <Limit name="alignment" value="2x2" />
@@ -25,7 +25,7 @@
             <Limit name="bitrate" range="1-384000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.google.h263.decoder" type="video/3gpp">
+        <MediaCodec name="c2.android.h263.decoder" type="video/3gpp">
             <!-- profiles and levels:  ProfileBaseline : Level30, ProfileBaseline : Level45
                     ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
             <Limit name="size" min="2x2" max="352x288" />
@@ -33,7 +33,7 @@
             <Limit name="bitrate" range="1-384000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.google.avc.decoder" type="video/avc">
+        <MediaCodec name="c2.android.avc.decoder" type="video/avc">
             <!-- profiles and levels:  ProfileHigh : Level52 -->
             <Limit name="size" min="2x2" max="4080x4080" />
             <Limit name="alignment" value="2x2" />
@@ -43,7 +43,7 @@
             <Limit name="bitrate" range="1-48000000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.google.hevc.decoder" type="video/hevc">
+        <MediaCodec name="c2.android.hevc.decoder" type="video/hevc">
             <!-- profiles and levels:  ProfileMain : MainTierLevel51 -->
             <Limit name="size" min="2x2" max="4096x4096" />
             <Limit name="alignment" value="2x2" />
@@ -53,7 +53,7 @@
             <Limit name="bitrate" range="1-10000000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+        <MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8">
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
@@ -62,7 +62,7 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+        <MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9">
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
@@ -74,13 +74,13 @@
     </Decoders>
 
     <Encoders>
-        <MediaCodec name="c2.google.h263.encoder" type="video/3gpp">
+        <MediaCodec name="c2.android.h263.encoder" type="video/3gpp">
             <!-- profiles and levels:  ProfileBaseline : Level45 -->
             <Limit name="size" min="176x144" max="176x144" />
             <Limit name="alignment" value="16x16" />
             <Limit name="bitrate" range="1-128000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.avc.encoder" type="video/avc">
+        <MediaCodec name="c2.android.avc.encoder" type="video/avc">
             <!-- profiles and levels:  ProfileBaseline : Level41 -->
             <Limit name="size" min="16x16" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
@@ -90,7 +90,7 @@
             <Limit name="bitrate" range="1-12000000" />
             <Feature name="intra-refresh" />
         </MediaCodec>
-        <MediaCodec name="c2.google.mpeg4.encoder" type="video/mp4v-es">
+        <MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
             <!-- profiles and levels:  ProfileCore : Level2 -->
             <Limit name="size" min="16x16" max="176x144" />
             <Limit name="alignment" value="16x16" />
@@ -98,7 +98,7 @@
             <Limit name="blocks-per-second" range="12-1485" />
             <Limit name="bitrate" range="1-64000" />
         </MediaCodec>
-        <MediaCodec name="c2.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+        <MediaCodec name="c2.android.vp8.encoder" type="video/x-vnd.on2.vp8">
             <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
@@ -108,7 +108,7 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="bitrate-modes" value="VBR,CBR" />
         </MediaCodec>
-        <MediaCodec name="c2.google.vp9.encoder" type="video/x-vnd.on2.vp9">
+        <MediaCodec name="c2.android.vp9.encoder" type="video/x-vnd.on2.vp9">
             <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 3b84018..df66ac6 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -116,6 +116,7 @@
         default:
             break;
     }
+    item->mType = kTypeInt32; // clear type
 }
 
 #ifdef DUMP_STATS
@@ -196,6 +197,7 @@
         CHECK(mNumItems < kMaxNumItems);
         i = mNumItems++;
         item = &mItems[i];
+        item->mType = kTypeInt32;
         item->setName(name, len);
     }
 
@@ -944,6 +946,40 @@
     return mItems[index].mName;
 }
 
+AMessage::ItemData AMessage::getEntryAt(size_t index) const {
+    ItemData it;
+    if (index < mNumItems) {
+        switch (mItems[index].mType) {
+            case kTypeInt32:    it.set(mItems[index].u.int32Value); break;
+            case kTypeInt64:    it.set(mItems[index].u.int64Value); break;
+            case kTypeSize:     it.set(mItems[index].u.sizeValue); break;
+            case kTypeFloat:    it.set(mItems[index].u.floatValue); break;
+            case kTypeDouble:   it.set(mItems[index].u.doubleValue); break;
+            case kTypePointer:  it.set(mItems[index].u.ptrValue); break;
+            case kTypeRect:     it.set(mItems[index].u.rectValue); break;
+            case kTypeString:   it.set(*mItems[index].u.stringValue); break;
+            case kTypeObject: {
+                sp<RefBase> obj = mItems[index].u.refValue;
+                it.set(obj);
+                break;
+            }
+            case kTypeMessage: {
+                sp<AMessage> msg = static_cast<AMessage *>(mItems[index].u.refValue);
+                it.set(msg);
+                break;
+            }
+            case kTypeBuffer: {
+                sp<ABuffer> buf = static_cast<ABuffer *>(mItems[index].u.refValue);
+                it.set(buf);
+                break;
+            }
+            default:
+                break;
+        }
+    }
+    return it;
+}
+
 status_t AMessage::setEntryNameAt(size_t index, const char *name) {
     if (index >= mNumItems) {
         return BAD_INDEX;
@@ -964,6 +1000,60 @@
     return OK;
 }
 
+status_t AMessage::setEntryAt(size_t index, const ItemData &item) {
+    AString stringValue;
+    sp<RefBase> refValue;
+    sp<AMessage> msgValue;
+    sp<ABuffer> bufValue;
+
+    if (index >= mNumItems) {
+        return BAD_INDEX;
+    }
+    if (!item.used()) {
+        return BAD_VALUE;
+    }
+    Item *dst = &mItems[index];
+    freeItemValue(dst);
+
+    // Simple values go directly into the union; strings and refs need allocation or refcounting.
+    if (item.find(&dst->u.int32Value)) {
+        dst->mType = kTypeInt32;
+    } else if (item.find(&dst->u.int64Value)) {
+        dst->mType = kTypeInt64;
+    } else if (item.find(&dst->u.sizeValue)) {
+        dst->mType = kTypeSize;
+    } else if (item.find(&dst->u.floatValue)) {
+        dst->mType = kTypeFloat;
+    } else if (item.find(&dst->u.doubleValue)) {
+        dst->mType = kTypeDouble;
+    } else if (item.find(&dst->u.ptrValue)) {
+        dst->mType = kTypePointer;
+    } else if (item.find(&dst->u.rectValue)) {
+        dst->mType = kTypeRect;
+    } else if (item.find(&stringValue)) {
+        dst->u.stringValue = new AString(stringValue);
+        dst->mType = kTypeString;
+    } else if (item.find(&refValue)) {
+        if (refValue != NULL) { refValue->incStrong(this); }
+        dst->u.refValue = refValue.get();
+        dst->mType = kTypeObject;
+    } else if (item.find(&msgValue)) {
+        if (msgValue != NULL) { msgValue->incStrong(this); }
+        dst->u.refValue = msgValue.get();
+        dst->mType = kTypeMessage;
+    } else if (item.find(&bufValue)) {
+        if (bufValue != NULL) { bufValue->incStrong(this); }
+        dst->u.refValue = bufValue.get();
+        dst->mType = kTypeBuffer;
+    } else {
+        // unsupported item - we should not be here.
+        dst->mType = kTypeInt32;
+        dst->u.int32Value = 0xDEADDEAD;
+        return BAD_TYPE;
+    }
+    return OK;
+}
+
 status_t AMessage::removeEntryAt(size_t index) {
     if (index >= mNumItems) {
         return BAD_INDEX;
@@ -983,6 +1073,34 @@
     return OK;
 }
 
+void AMessage::setItem(const char *name, const ItemData &item) {
+    if (item.used()) {
+        Item *it = allocateItem(name);
+        if (it != nullptr) {
+            setEntryAt(it - mItems, item);
+        }
+    }
+}
+
+AMessage::ItemData AMessage::findItem(const char *name) const {
+    return getEntryAt(findEntryByName(name));
+}
+
+void AMessage::extend(const sp<AMessage> &other) {
+    // ignore null messages
+    if (other == nullptr) {
+        return;
+    }
+
+    for (size_t ix = 0; ix < other->mNumItems; ++ix) {
+        Item *it = allocateItem(other->mItems[ix].mName);
+        if (it != nullptr) {
+            ItemData data = other->getEntryAt(ix);
+            setEntryAt(it - mItems, data);
+        }
+    }
+}
+
 size_t AMessage::findEntryByName(const char *name) const {
     return name == nullptr ? countEntries() : findItemIndex(name, strlen(name));
 }
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index 04fac19..c6ef75f 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -31,7 +31,7 @@
 namespace android {
 
 // static
-const char *AString::kEmptyString = "";
+constexpr const char *AString::kEmptyString;
 
 AString::AString()
     : mData((char *)kEmptyString),
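
Reviewer note: the AString change moves the initializer in-class as a constexpr, while AString.cpp keeps an (empty) out-of-line definition, which pre-C++17 rules still require once the member is odr-used (its address is effectively taken via mData). A standalone illustration of the same pattern (names hypothetical):

    #include <cstdio>

    struct Config {
        // In-class constexpr initializer, as AString.h now does for kEmptyString.
        constexpr static const char *kDefaultName = "default";
    };

    // Pre-C++17, this out-of-line definition is needed once the member is
    // odr-used (its address is taken below); C++17 makes it implicitly inline.
    constexpr const char *Config::kDefaultName;

    int main() {
        const char *const *p = &Config::kDefaultName;  // odr-use
        std::printf("%s\n", *p);
        return 0;
    }
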
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index f663542..6b384c0 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -10,6 +10,7 @@
     vndk: {
         enabled: true,
     },
+    double_loadable: true,
     include_dirs: [
         "frameworks/av/include",
         "frameworks/native/include",
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
index 49aa0dc..85e4378 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
@@ -25,6 +25,9 @@
 #include <media/stagefright/foundation/TypeTraits.h>
 #include <media/stagefright/foundation/Flagged.h>
 
+#undef HIDE
+#define HIDE __attribute__((visibility("hidden")))
+
 namespace android {
 
 /**
@@ -78,7 +81,7 @@
  * This class is needed as member function specialization is not allowed for a
  * templated class.
  */
-struct _AUnion_impl {
+struct HIDE _AUnion_impl {
     /**
      * Calls placement constuctor for type T with arbitrary arguments for a storage at an address.
      * Storage MUST be large enough to contain T.
@@ -113,13 +116,13 @@
 
 /** Constructor specialization for void type */
 template<>
-inline void _AUnion_impl::emplace<void>(size_t totalSize, void *addr) {
+HIDE inline void _AUnion_impl::emplace<void>(size_t totalSize, void *addr) {
     memset(addr, 0, totalSize);
 }
 
 /** Destructor specialization for void type */
 template<>
-inline void _AUnion_impl::del<void>(void *) {
+HIDE inline void _AUnion_impl::del<void>(void *) {
 }
 
 /// \endcond
@@ -221,7 +224,7 @@
 template<
         typename T,
         bool=std::is_copy_assignable<T>::value>
-struct _AData_copier {
+struct HIDE _AData_copier {
     static_assert(std::is_copy_assignable<T>::value, "T must be copy assignable here");
 
     /**
@@ -294,7 +297,7 @@
  *
  */
 template<typename T>
-struct _AData_copier<T, false> {
+struct HIDE _AData_copier<T, false> {
     static_assert(!std::is_copy_assignable<T>::value, "T must not be copy assignable here");
     static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible here");
 
@@ -318,7 +321,7 @@
 template<
         typename T,
         bool=std::is_move_assignable<T>::value>
-struct _AData_mover {
+struct HIDE _AData_mover {
     static_assert(std::is_move_assignable<T>::value, "T must be move assignable here");
 
     /**
@@ -389,7 +392,7 @@
  *
  */
 template<typename T>
-struct _AData_mover<T, false> {
+struct HIDE _AData_mover<T, false> {
     static_assert(!std::is_move_assignable<T>::value, "T must not be move assignable here");
     static_assert(std::is_move_constructible<T>::value, "T must be move constructible here");
 
@@ -407,13 +410,13 @@
  * \param Ts types to consider for the member
  */
 template<typename Flagger, typename U, typename ...Ts>
-struct _AData_deleter;
+struct HIDE _AData_deleter;
 
 /**
  * Template specialization when there are still types to consider (T and rest)
  */
 template<typename Flagger, typename U, typename T, typename ...Ts>
-struct _AData_deleter<Flagger, U, T, Ts...> {
+struct HIDE _AData_deleter<Flagger, U, T, Ts...> {
     static bool del(typename Flagger::type flags, U &data) {
         if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
             data.template del<T>();
@@ -427,13 +430,101 @@
  * Template specialization when there are no more types to consider.
  */
 template<typename Flagger, typename U>
-struct _AData_deleter<Flagger, U> {
+struct HIDE _AData_deleter<Flagger, U> {
     inline static bool del(typename Flagger::type, U &) {
         return false;
     }
 };
 
 /**
+ * Helper template that copy assigns an object of a specific type (member) in an
+ * AUnion.
+ *
+ * \param Flagger type flagger class (see AData)
+ * \param U AUnion object in which the member should be copy assigned
+ * \param Ts types to consider for the member
+ */
+template<typename Flagger, typename U, typename ...Ts>
+struct HIDE _AData_copy_assigner;
+
+/**
+ * Template specialization when there are still types to consider (T and rest)
+ */
+template<typename Flagger, typename U, typename T, typename ...Ts>
+struct HIDE _AData_copy_assigner<Flagger, U, T, Ts...> {
+    static bool assign(typename Flagger::type flags, U &dst, const U &src) {
+        static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible");
+        // if we can delete as, we can also assign as
+        if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+            dst.template emplace<T>(src.template get<T>());
+            return true;
+        }
+        return _AData_copy_assigner<Flagger, U, Ts...>::assign(flags, dst, src);
+    }
+};
+
+/**
+ * Template specialization when there are no more types to consider.
+ */
+template<typename Flagger, typename U>
+struct HIDE _AData_copy_assigner<Flagger, U> {
+    inline static bool assign(typename Flagger::type, U &, const U &) {
+        return false;
+    }
+};
+
+/**
+ * Helper template that move assigns an object of a specific type (member) in an
+ * AUnion.
+ *
+ * \param Flagger type flagger class (see AData)
+ * \param U AUnion object in which the member should be move assigned
+ * \param Ts types to consider for the member
+ */
+template<typename Flagger, typename U, typename ...Ts>
+struct HIDE _AData_move_assigner;
+
+/**
+ * Template specialization when there are still types to consider (T and rest)
+ */
+template<typename Flagger, typename U, typename T, typename ...Ts>
+struct HIDE _AData_move_assigner<Flagger, U, T, Ts...> {
+    template<typename V = T>
+    static typename std::enable_if<std::is_move_constructible<V>::value, bool>::type
+    assign(typename Flagger::type flags, U &dst, U &src) {
+        // if we can delete as, we can also assign as
+        if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+            dst.template emplace<T>(std::move(src.template get<T>()));
+            return true;
+        }
+        return _AData_move_assigner<Flagger, U, Ts...>::assign(flags, dst, src);
+    }
+
+    // Fall back to copy construction if T is not move constructible
+    template<typename V = T>
+    static typename std::enable_if<!std::is_move_constructible<V>::value, bool>::type
+    assign(typename Flagger::type flags, U &dst, U &src) {
+        static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible");
+        // if we can delete as, we can also assign as
+        if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+            dst.template emplace<T>(src.template get<T>());
+            return true;
+        }
+        return _AData_move_assigner<Flagger, U, Ts...>::assign(flags, dst, src);
+    }
+};
+
+/**
+ * Template specialization when there are no more types to consider.
+ */
+template<typename Flagger, typename U>
+struct HIDE _AData_move_assigner<Flagger, U> {
+    inline static bool assign(typename Flagger::type, U &, U &) {
+        return false;
+    }
+};
+
+/**
  * Container that can store an arbitrary object of a set of specified types.
  *
  * This struct is an outer class that contains various inner classes based on desired type
@@ -654,6 +745,61 @@
         Custom() : base_t(Flagger::flagFor((void*)0)) { }
 
         /**
+         * Copy assignment operator.
+         */
+        Custom& operator=(const Custom &o) {
+            if (&o != this) {
+                if (this->used() && !this->clear()) {
+                    __builtin_trap();
+                }
+                if (o.used()) {
+                    if (_AData_copy_assigner<Flagger, data_t, Ts...>::assign(
+                            o.flags(), this->get(), o.get())) {
+                        this->setFlags(o.flags());
+                    } else {
+                        __builtin_trap();
+                    }
+                }
+            }
+            return *this;
+        }
+
+        /**
+         * Copy constructor.
+         */
+        Custom(const Custom &o) : Custom() {
+            *this = o;
+        }
+
+        /**
+         * Move assignment operator.
+         */
+        Custom& operator=(Custom &&o) {
+            if (&o != this) {
+                if (this->used() && !this->clear()) {
+                    __builtin_trap();
+                }
+                if (o.used()) {
+                    if (_AData_move_assigner<Flagger, data_t, Ts...>::assign(
+                            o.flags(), this->get(), o.get())) {
+                        this->setFlags(o.flags());
+                        o.clear();
+                    } else {
+                        __builtin_trap();
+                    }
+                }
+            }
+            return *this;
+        }
+
+        /**
+         * Move constructor.
+         */
+        Custom(Custom &&o) : Custom() {
+            *this = std::move(o);
+        }
+
+        /**
          * Removes the contained object, if any.
          */
         ~Custom() {
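
Reviewer note: with the copy and move assigners added above, AData-based containers can be copied and moved rather than only emplaced in place. A small sketch using the Basic flavor that AMessage::ItemData builds on (illustrative; set/find/used follow the usage visible elsewhere in this diff):

    #include <utility>
    #include <media/stagefright/foundation/AData.h>
    #include <media/stagefright/foundation/AString.h>

    using namespace android;

    int main() {
        AData<int32_t, AString>::Basic a;
        a.set(AString("hello"));

        // Copy construction/assignment are backed by the new
        // _AData_copy_assigner machinery.
        AData<int32_t, AString>::Basic b = a;

        // Move clears the source, per the new move assignment operator.
        AData<int32_t, AString>::Basic c = std::move(a);

        AString s;
        return (b.find(&s) && c.used() && !a.used()) ? 0 : 1;
    }
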
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index d90a0de..742651e 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -19,6 +19,7 @@
 #define A_MESSAGE_H_
 
 #include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AData.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
@@ -155,6 +156,9 @@
     // their refcount incremented.
     sp<AMessage> dup() const;
 
+    // Adds all items from other into this.
+    void extend(const sp<AMessage> &other);
+
     // Performs a shallow or deep comparison of |this| and |other| and returns
     // an AMessage with the differences.
     // Warning: RefBase items, i.e. "objects" are _not_ copied but only have
@@ -180,10 +184,39 @@
         kTypeBuffer,
     };
 
+    struct Rect {
+        int32_t mLeft, mTop, mRight, mBottom;
+    };
+
     size_t countEntries() const;
     const char *getEntryNameAt(size_t index, Type *type) const;
 
     /**
+     * Retrieves the item at a specific index.
+     */
+    typedef AData<
+        int32_t, int64_t, size_t, float, double, Rect, AString,
+        void *, sp<AMessage>, sp<ABuffer>, sp<RefBase>>::Basic ItemData;
+
+    /**
+     * Finds an item by name. This can be used if the type is unknown.
+     *
+     * \param name name of the item
+     * Returns an empty item if no item is present with that name.
+     */
+    ItemData findItem(const char *name) const;
+
+    /**
+     * Sets an item of arbitrary type. Does nothing if the item value is empty.
+     *
+     * \param name name of the item
+     * \param item value of the item
+     */
+    void setItem(const char *name, const ItemData &item);
+
+    /**
+     * Retrieves the item at a specific index.
+     */
+    ItemData getEntryAt(size_t index) const;
+
+    /**
      * Finds an entry by name and returns its index.
      *
      * \retval countEntries() if the entry is not found.
@@ -204,6 +237,19 @@
     status_t setEntryNameAt(size_t index, const char *name);
 
     /**
+     * Sets the item of an entry based on index.
+     *
+     * \param index index of the entry
+     * \param item new item of the entry
+     *
+     * \retval OK the item was set successfully
+     * \retval BAD_INDEX invalid index
+     * \retval BAD_VALUE item is invalid (null)
+     * \retval BAD_TYPE type is unsupported (should not happen)
+     */
+    status_t setEntryAt(size_t index, const ItemData &item);
+
+    /**
      * Removes an entry based on index.
      *
      * \param index index of the entry
@@ -227,10 +273,6 @@
     wp<AHandler> mHandler;
     wp<ALooper> mLooper;
 
-    struct Rect {
-        int32_t mLeft, mTop, mRight, mBottom;
-    };
-
     struct Item {
         union {
             int32_t int32Value;
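
The new ItemData typedef together with findItem()/setItem() lets callers read and copy AMessage entries without knowing their type up front, alongside extend() and the index-based getEntryAt()/setEntryAt(). A minimal usage sketch (the entry name and the helper are illustrative):

    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Copy one entry from src to dst without knowing its type, then probe it.
    void copyEntrySketch(const sp<AMessage> &src, const sp<AMessage> &dst) {
        AMessage::ItemData item = src->findItem("bitrate");
        if (item.used()) {                 // empty item means no such entry
            dst->setItem("bitrate", item);
        }

        int32_t bitrate;
        if (item.find(&bitrate)) {
            // the entry happened to hold an int32
        }
    }
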
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
index ff086b3..0f6299c 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
@@ -93,7 +93,7 @@
     status_t writeToParcel(Parcel *parcel) const;
 
 private:
-    static const char *kEmptyString;
+    constexpr static const char *kEmptyString = "";
 
     char *mData;
     size_t mSize;
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h b/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
index 1250e9b..2041b22 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
@@ -19,6 +19,9 @@
 
 #include <type_traits>
 
+#undef HIDE
+#define HIDE __attribute__((visibility("hidden")))
+
 namespace android {
 
 /**
@@ -31,7 +34,7 @@
  * Type support utility class to check if a type is an integral type or an enum.
  */
 template<typename T>
-struct is_integral_or_enum
+struct HIDE is_integral_or_enum
     : std::integral_constant<bool, std::is_integral<T>::value || std::is_enum<T>::value> { };
 
 /**
@@ -46,7 +49,7 @@
         typename U=typename std::enable_if<is_integral_or_enum<T>::value>::type,
         bool=std::is_enum<T>::value,
         bool=std::is_integral<T>::value>
-struct underlying_integral_type {
+struct HIDE underlying_integral_type {
     static_assert(!std::is_enum<T>::value, "T should not be enum here");
     static_assert(!std::is_integral<T>::value, "T should not be integral here");
     typedef U type;
@@ -54,7 +57,7 @@
 
 /** Specialization for enums. */
 template<typename T, typename U>
-struct underlying_integral_type<T, U, true, false> {
+struct HIDE underlying_integral_type<T, U, true, false> {
     static_assert(std::is_enum<T>::value, "T should be enum here");
     static_assert(!std::is_integral<T>::value, "T should not be integral here");
     typedef typename std::underlying_type<T>::type type;
@@ -62,7 +65,7 @@
 
 /** Specialization for non-enum std-integral types. */
 template<typename T, typename U>
-struct underlying_integral_type<T, U, false, true> {
+struct HIDE underlying_integral_type<T, U, false, true> {
     static_assert(!std::is_enum<T>::value, "T should not be enum here");
     static_assert(std::is_integral<T>::value, "T should be integral here");
     typedef T type;
@@ -72,7 +75,7 @@
  * Type support utility class to check if the underlying integral type is signed.
  */
 template<typename T>
-struct is_signed_integral
+struct HIDE is_signed_integral
     : std::integral_constant<bool, std::is_signed<
             typename underlying_integral_type<T, unsigned>::type>::value> { };
 
@@ -80,7 +83,7 @@
  * Type support utility class to check if the underlying integral type is unsigned.
  */
 template<typename T>
-struct is_unsigned_integral
+struct HIDE is_unsigned_integral
     : std::integral_constant<bool, std::is_unsigned<
             typename underlying_integral_type<T, signed>::type>::value> {
 };
@@ -92,26 +95,26 @@
  * member constant |value| equal to true. Otherwise value is false.
  */
 template<typename T, typename ...Us>
-struct is_one_of;
+struct HIDE is_one_of;
 
 /// \if 0
 /**
  * Template specialization when first type matches the searched type.
  */
 template<typename T, typename ...Us>
-struct is_one_of<T, T, Us...> : std::true_type {};
+struct HIDE is_one_of<T, T, Us...> : std::true_type {};
 
 /**
  * Template specialization when first type does not match the searched type.
  */
 template<typename T, typename U, typename ...Us>
-struct is_one_of<T, U, Us...> : is_one_of<T, Us...> {};
+struct HIDE is_one_of<T, U, Us...> : is_one_of<T, Us...> {};
 
 /**
  * Template specialization when there are no types to search.
  */
 template<typename T>
-struct is_one_of<T> : std::false_type {};
+struct HIDE is_one_of<T> : std::false_type {};
 /// \endif
 
 /**
@@ -121,44 +124,44 @@
  * Otherwise value is false.
  */
 template<typename ...Us>
-struct are_unique;
+struct HIDE are_unique;
 
 /// \if 0
 /**
  * Template specialization when there are no types.
  */
 template<>
-struct are_unique<> : std::true_type {};
+struct HIDE are_unique<> : std::true_type {};
 
 /**
  * Template specialization when there is at least one type to check.
  */
 template<typename T, typename ...Us>
-struct are_unique<T, Us...>
+struct HIDE are_unique<T, Us...>
     : std::integral_constant<bool, are_unique<Us...>::value && !is_one_of<T, Us...>::value> {};
 /// \endif
 
 /// \if 0
 template<size_t Base, typename T, typename ...Us>
-struct _find_first_impl;
+struct HIDE _find_first_impl;
 
 /**
  * Template specialization when there are no types to search.
  */
 template<size_t Base, typename T>
-struct _find_first_impl<Base, T> : std::integral_constant<size_t, 0> {};
+struct HIDE _find_first_impl<Base, T> : std::integral_constant<size_t, 0> {};
 
 /**
  * Template specialization when T is the first type in Us.
  */
 template<size_t Base, typename T, typename ...Us>
-struct _find_first_impl<Base, T, T, Us...> : std::integral_constant<size_t, Base> {};
+struct HIDE _find_first_impl<Base, T, T, Us...> : std::integral_constant<size_t, Base> {};
 
 /**
  * Template specialization when T is not the first type in Us.
  */
 template<size_t Base, typename T, typename U, typename ...Us>
-struct _find_first_impl<Base, T, U, Us...>
+struct HIDE _find_first_impl<Base, T, U, Us...>
     : std::integral_constant<size_t, _find_first_impl<Base + 1, T, Us...>::value> {};
 
 /// \endif
@@ -169,7 +172,7 @@
  * If T occurs in Us, index is the 1-based left-most index of T in Us. Otherwise, index is 0.
  */
 template<typename T, typename ...Us>
-struct find_first {
+struct HIDE find_first {
     static constexpr size_t index = _find_first_impl<1, T, Us...>::value;
 };
 
@@ -180,13 +183,13 @@
  * Adds a base index.
  */
 template<size_t Base, typename T, typename ...Us>
-struct _find_first_convertible_to_helper;
+struct HIDE _find_first_convertible_to_helper;
 
 /**
  * Template specialization for when there are more types to consider
  */
 template<size_t Base, typename T, typename U, typename ...Us>
-struct _find_first_convertible_to_helper<Base, T, U, Us...> {
+struct HIDE _find_first_convertible_to_helper<Base, T, U, Us...> {
     static constexpr size_t index =
         std::is_convertible<T, U>::value ? Base :
                 _find_first_convertible_to_helper<Base + 1, T, Us...>::index;
@@ -199,7 +202,7 @@
  * Template specialization for when there are no more types to consider
  */
 template<size_t Base, typename T>
-struct _find_first_convertible_to_helper<Base, T> {
+struct HIDE _find_first_convertible_to_helper<Base, T> {
     static constexpr size_t index = 0;
     typedef void type;
 };
@@ -216,7 +219,7 @@
  * \param Us types into which the conversion is considered
  */
 template<typename T, typename ...Us>
-struct find_first_convertible_to : public _find_first_convertible_to_helper<1, T, Us...> { };
+struct HIDE find_first_convertible_to : public _find_first_convertible_to_helper<1, T, Us...> { };
 
 }  // namespace android
 
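
Aside from being marked HIDE (hidden symbol visibility, so the helper templates do not leak out of shared objects), the traits behave exactly as before, and their results can be checked entirely at compile time. A small sketch, assuming the header is on the include path:

    #include <media/stagefright/foundation/TypeTraits.h>

    using namespace android;

    // Compile-time checks only; nothing here runs.
    static_assert(is_one_of<int, float, int, char>::value, "int is in the list");
    static_assert(!are_unique<int, float, int>::value, "int appears twice");
    static_assert(find_first<int, float, int, char>::index == 2, "1-based position of int");
    static_assert(find_first_convertible_to<char, void *, long>::index == 2,
                  "char converts to long but not to void *");
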
diff --git a/media/libstagefright/foundation/tests/AData_test.cpp b/media/libstagefright/foundation/tests/AData_test.cpp
index f014c25..2628a47 100644
--- a/media/libstagefright/foundation/tests/AData_test.cpp
+++ b/media/libstagefright/foundation/tests/AData_test.cpp
@@ -978,4 +978,63 @@
     }
 };
 
+TEST_F(ADataTest, AData_AssignmentTest) {
+    typedef AData<sp<ABuffer>, int32_t>::Basic Data;
+
+    sp<ABuffer> buf1 = new ABuffer((void *)"hello", 6);
+    wp<ABuffer> buf1w = buf1;
+
+    Data obj1;
+    obj1.set(buf1);
+    EXPECT_NE(buf1w.promote(), nullptr);
+    buf1.clear();
+    EXPECT_NE(buf1w.promote(), nullptr);
+    obj1.clear();
+    EXPECT_EQ(buf1w.promote(), nullptr);
+
+    buf1 = new ABuffer((void *)"again", 6);
+    buf1w = buf1;
+
+    obj1.set(buf1);
+    EXPECT_TRUE(obj1.used());
+    Data obj2 = obj1;
+
+    sp<ABuffer> buf2;
+    EXPECT_TRUE(obj2.find(&buf2));
+    EXPECT_EQ(buf2, buf1);
+    buf1.clear();
+    buf2.clear();
+    EXPECT_NE(buf1w.promote(), nullptr);
+    obj1.clear();
+    EXPECT_NE(buf1w.promote(), nullptr);
+    obj2.clear();
+    EXPECT_EQ(buf1w.promote(), nullptr);
+
+    buf1 = new ABuffer((void *)"still", 6);
+    buf1w = buf1;
+
+    obj1.set(buf1);
+    EXPECT_TRUE(obj1.used());
+    obj2 = std::move(obj1);
+    EXPECT_FALSE(obj1.used());
+
+    EXPECT_TRUE(obj2.find(&buf2));
+    EXPECT_EQ(buf2, buf1);
+    buf1.clear();
+    buf2.clear();
+    EXPECT_NE(buf1w.promote(), nullptr);
+    obj2.clear();
+    EXPECT_EQ(buf1w.promote(), nullptr);
+
+    typedef AData<sp<ABuffer>, std::unique_ptr<int32_t>>::Basic Data2;
+    Data2 obj3, obj4;
+
+    buf1 = new ABuffer((void *)"hence", 6);
+    obj3.set(buf1);
+    obj4 = std::move(obj3);
+    EXPECT_FALSE(obj3.used());
+    EXPECT_TRUE(obj4.find(&buf2));
+    EXPECT_EQ(buf2, buf1);
+}
+
 } // namespace android
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 8f349fc..52791b9 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -57,7 +57,7 @@
             const char *language,
             uint32_t flags);
 
-    bool getActiveURI(AString *uri) const;
+    bool getActiveURI(AString *uri, const char *baseURL) const;
 
     void pickRandomMediaItems();
     status_t selectTrack(size_t index, bool select);
@@ -76,6 +76,7 @@
         AString mURI;
         AString mLanguage;
         uint32_t mFlags;
+        AString makeURL(const char *baseURL) const;
     };
 
     Type mType;
@@ -228,12 +229,12 @@
     return format;
 }
 
-bool M3UParser::MediaGroup::getActiveURI(AString *uri) const {
+bool M3UParser::MediaGroup::getActiveURI(AString *uri, const char *baseURL) const {
     for (size_t i = 0; i < mMediaItems.size(); ++i) {
         if (mSelectedIndex >= 0 && i == (size_t)mSelectedIndex) {
             const Media &item = mMediaItems.itemAt(i);
 
-            *uri = item.mURI;
+            *uri = item.makeURL(baseURL);
             return true;
         }
     }
@@ -322,7 +323,7 @@
     }
 
     if (uri) {
-        *uri = mItems.itemAt(index).mURI;
+        *uri = mItems.itemAt(index).makeURL(mBaseURI.c_str());
     }
 
     if (meta) {
@@ -428,7 +429,7 @@
     AString groupID;
     if (!meta->findString(key, &groupID)) {
         if (uri != NULL) {
-            *uri = mItems.itemAt(index).mURI;
+            *uri = mItems.itemAt(index).makeURL(mBaseURI.c_str());
         }
 
         AString codecs;
@@ -459,7 +460,7 @@
     // don't care about the active URI (or if there is an active one)
     if (uri != NULL) {
         sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
-        if (!group->getActiveURI(uri)) {
+        if (!group->getActiveURI(uri, mBaseURI.c_str())) {
             return false;
         }
 
@@ -484,6 +485,9 @@
         // Base URL must be absolute
         return false;
     }
+    if (!strncasecmp("data:", url, 5)) {
+        return false;
+    }
     const size_t schemeEnd = (strstr(baseURL, "//") - baseURL) + 2;
     CHECK(schemeEnd == 7 || schemeEnd == 8);
 
@@ -545,6 +549,18 @@
     return true;
 }
 
+AString M3UParser::Item::makeURL(const char *baseURL) const {
+    AString out;
+    CHECK(MakeURL(baseURL, mURI.c_str(), &out));
+    return out;
+}
+
+AString M3UParser::MediaGroup::Media::makeURL(const char *baseURL) const {
+    AString out;
+    CHECK(MakeURL(baseURL, mURI.c_str(), &out));
+    return out;
+}
+
 status_t M3UParser::parse(const void *_data, size_t size) {
     int32_t lineNo = 0;
 
@@ -675,7 +691,7 @@
             mItems.push();
             Item *item = &mItems.editItemAt(mItems.size() - 1);
 
-            CHECK(MakeURL(mBaseURI.c_str(), line.c_str(), &item->mURI));
+            item->mURI = line;
 
             item->mMeta = itemMeta;
 
@@ -1187,9 +1203,7 @@
 
             AString tmp(val, 1, val.size() - 2);
 
-            if (!MakeURL(mBaseURI.c_str(), tmp.c_str(), &groupURI)) {
-                ALOGI("Failed to make absolute URI from '%s'.", tmp.c_str());
-            }
+            groupURI = tmp;
 
             haveGroupURI = true;
         }
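
With these changes the parser stores item and media-group URIs exactly as written in the playlist and only combines them with mBaseURI at the point of use (Item::makeURL and MediaGroup::Media::makeURL), while MakeURL() now declines to resolve data: URIs against a base. Self-contained URIs therefore survive parsing untouched, which fits the PlaylistFetcher change further below, where an EXT-X-KEY URI may now be a data: URI. A rough illustration of the decision involved (illustrative helper, not code from the patch):

    #include <string>
    #include <strings.h>

    // A URI only needs the playlist's base URL when it is not already
    // self-contained (absolute http/https or an inline data: URI).
    static bool needsBaseURL(const std::string &uri) {
        return strncasecmp(uri.c_str(), "http://", 7) != 0
            && strncasecmp(uri.c_str(), "https://", 8) != 0
            && strncasecmp(uri.c_str(), "data:", 5) != 0;
    }
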
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index fa648ed..c85335a 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -64,6 +64,7 @@
     struct Item {
         AString mURI;
         sp<AMessage> mMeta;
+        AString makeURL(const char *baseURL) const;
     };
 
     status_t mInitCheck;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 5624f4a..9f39b5e 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -33,6 +33,7 @@
 #include <media/stagefright/foundation/ByteUtils.h>
 #include <media/stagefright/foundation/MediaKeys.h>
 #include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/DataURISource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MetaDataUtils.h>
@@ -347,6 +348,16 @@
     sp<ABuffer> key;
     if (index >= 0) {
         key = mAESKeyForURI.valueAt(index);
+    } else if (keyURI.startsWith("data:")) {
+        sp<DataSource> keySrc = DataURISource::Create(keyURI.c_str());
+        off64_t keyLen;
+        if (keySrc == NULL || keySrc->getSize(&keyLen) != OK || keyLen < 0) {
+            ALOGE("Malformed cipher key data uri.");
+            return ERROR_MALFORMED;
+        }
+        key = new ABuffer(keyLen);
+        keySrc->readAt(0, key->data(), keyLen);
+        key->setRange(0, keyLen);
     } else {
         ssize_t err = mHTTPDownloader->fetchFile(keyURI.c_str(), &key);
 
@@ -1018,7 +1029,8 @@
     sp<AMessage> itemMeta;
     int64_t itemDurationUs;
     int32_t targetDuration;
-    if (mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
+    if (mPlaylist->meta() != NULL
+            && mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
         do {
             --index;
             if (!mPlaylist->itemAt(index, NULL /* uri */, &itemMeta)
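
With the parser no longer rewriting data: URIs, an EXT-X-KEY can carry its AES-128 key inline, and the fetcher above decodes it locally through DataURISource instead of issuing an HTTP fetch. The same decode path in isolation, as a hedged sketch (the function name is illustrative):

    #include <media/stagefright/DataURISource.h>
    #include <media/stagefright/foundation/ABuffer.h>

    using namespace android;

    // Turn a "data:..." key URI into an ABuffer holding the raw key bytes.
    sp<ABuffer> decodeDataUriKey(const char *keyURI) {
        sp<DataSource> source = DataURISource::Create(keyURI);
        off64_t size;
        if (source == NULL || source->getSize(&size) != OK || size <= 0) {
            return NULL;  // malformed data URI
        }
        sp<ABuffer> key = new ABuffer(size);
        if (source->readAt(0, key->data(), size) < size) {
            return NULL;  // short read
        }
        key->setRange(0, size);
        return key;
    }
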
diff --git a/media/libstagefright/id3/Android.bp b/media/libstagefright/id3/Android.bp
index 30008d9..37f9d50 100644
--- a/media/libstagefright/id3/Android.bp
+++ b/media/libstagefright/id3/Android.bp
@@ -16,8 +16,6 @@
             cfi: true,
         },
     },
-
-    shared_libs: ["libmedia"],
 }
 
 //###############################################################################
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index f4bba59..a0a62f4 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -329,12 +329,25 @@
 }
 
 void ID3::removeUnsynchronization() {
-    for (size_t i = 0; i + 1 < mSize; ++i) {
-        if (mData[i] == 0xff && mData[i + 1] == 0x00) {
-            memmove(&mData[i + 1], &mData[i + 2], mSize - i - 2);
-            --mSize;
+
+    // This file has "unsynchronization", so we have to replace occurrences
+    // of 0xff 0x00 with just 0xff in order to get the real data.
+
+    size_t writeOffset = 1;
+    for (size_t readOffset = 1; readOffset < mSize; ++readOffset) {
+        if (mData[readOffset - 1] == 0xff && mData[readOffset] == 0x00) {
+            continue;
         }
+        // Compact in place: copy each kept byte to the write cursor. Until a
+        // 0xff 0x00 pair has been dropped the two cursors coincide, so this
+        // is a harmless self-assignment.
+        mData[writeOffset++] = mData[readOffset];
     }
+
+    if (writeOffset < mSize) {
+        mSize = writeOffset;
+    }
+
 }
 
 static void WriteSyncsafeInteger(uint8_t *dst, size_t x) {
@@ -594,6 +607,9 @@
         // UCS-2
         // API wants number of characters, not number of bytes...
         int len = n / 2;
+        if (len == 0) {
+            return;
+        }
         const char16_t *framedata = (const char16_t *) (frameData + 1);
         char16_t *framedatacopy = NULL;
         if (*framedata == 0xfffe) {
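
The rewritten removeUnsynchronization() walks the tag once with separate read and write cursors, dropping the stuffed 0x00 after every 0xff instead of doing a memmove per match, so the worst case goes from quadratic to linear. The same scheme in isolation, as a small self-contained sketch:

    #include <cstdint>
    #include <vector>

    // Collapse every 0xff 0x00 pair to a lone 0xff; all other bytes are
    // copied through unchanged, compacting the buffer in place.
    static void removeUnsync(std::vector<uint8_t> &data) {
        if (data.size() < 2) {
            return;
        }
        size_t write = 1;
        for (size_t read = 1; read < data.size(); ++read) {
            if (data[read - 1] == 0xff && data[read] == 0x00) {
                continue;  // skip the stuffed 0x00
            }
            data[write++] = data[read];
        }
        data.resize(write);
    }
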
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index dfbe2cd..dc58c15 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -32,33 +32,30 @@
 class MediaCodecBuffer;
 class IMediaSource;
 class VideoFrame;
+struct MediaCodec;
 
-struct FrameDecoder {
+struct FrameRect {
+    int32_t left, top, right, bottom;
+};
+
+struct FrameDecoder : public RefBase {
     FrameDecoder(
             const AString &componentName,
             const sp<MetaData> &trackMeta,
-            const sp<IMediaSource> &source) :
-                mComponentName(componentName),
-                mTrackMeta(trackMeta),
-                mSource(source),
-                mDstFormat(OMX_COLOR_Format16bitRGB565),
-                mDstBpp(2) {}
+            const sp<IMediaSource> &source);
 
-    VideoFrame* extractFrame(
-            int64_t frameTimeUs,
-            int option,
-            int colorFormat,
-            bool metaOnly);
+    status_t init(
+            int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
 
-    status_t extractFrames(
-            int64_t frameTimeUs,
-            size_t numFrames,
-            int option,
-            int colorFormat,
-            std::vector<VideoFrame*>* frames);
+    sp<IMemory> extractFrame(FrameRect *rect = NULL);
+
+    status_t extractFrames(std::vector<sp<IMemory> >* frames);
+
+    static sp<IMemory> getMetadataOnly(
+            const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
 
 protected:
-    virtual ~FrameDecoder() {}
+    virtual ~FrameDecoder();
 
     virtual sp<AMessage> onGetFormatAndSeekOptions(
             int64_t frameTimeUs,
@@ -66,6 +63,8 @@
             int seekMode,
             MediaSource::ReadOptions *options) = 0;
 
+    virtual status_t onExtractRect(FrameRect *rect) = 0;
+
     virtual status_t onInputReceived(
             const sp<MediaCodecBuffer> &codecBuffer,
             MetaDataBase &sampleMeta,
@@ -78,14 +77,12 @@
             int64_t timeUs,
             bool *done) = 0;
 
-    VideoFrame *allocVideoFrame(int32_t width, int32_t height, bool metaOnly);
-
     sp<MetaData> trackMeta()     const      { return mTrackMeta; }
     OMX_COLOR_FORMATTYPE dstFormat() const  { return mDstFormat; }
     int32_t dstBpp()             const      { return mDstBpp; }
 
-    void addFrame(VideoFrame *frame) {
-        mFrames.push_back(std::unique_ptr<VideoFrame>(frame));
+    void addFrame(const sp<IMemory> &frame) {
+        mFrames.push_back(frame);
     }
 
 private:
@@ -94,10 +91,14 @@
     sp<IMediaSource> mSource;
     OMX_COLOR_FORMATTYPE mDstFormat;
     int32_t mDstBpp;
-    std::vector<std::unique_ptr<VideoFrame> > mFrames;
+    std::vector<sp<IMemory> > mFrames;
+    MediaSource::ReadOptions mReadOptions;
+    sp<MediaCodec> mDecoder;
+    sp<AMessage> mOutputFormat;
+    bool mHaveMoreInputs;
+    bool mFirstSample;
 
-    bool setDstColorFormat(android_pixel_format_t colorFormat);
-    status_t extractInternal(int64_t frameTimeUs, size_t numFrames, int option);
+    status_t extractInternal();
 
     DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
 };
@@ -106,13 +107,7 @@
     VideoFrameDecoder(
             const AString &componentName,
             const sp<MetaData> &trackMeta,
-            const sp<IMediaSource> &source) :
-                FrameDecoder(componentName, trackMeta, source),
-                mIsAvcOrHevc(false),
-                mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
-                mTargetTimeUs(-1ll),
-                mNumFrames(0),
-                mNumFramesDecoded(0) {}
+            const sp<IMediaSource> &source);
 
 protected:
     virtual sp<AMessage> onGetFormatAndSeekOptions(
@@ -121,6 +116,11 @@
             int seekMode,
             MediaSource::ReadOptions *options) override;
 
+    virtual status_t onExtractRect(FrameRect *rect) override {
+        // Rect extraction for sequences is not supported for now.
+        return (rect == NULL) ? OK : ERROR_UNSUPPORTED;
+    }
+
     virtual status_t onInputReceived(
             const sp<MediaCodecBuffer> &codecBuffer,
             MetaDataBase &sampleMeta,
@@ -145,9 +145,7 @@
     ImageDecoder(
             const AString &componentName,
             const sp<MetaData> &trackMeta,
-            const sp<IMediaSource> &source) :
-                FrameDecoder(componentName, trackMeta, source),
-                mFrame(NULL), mGridRows(1), mGridCols(1), mTilesDecoded(0) {}
+            const sp<IMediaSource> &source);
 
 protected:
     virtual sp<AMessage> onGetFormatAndSeekOptions(
@@ -156,6 +154,8 @@
             int seekMode,
             MediaSource::ReadOptions *options) override;
 
+    virtual status_t onExtractRect(FrameRect *rect) override;
+
     virtual status_t onInputReceived(
             const sp<MediaCodecBuffer> &codecBuffer __unused,
             MetaDataBase &sampleMeta __unused,
@@ -170,9 +170,14 @@
 
 private:
     VideoFrame *mFrame;
+    int32_t mWidth;
+    int32_t mHeight;
     int32_t mGridRows;
     int32_t mGridCols;
+    int32_t mTileWidth;
+    int32_t mTileHeight;
     int32_t mTilesDecoded;
+    int32_t mTargetTiles;
 };
 
 }  // namespace android
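
FrameDecoder is now a RefBase object that is configured once with init() and then asked for frames: extractFrame() returns a single frame (optionally a sub-rect, for images) and extractFrames() a batch, both as IMemory blocks, which is what allows StagefrightMetadataRetriever to keep an ImageDecoder alive across successive tile requests. A hedged usage sketch; the header path, option and color-format values are assumptions, and the helper name is illustrative:

    #include "FrameDecoder.h"            // media/libstagefright/include/ (internal header)

    #include <binder/IMemory.h>
    #include <system/graphics.h>         // HAL_PIXEL_FORMAT_RGB_565

    using namespace android;

    // Decode one video frame near time 0 into shared memory.
    // |component|, |trackMeta| and |source| come from codec/extractor setup.
    sp<IMemory> grabOneFrame(
            const AString &component,
            const sp<MetaData> &trackMeta,
            const sp<IMediaSource> &source) {
        sp<FrameDecoder> decoder = new VideoFrameDecoder(component, trackMeta, source);
        if (decoder->init(
                0 /* frameTimeUs */, 1 /* numFrames */,
                0 /* option: previous sync frame */,
                HAL_PIXEL_FORMAT_RGB_565 /* colorFormat */) != OK) {
            return NULL;
        }
        return decoder->extractFrame();  // no FrameRect: the whole frame
    }
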
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index c286516..64dca4e 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -51,7 +51,7 @@
     ColorConverter *mConverter;
     YUVMode mYUVMode;
     sp<ANativeWindow> mNativeWindow;
-    int32_t mWidth, mHeight;
+    int32_t mWidth, mHeight, mStride;
     int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
     int32_t mCropWidth, mCropHeight;
     int32_t mRotationDegrees;
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 58442fe..f78e125 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -27,8 +27,10 @@
 
 class DataSource;
 class MediaExtractor;
+struct ImageDecoder;
+struct FrameRect;
 
-struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
+struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
     StagefrightMetadataRetriever();
     virtual ~StagefrightMetadataRetriever();
 
@@ -40,12 +42,14 @@
     virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
     virtual status_t setDataSource(const sp<DataSource>& source, const char *mime);
 
-    virtual VideoFrame* getFrameAtTime(
+    virtual sp<IMemory> getFrameAtTime(
             int64_t timeUs, int option, int colorFormat, bool metaOnly);
-    virtual VideoFrame* getImageAtIndex(
-            int index, int colorFormat, bool metaOnly);
+    virtual sp<IMemory> getImageAtIndex(
+            int index, int colorFormat, bool metaOnly, bool thumbnail);
+    virtual sp<IMemory> getImageRectAtIndex(
+            int index, int colorFormat, int left, int top, int right, int bottom);
     virtual status_t getFrameAtIndex(
-            std::vector<VideoFrame*>* frames,
+            std::vector<sp<IMemory> >* frames,
             int frameIndex, int numFrames, int colorFormat, bool metaOnly);
 
     virtual MediaAlbumArt *extractAlbumArt();
@@ -59,13 +63,17 @@
     KeyedVector<int, String8> mMetaData;
     MediaAlbumArt *mAlbumArt;
 
+    sp<ImageDecoder> mImageDecoder;
+    int mLastImageIndex;
     void parseMetaData();
     // Delete album art and clear metadata.
     void clearMetadata();
 
     status_t getFrameInternal(
             int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-            VideoFrame **outFrame, std::vector<VideoFrame*>* outFrames);
+            sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
+    virtual sp<IMemory> getImageInternal(
+            int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
 
     StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
 
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 64caeed..97d15a7 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -446,6 +446,7 @@
         int32_t heavyCompression;
         int32_t targetRefLevel;
         int32_t encodedTargetLevel;
+        int32_t effectType;
     } drcParams_t;
 
     status_t setupAACCodec(
diff --git a/media/libstagefright/include/media/stagefright/CodecBase.h b/media/libstagefright/include/media/stagefright/CodecBase.h
index 1cbf865..ad60f46 100644
--- a/media/libstagefright/include/media/stagefright/CodecBase.h
+++ b/media/libstagefright/include/media/stagefright/CodecBase.h
@@ -223,6 +223,7 @@
     virtual void signalEndOfInputStream() = 0;
 
     typedef CodecBase *(*CreateCodecFunc)(void);
+    typedef PersistentSurface *(*CreateInputSurfaceFunc)(void);
 
 protected:
     CodecBase() = default;
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index a6c8981..2b8c7c8 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -37,11 +37,11 @@
 
     status_t convert(
             const void *srcBits,
-            size_t srcWidth, size_t srcHeight,
+            size_t srcWidth, size_t srcHeight, size_t srcStride,
             size_t srcCropLeft, size_t srcCropTop,
             size_t srcCropRight, size_t srcCropBottom,
             void *dstBits,
-            size_t dstWidth, size_t dstHeight,
+            size_t dstWidth, size_t dstHeight, size_t dstStride,
             size_t dstCropLeft, size_t dstCropTop,
             size_t dstCropRight, size_t dstCropBottom);
 
@@ -49,7 +49,7 @@
     struct BitmapParams {
         BitmapParams(
                 void *bits,
-                size_t width, size_t height,
+                size_t width, size_t height, size_t stride,
                 size_t cropLeft, size_t cropTop,
                 size_t cropRight, size_t cropBottom,
                 OMX_COLOR_FORMATTYPE colorFromat);
@@ -75,10 +75,16 @@
     status_t convertYUV420Planar(
             const BitmapParams &src, const BitmapParams &dst);
 
+    status_t convertYUV420PlanarUseLibYUV(
+            const BitmapParams &src, const BitmapParams &dst);
+
     status_t convertYUV420Planar16(
             const BitmapParams &src, const BitmapParams &dst);
 
-    status_t convertYUV420PlanarUseLibYUV(
+    status_t convertYUV420Planar16ToY410(
+            const BitmapParams &src, const BitmapParams &dst);
+
+    status_t convertYUV420Planar16ToRGB(
             const BitmapParams &src, const BitmapParams &dst);
 
     status_t convertQCOMYUV420SemiPlanar(
@@ -90,10 +96,6 @@
     status_t convertTIYUV420PackedSemiPlanar(
             const BitmapParams &src, const BitmapParams &dst);
 
-    void writeToDst(void *dst_ptr, uint8_t *kAdjustedClip, bool uncropped,
-            signed r1, signed g1, signed b1,
-            signed r2, signed g2, signed b2);
-
     ColorConverter(const ColorConverter &);
     ColorConverter &operator=(const ColorConverter &);
 };
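
convert() and BitmapParams now take explicit row strides for both source and destination, so a caller rendering into a buffer whose stride exceeds the visible width (the new mStride in SoftwareRenderer) no longer needs an intermediate copy. A hedged call sketch; the format pair and the assumption that crop right/bottom are inclusive (hence the "- 1") follow existing callers:

    #include <media/stagefright/ColorConverter.h>
    #include <utils/Errors.h>

    using namespace android;

    // Convert a full YUV420 planar frame into an RGB565 buffer; |srcStride|
    // and |dstStride| are the row strides of the two buffers.
    bool convertFrame(const void *src, void *dst,
            size_t width, size_t height, size_t srcStride, size_t dstStride) {
        ColorConverter converter(
                OMX_COLOR_FormatYUV420Planar, OMX_COLOR_Format16bitRGB565);
        if (!converter.isValid()) {
            return false;
        }
        return converter.convert(
                src, width, height, srcStride,
                0, 0, width - 1, height - 1,        // source crop
                dst, width, height, dstStride,
                0, 0, width - 1, height - 1) == OK; // destination crop
    }
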
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 7b41362..f18940d 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -132,7 +132,7 @@
     status_t startTracks(MetaData *params);
     size_t numTracks();
     int64_t estimateMoovBoxSize(int32_t bitRate);
-    int64_t estimateFileLevelMetaSize();
+    int64_t estimateFileLevelMetaSize(MetaData *params);
     void writeCachedBoxToFile(const char *type);
 
     struct Chunk {
@@ -167,8 +167,10 @@
     Condition       mChunkReadyCondition;   // Signal that chunks are available
 
     // HEIF writing
+    typedef key_value_pair_t< const char *, Vector<uint16_t> > ItemRefs;
     typedef struct _ItemInfo {
         bool isGrid() const { return !strcmp("grid", itemType); }
+        bool isImage() const { return !strcmp("hvc1", itemType) || isGrid(); }
         const char *itemType;
         uint16_t itemId;
         bool isPrimary;
@@ -188,7 +190,7 @@
             };
         };
         Vector<uint16_t> properties;
-        Vector<uint16_t> dimgRefs;
+        Vector<ItemRefs> refsList;
     } ItemInfo;
 
     typedef struct _ItemProperty {
@@ -204,6 +206,7 @@
     uint32_t mPrimaryItemId;
     uint32_t mAssociationEntryCount;
     uint32_t mNumGrids;
+    bool mHasRefs;
     Vector<ItemInfo> mItems;
     Vector<ItemProperty> mProperties;
 
@@ -252,11 +255,12 @@
     void initInternal(int fd, bool isFirstSession);
 
     // Acquire lock before calling these methods
-    off64_t addSample_l(MediaBuffer *buffer, bool usePrefix, size_t *bytesWritten);
+    off64_t addSample_l(MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten);
     void addLengthPrefixedSample_l(MediaBuffer *buffer);
     void addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer);
     uint16_t addProperty_l(const ItemProperty &);
     uint16_t addItem_l(const ItemInfo &);
+    void addRefs_l(uint16_t itemId, const ItemRefs &);
 
     bool exceedsFileSizeLimit();
     bool use32BitFileOffset() const;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 48a1224..ad02004 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -61,9 +61,11 @@
     };
 
     enum BufferFlags {
-        BUFFER_FLAG_SYNCFRAME   = 1,
-        BUFFER_FLAG_CODECCONFIG = 2,
-        BUFFER_FLAG_EOS         = 4,
+        BUFFER_FLAG_SYNCFRAME     = 1,
+        BUFFER_FLAG_CODECCONFIG   = 2,
+        BUFFER_FLAG_EOS           = 4,
+        BUFFER_FLAG_PARTIAL_FRAME = 8,
+        BUFFER_FLAG_MUXER_DATA    = 16,
     };
 
     enum {
@@ -369,6 +371,7 @@
 
     bool mHaveInputSurface;
     bool mHavePendingInputBuffers;
+    bool mCpuBoostRequested;
 
     std::shared_ptr<BufferChannelBase> mBufferChannel;
 
@@ -425,6 +428,7 @@
 
     uint64_t getGraphicBufferSize();
     void addResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
+    void requestCpuBoostIfNeeded();
 
     bool hasPendingBuffer(int portIndex);
     bool hasPendingBuffer();
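
The two new flags surface through the existing flag plumbing: BUFFER_FLAG_PARTIAL_FRAME marks a buffer that carries only part of a frame and lines up with the Java-side constant (8) in MediaCodecConstants.h below. A minimal sketch of reading it on the synchronous dequeue path (helper name and timeout are illustrative):

    #include <media/stagefright/MediaCodec.h>

    using namespace android;

    // Dequeue one output buffer and report whether it is a partial frame.
    bool drainOneBuffer(const sp<MediaCodec> &codec, bool *isPartial) {
        size_t index, offset, size;
        int64_t timeUs;
        uint32_t flags;
        if (codec->dequeueOutputBuffer(
                &index, &offset, &size, &timeUs, &flags, 10000ll /* timeoutUs */) != OK) {
            return false;
        }
        *isPartial = (flags & MediaCodec::BUFFER_FLAG_PARTIAL_FRAME) != 0;
        codec->releaseOutputBuffer(index);
        return true;
    }
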
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
new file mode 100644
index 0000000..3ef4c0e
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef MEDIA_CODEC_CONSTANTS_H_
+#define MEDIA_CODEC_CONSTANTS_H_
+
+namespace {
+
+// from MediaCodecInfo.java
+constexpr int32_t AVCProfileBaseline = 0x01;
+constexpr int32_t AVCProfileMain     = 0x02;
+constexpr int32_t AVCProfileExtended = 0x04;
+constexpr int32_t AVCProfileHigh     = 0x08;
+constexpr int32_t AVCProfileHigh10   = 0x10;
+constexpr int32_t AVCProfileHigh422  = 0x20;
+constexpr int32_t AVCProfileHigh444  = 0x40;
+constexpr int32_t AVCProfileConstrainedBaseline = 0x10000;
+constexpr int32_t AVCProfileConstrainedHigh     = 0x80000;
+
+constexpr int32_t AVCLevel1       = 0x01;
+constexpr int32_t AVCLevel1b      = 0x02;
+constexpr int32_t AVCLevel11      = 0x04;
+constexpr int32_t AVCLevel12      = 0x08;
+constexpr int32_t AVCLevel13      = 0x10;
+constexpr int32_t AVCLevel2       = 0x20;
+constexpr int32_t AVCLevel21      = 0x40;
+constexpr int32_t AVCLevel22      = 0x80;
+constexpr int32_t AVCLevel3       = 0x100;
+constexpr int32_t AVCLevel31      = 0x200;
+constexpr int32_t AVCLevel32      = 0x400;
+constexpr int32_t AVCLevel4       = 0x800;
+constexpr int32_t AVCLevel41      = 0x1000;
+constexpr int32_t AVCLevel42      = 0x2000;
+constexpr int32_t AVCLevel5       = 0x4000;
+constexpr int32_t AVCLevel51      = 0x8000;
+constexpr int32_t AVCLevel52      = 0x10000;
+
+constexpr int32_t H263ProfileBaseline             = 0x01;
+constexpr int32_t H263ProfileH320Coding           = 0x02;
+constexpr int32_t H263ProfileBackwardCompatible   = 0x04;
+constexpr int32_t H263ProfileISWV2                = 0x08;
+constexpr int32_t H263ProfileISWV3                = 0x10;
+constexpr int32_t H263ProfileHighCompression      = 0x20;
+constexpr int32_t H263ProfileInternet             = 0x40;
+constexpr int32_t H263ProfileInterlace            = 0x80;
+constexpr int32_t H263ProfileHighLatency          = 0x100;
+
+constexpr int32_t H263Level10      = 0x01;
+constexpr int32_t H263Level20      = 0x02;
+constexpr int32_t H263Level30      = 0x04;
+constexpr int32_t H263Level40      = 0x08;
+constexpr int32_t H263Level45      = 0x10;
+constexpr int32_t H263Level50      = 0x20;
+constexpr int32_t H263Level60      = 0x40;
+constexpr int32_t H263Level70      = 0x80;
+
+constexpr int32_t MPEG4ProfileSimple              = 0x01;
+constexpr int32_t MPEG4ProfileSimpleScalable      = 0x02;
+constexpr int32_t MPEG4ProfileCore                = 0x04;
+constexpr int32_t MPEG4ProfileMain                = 0x08;
+constexpr int32_t MPEG4ProfileNbit                = 0x10;
+constexpr int32_t MPEG4ProfileScalableTexture     = 0x20;
+constexpr int32_t MPEG4ProfileSimpleFace          = 0x40;
+constexpr int32_t MPEG4ProfileSimpleFBA           = 0x80;
+constexpr int32_t MPEG4ProfileBasicAnimated       = 0x100;
+constexpr int32_t MPEG4ProfileHybrid              = 0x200;
+constexpr int32_t MPEG4ProfileAdvancedRealTime    = 0x400;
+constexpr int32_t MPEG4ProfileCoreScalable        = 0x800;
+constexpr int32_t MPEG4ProfileAdvancedCoding      = 0x1000;
+constexpr int32_t MPEG4ProfileAdvancedCore        = 0x2000;
+constexpr int32_t MPEG4ProfileAdvancedScalable    = 0x4000;
+constexpr int32_t MPEG4ProfileAdvancedSimple      = 0x8000;
+
+constexpr int32_t MPEG4Level0      = 0x01;
+constexpr int32_t MPEG4Level0b     = 0x02;
+constexpr int32_t MPEG4Level1      = 0x04;
+constexpr int32_t MPEG4Level2      = 0x08;
+constexpr int32_t MPEG4Level3      = 0x10;
+constexpr int32_t MPEG4Level3b     = 0x18;
+constexpr int32_t MPEG4Level4      = 0x20;
+constexpr int32_t MPEG4Level4a     = 0x40;
+constexpr int32_t MPEG4Level5      = 0x80;
+constexpr int32_t MPEG4Level6      = 0x100;
+
+constexpr int32_t MPEG2ProfileSimple              = 0x00;
+constexpr int32_t MPEG2ProfileMain                = 0x01;
+constexpr int32_t MPEG2Profile422                 = 0x02;
+constexpr int32_t MPEG2ProfileSNR                 = 0x03;
+constexpr int32_t MPEG2ProfileSpatial             = 0x04;
+constexpr int32_t MPEG2ProfileHigh                = 0x05;
+
+constexpr int32_t MPEG2LevelLL     = 0x00;
+constexpr int32_t MPEG2LevelML     = 0x01;
+constexpr int32_t MPEG2LevelH14    = 0x02;
+constexpr int32_t MPEG2LevelHL     = 0x03;
+constexpr int32_t MPEG2LevelHP     = 0x04;
+
+constexpr int32_t AACObjectMain       = 1;
+constexpr int32_t AACObjectLC         = 2;
+constexpr int32_t AACObjectSSR        = 3;
+constexpr int32_t AACObjectLTP        = 4;
+constexpr int32_t AACObjectHE         = 5;
+constexpr int32_t AACObjectScalable   = 6;
+constexpr int32_t AACObjectERLC       = 17;
+constexpr int32_t AACObjectERScalable = 20;
+constexpr int32_t AACObjectLD         = 23;
+constexpr int32_t AACObjectHE_PS      = 29;
+constexpr int32_t AACObjectELD        = 39;
+constexpr int32_t AACObjectXHE        = 42;
+
+constexpr int32_t VP8Level_Version0 = 0x01;
+constexpr int32_t VP8Level_Version1 = 0x02;
+constexpr int32_t VP8Level_Version2 = 0x04;
+constexpr int32_t VP8Level_Version3 = 0x08;
+
+constexpr int32_t VP8ProfileMain = 0x01;
+
+constexpr int32_t VP9Profile0 = 0x01;
+constexpr int32_t VP9Profile1 = 0x02;
+constexpr int32_t VP9Profile2 = 0x04;
+constexpr int32_t VP9Profile3 = 0x08;
+constexpr int32_t VP9Profile2HDR = 0x1000;
+constexpr int32_t VP9Profile3HDR = 0x2000;
+
+constexpr int32_t VP9Level1  = 0x1;
+constexpr int32_t VP9Level11 = 0x2;
+constexpr int32_t VP9Level2  = 0x4;
+constexpr int32_t VP9Level21 = 0x8;
+constexpr int32_t VP9Level3  = 0x10;
+constexpr int32_t VP9Level31 = 0x20;
+constexpr int32_t VP9Level4  = 0x40;
+constexpr int32_t VP9Level41 = 0x80;
+constexpr int32_t VP9Level5  = 0x100;
+constexpr int32_t VP9Level51 = 0x200;
+constexpr int32_t VP9Level52 = 0x400;
+constexpr int32_t VP9Level6  = 0x800;
+constexpr int32_t VP9Level61 = 0x1000;
+constexpr int32_t VP9Level62 = 0x2000;
+
+constexpr int32_t HEVCProfileMain        = 0x01;
+constexpr int32_t HEVCProfileMain10      = 0x02;
+constexpr int32_t HEVCProfileMainStill   = 0x04;
+constexpr int32_t HEVCProfileMain10HDR10 = 0x1000;
+
+constexpr int32_t HEVCMainTierLevel1  = 0x1;
+constexpr int32_t HEVCHighTierLevel1  = 0x2;
+constexpr int32_t HEVCMainTierLevel2  = 0x4;
+constexpr int32_t HEVCHighTierLevel2  = 0x8;
+constexpr int32_t HEVCMainTierLevel21 = 0x10;
+constexpr int32_t HEVCHighTierLevel21 = 0x20;
+constexpr int32_t HEVCMainTierLevel3  = 0x40;
+constexpr int32_t HEVCHighTierLevel3  = 0x80;
+constexpr int32_t HEVCMainTierLevel31 = 0x100;
+constexpr int32_t HEVCHighTierLevel31 = 0x200;
+constexpr int32_t HEVCMainTierLevel4  = 0x400;
+constexpr int32_t HEVCHighTierLevel4  = 0x800;
+constexpr int32_t HEVCMainTierLevel41 = 0x1000;
+constexpr int32_t HEVCHighTierLevel41 = 0x2000;
+constexpr int32_t HEVCMainTierLevel5  = 0x4000;
+constexpr int32_t HEVCHighTierLevel5  = 0x8000;
+constexpr int32_t HEVCMainTierLevel51 = 0x10000;
+constexpr int32_t HEVCHighTierLevel51 = 0x20000;
+constexpr int32_t HEVCMainTierLevel52 = 0x40000;
+constexpr int32_t HEVCHighTierLevel52 = 0x80000;
+constexpr int32_t HEVCMainTierLevel6  = 0x100000;
+constexpr int32_t HEVCHighTierLevel6  = 0x200000;
+constexpr int32_t HEVCMainTierLevel61 = 0x400000;
+constexpr int32_t HEVCHighTierLevel61 = 0x800000;
+constexpr int32_t HEVCMainTierLevel62 = 0x1000000;
+constexpr int32_t HEVCHighTierLevel62 = 0x2000000;
+
+constexpr int32_t DolbyVisionProfileDvavPer = 0x1;
+constexpr int32_t DolbyVisionProfileDvavPen = 0x2;
+constexpr int32_t DolbyVisionProfileDvheDer = 0x4;
+constexpr int32_t DolbyVisionProfileDvheDen = 0x8;
+constexpr int32_t DolbyVisionProfileDvheDtr = 0x10;
+constexpr int32_t DolbyVisionProfileDvheStn = 0x20;
+constexpr int32_t DolbyVisionProfileDvheDth = 0x40;
+constexpr int32_t DolbyVisionProfileDvheDtb = 0x80;
+constexpr int32_t DolbyVisionProfileDvheSt = 0x100;
+constexpr int32_t DolbyVisionProfileDvavSe = 0x200;
+
+constexpr int32_t DolbyVisionLevelHd24    = 0x1;
+constexpr int32_t DolbyVisionLevelHd30    = 0x2;
+constexpr int32_t DolbyVisionLevelFhd24   = 0x4;
+constexpr int32_t DolbyVisionLevelFhd30   = 0x8;
+constexpr int32_t DolbyVisionLevelFhd60   = 0x10;
+constexpr int32_t DolbyVisionLevelUhd24   = 0x20;
+constexpr int32_t DolbyVisionLevelUhd30   = 0x40;
+constexpr int32_t DolbyVisionLevelUhd48   = 0x80;
+constexpr int32_t DolbyVisionLevelUhd60   = 0x100;
+
+constexpr int32_t BITRATE_MODE_CBR = 2;
+constexpr int32_t BITRATE_MODE_CQ = 0;
+constexpr int32_t BITRATE_MODE_VBR = 1;
+
+constexpr int32_t COLOR_Format12bitRGB444             = 3;
+constexpr int32_t COLOR_Format16bitARGB1555           = 5;
+constexpr int32_t COLOR_Format16bitARGB4444           = 4;
+constexpr int32_t COLOR_Format16bitBGR565             = 7;
+constexpr int32_t COLOR_Format16bitRGB565             = 6;
+constexpr int32_t COLOR_Format18bitARGB1665           = 9;
+constexpr int32_t COLOR_Format18BitBGR666             = 41;
+constexpr int32_t COLOR_Format18bitRGB666             = 8;
+constexpr int32_t COLOR_Format19bitARGB1666           = 10;
+constexpr int32_t COLOR_Format24BitABGR6666           = 43;
+constexpr int32_t COLOR_Format24bitARGB1887           = 13;
+constexpr int32_t COLOR_Format24BitARGB6666           = 42;
+constexpr int32_t COLOR_Format24bitBGR888             = 12;
+constexpr int32_t COLOR_Format24bitRGB888             = 11;
+constexpr int32_t COLOR_Format25bitARGB1888           = 14;
+constexpr int32_t COLOR_Format32bitABGR8888           = 0x7F00A000;
+constexpr int32_t COLOR_Format32bitARGB8888           = 16;
+constexpr int32_t COLOR_Format32bitBGRA8888           = 15;
+constexpr int32_t COLOR_Format8bitRGB332              = 2;
+constexpr int32_t COLOR_FormatCbYCrY                  = 27;
+constexpr int32_t COLOR_FormatCrYCbY                  = 28;
+constexpr int32_t COLOR_FormatL16                     = 36;
+constexpr int32_t COLOR_FormatL2                      = 33;
+constexpr int32_t COLOR_FormatL24                     = 37;
+constexpr int32_t COLOR_FormatL32                     = 38;
+constexpr int32_t COLOR_FormatL4                      = 34;
+constexpr int32_t COLOR_FormatL8                      = 35;
+constexpr int32_t COLOR_FormatMonochrome              = 1;
+constexpr int32_t COLOR_FormatRawBayer10bit           = 31;
+constexpr int32_t COLOR_FormatRawBayer8bit            = 30;
+constexpr int32_t COLOR_FormatRawBayer8bitcompressed  = 32;
+constexpr int32_t COLOR_FormatRGBAFlexible            = 0x7F36A888;
+constexpr int32_t COLOR_FormatRGBFlexible             = 0x7F36B888;
+constexpr int32_t COLOR_FormatSurface                 = 0x7F000789;
+constexpr int32_t COLOR_FormatYCbYCr                  = 25;
+constexpr int32_t COLOR_FormatYCrYCb                  = 26;
+constexpr int32_t COLOR_FormatYUV411PackedPlanar      = 18;
+constexpr int32_t COLOR_FormatYUV411Planar            = 17;
+constexpr int32_t COLOR_FormatYUV420Flexible          = 0x7F420888;
+constexpr int32_t COLOR_FormatYUV420PackedPlanar      = 20;
+constexpr int32_t COLOR_FormatYUV420PackedSemiPlanar  = 39;
+constexpr int32_t COLOR_FormatYUV420Planar            = 19;
+constexpr int32_t COLOR_FormatYUV420SemiPlanar        = 21;
+constexpr int32_t COLOR_FormatYUV422Flexible          = 0x7F422888;
+constexpr int32_t COLOR_FormatYUV422PackedPlanar      = 23;
+constexpr int32_t COLOR_FormatYUV422PackedSemiPlanar  = 40;
+constexpr int32_t COLOR_FormatYUV422Planar            = 22;
+constexpr int32_t COLOR_FormatYUV422SemiPlanar        = 24;
+constexpr int32_t COLOR_FormatYUV444Flexible          = 0x7F444888;
+constexpr int32_t COLOR_FormatYUV444Interleaved       = 29;
+constexpr int32_t COLOR_QCOM_FormatYUV420SemiPlanar   = 0x7fa30c00;
+constexpr int32_t COLOR_TI_FormatYUV420PackedSemiPlanar = 0x7f000100;
+
+constexpr char FEATURE_AdaptivePlayback[]       = "adaptive-playback";
+constexpr char FEATURE_IntraRefresh[]           = "intra-refresh";
+constexpr char FEATURE_PartialFrame[]           = "partial-frame";
+constexpr char FEATURE_SecurePlayback[]         = "secure-playback";
+constexpr char FEATURE_TunneledPlayback[]       = "tunneled-playback";
+
+// from MediaFormat.java
+constexpr char MIMETYPE_VIDEO_VP8[] = "video/x-vnd.on2.vp8";
+constexpr char MIMETYPE_VIDEO_VP9[] = "video/x-vnd.on2.vp9";
+constexpr char MIMETYPE_VIDEO_AVC[] = "video/avc";
+constexpr char MIMETYPE_VIDEO_HEVC[] = "video/hevc";
+constexpr char MIMETYPE_VIDEO_MPEG4[] = "video/mp4v-es";
+constexpr char MIMETYPE_VIDEO_H263[] = "video/3gpp";
+constexpr char MIMETYPE_VIDEO_MPEG2[] = "video/mpeg2";
+constexpr char MIMETYPE_VIDEO_RAW[] = "video/raw";
+constexpr char MIMETYPE_VIDEO_DOLBY_VISION[] = "video/dolby-vision";
+constexpr char MIMETYPE_VIDEO_SCRAMBLED[] = "video/scrambled";
+
+constexpr char MIMETYPE_AUDIO_AMR_NB[] = "audio/3gpp";
+constexpr char MIMETYPE_AUDIO_AMR_WB[] = "audio/amr-wb";
+constexpr char MIMETYPE_AUDIO_MPEG[] = "audio/mpeg";
+constexpr char MIMETYPE_AUDIO_AAC[] = "audio/mp4a-latm";
+constexpr char MIMETYPE_AUDIO_QCELP[] = "audio/qcelp";
+constexpr char MIMETYPE_AUDIO_VORBIS[] = "audio/vorbis";
+constexpr char MIMETYPE_AUDIO_OPUS[] = "audio/opus";
+constexpr char MIMETYPE_AUDIO_G711_ALAW[] = "audio/g711-alaw";
+constexpr char MIMETYPE_AUDIO_G711_MLAW[] = "audio/g711-mlaw";
+constexpr char MIMETYPE_AUDIO_RAW[] = "audio/raw";
+constexpr char MIMETYPE_AUDIO_FLAC[] = "audio/flac";
+constexpr char MIMETYPE_AUDIO_MSGSM[] = "audio/gsm";
+constexpr char MIMETYPE_AUDIO_AC3[] = "audio/ac3";
+constexpr char MIMETYPE_AUDIO_EAC3[] = "audio/eac3";
+constexpr char MIMETYPE_AUDIO_SCRAMBLED[] = "audio/scrambled";
+
+constexpr char MIMETYPE_IMAGE_ANDROID_HEIC[] = "image/vnd.android.heic";
+
+constexpr char MIMETYPE_TEXT_CEA_608[] = "text/cea-608";
+constexpr char MIMETYPE_TEXT_CEA_708[] = "text/cea-708";
+constexpr char MIMETYPE_TEXT_SUBRIP[] = "application/x-subrip";
+constexpr char MIMETYPE_TEXT_VTT[] = "text/vtt";
+
+constexpr int32_t COLOR_RANGE_FULL = 1;
+constexpr int32_t COLOR_RANGE_LIMITED = 2;
+constexpr int32_t COLOR_STANDARD_BT2020 = 6;
+constexpr int32_t COLOR_STANDARD_BT601_NTSC = 4;
+constexpr int32_t COLOR_STANDARD_BT601_PAL = 2;
+constexpr int32_t COLOR_STANDARD_BT709 = 1;
+constexpr int32_t COLOR_TRANSFER_HLG = 7;
+constexpr int32_t COLOR_TRANSFER_LINEAR = 1;
+constexpr int32_t COLOR_TRANSFER_SDR_VIDEO = 3;
+constexpr int32_t COLOR_TRANSFER_ST2084 = 6;
+
+constexpr char KEY_AAC_DRC_ATTENUATION_FACTOR[] = "aac-drc-cut-level";
+constexpr char KEY_AAC_DRC_BOOST_FACTOR[] = "aac-drc-boost-level";
+constexpr char KEY_AAC_DRC_EFFECT_TYPE[] = "aac-drc-effect-type";
+constexpr char KEY_AAC_DRC_HEAVY_COMPRESSION[] = "aac-drc-heavy-compression";
+constexpr char KEY_AAC_DRC_TARGET_REFERENCE_LEVEL[] = "aac-target-ref-level";
+constexpr char KEY_AAC_ENCODED_TARGET_LEVEL[] = "aac-encoded-target-level";
+constexpr char KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT[] = "aac-max-output-channel_count";
+constexpr char KEY_AAC_PROFILE[] = "aac-profile";
+constexpr char KEY_AAC_SBR_MODE[] = "aac-sbr-mode";
+constexpr char KEY_AUDIO_SESSION_ID[] = "audio-session-id";
+constexpr char KEY_BIT_RATE[] = "bitrate";
+constexpr char KEY_BITRATE_MODE[] = "bitrate-mode";
+constexpr char KEY_CA_SESSION_ID[] = "ca-session-id";
+constexpr char KEY_CA_SYSTEM_ID[] = "ca-system-id";
+constexpr char KEY_CAPTURE_RATE[] = "capture-rate";
+constexpr char KEY_CHANNEL_COUNT[] = "channel-count";
+constexpr char KEY_CHANNEL_MASK[] = "channel-mask";
+constexpr char KEY_COLOR_FORMAT[] = "color-format";
+constexpr char KEY_COLOR_RANGE[] = "color-range";
+constexpr char KEY_COLOR_STANDARD[] = "color-standard";
+constexpr char KEY_COLOR_TRANSFER[] = "color-transfer";
+constexpr char KEY_COMPLEXITY[] = "complexity";
+constexpr char KEY_DURATION[] = "durationUs";
+constexpr char KEY_FEATURE_[] = "feature-";
+constexpr char KEY_FLAC_COMPRESSION_LEVEL[] = "flac-compression-level";
+constexpr char KEY_FRAME_RATE[] = "frame-rate";
+constexpr char KEY_GRID_COLUMNS[] = "grid-cols";
+constexpr char KEY_GRID_ROWS[] = "grid-rows";
+constexpr char KEY_HDR_STATIC_INFO[] = "hdr-static-info";
+constexpr char KEY_HEIGHT[] = "height";
+constexpr char KEY_I_FRAME_INTERVAL[] = "i-frame-interval";
+constexpr char KEY_INTRA_REFRESH_PERIOD[] = "intra-refresh-period";
+constexpr char KEY_IS_ADTS[] = "is-adts";
+constexpr char KEY_IS_AUTOSELECT[] = "is-autoselect";
+constexpr char KEY_IS_DEFAULT[] = "is-default";
+constexpr char KEY_IS_FORCED_SUBTITLE[] = "is-forced-subtitle";
+constexpr char KEY_IS_TIMED_TEXT[] = "is-timed-text";
+constexpr char KEY_LANGUAGE[] = "language";
+constexpr char KEY_LATENCY[] = "latency";
+constexpr char KEY_LEVEL[] = "level";
+constexpr char KEY_MAX_BIT_RATE[] = "max-bitrate";
+constexpr char KEY_MAX_HEIGHT[] = "max-height";
+constexpr char KEY_MAX_INPUT_SIZE[] = "max-input-size";
+constexpr char KEY_MAX_WIDTH[] = "max-width";
+constexpr char KEY_MIME[] = "mime";
+constexpr char KEY_OPERATING_RATE[] = "operating-rate";
+constexpr char KEY_OUTPUT_REORDER_DEPTH[] = "output-reorder-depth";
+constexpr char KEY_PCM_ENCODING[] = "pcm-encoding";
+constexpr char KEY_PRIORITY[] = "priority";
+constexpr char KEY_PROFILE[] = "profile";
+constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
+constexpr char KEY_QUALITY[] = "quality";
+constexpr char KEY_REPEAT_PREVIOUS_FRAME_AFTER[] = "repeat-previous-frame-after";
+constexpr char KEY_ROTATION[] = "rotation-degrees";
+constexpr char KEY_SAMPLE_RATE[] = "sample-rate";
+constexpr char KEY_SLICE_HEIGHT[] = "slice-height";
+constexpr char KEY_STRIDE[] = "stride";
+constexpr char KEY_TEMPORAL_LAYERING[] = "ts-schema";
+constexpr char KEY_TILE_HEIGHT[] = "tile-height";
+constexpr char KEY_TILE_WIDTH[] = "tile-width";
+constexpr char KEY_TRACK_ID[] = "track-id";
+constexpr char KEY_WIDTH[] = "width";
+
+// from MediaCodec.java
+constexpr int32_t ERROR_INSUFFICIENT_OUTPUT_PROTECTION = 4;
+constexpr int32_t ERROR_INSUFFICIENT_RESOURCE = 1100;
+constexpr int32_t ERROR_KEY_EXPIRED = 2;
+constexpr int32_t ERROR_NO_KEY = 1;
+constexpr int32_t ERROR_RECLAIMED = 1101;
+constexpr int32_t ERROR_RESOURCE_BUSY = 3;
+constexpr int32_t ERROR_SESSION_NOT_OPENED = 5;
+constexpr int32_t ERROR_UNSUPPORTED_OPERATION = 6;
+constexpr char CODEC[] = "android.media.mediacodec.codec";
+constexpr char ENCODER[] = "android.media.mediacodec.encoder";
+constexpr char HEIGHT[] = "android.media.mediacodec.height";
+constexpr char MIME_TYPE[] = "android.media.mediacodec.mime";
+constexpr char MODE[] = "android.media.mediacodec.mode";
+constexpr char MODE_AUDIO[] = "audio";
+constexpr char MODE_VIDEO[] = "video";
+constexpr char ROTATION[] = "android.media.mediacodec.rotation";
+constexpr char SECURE[] = "android.media.mediacodec.secure";
+constexpr char WIDTH[] = "android.media.mediacodec.width";
+
+constexpr int32_t BUFFER_FLAG_CODEC_CONFIG = 2;
+constexpr int32_t BUFFER_FLAG_END_OF_STREAM = 4;
+constexpr int32_t BUFFER_FLAG_KEY_FRAME = 1;
+constexpr int32_t BUFFER_FLAG_PARTIAL_FRAME = 8;
+constexpr int32_t BUFFER_FLAG_SYNC_FRAME = 1;
+constexpr int32_t CONFIGURE_FLAG_ENCODE = 1;
+constexpr int32_t CRYPTO_MODE_AES_CBC     = 2;
+constexpr int32_t CRYPTO_MODE_AES_CTR     = 1;
+constexpr int32_t CRYPTO_MODE_UNENCRYPTED = 0;
+constexpr int32_t INFO_OUTPUT_BUFFERS_CHANGED = -3;
+constexpr int32_t INFO_OUTPUT_FORMAT_CHANGED  = -2;
+constexpr int32_t INFO_TRY_AGAIN_LATER        = -1;
+constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT               = 1;
+constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT_WITH_CROPPING = 2;
+constexpr char PARAMETER_KEY_REQUEST_SYNC_FRAME[] = "request-sync";
+constexpr char PARAMETER_KEY_SUSPEND[] = "drop-input-frames";
+constexpr char PARAMETER_KEY_VIDEO_BITRATE[] = "video-bitrate";
+
+}
+
+#endif  // MEDIA_CODEC_CONSTANTS_H_
+
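
These constants mirror the Java-side MediaCodecInfo, MediaFormat and MediaCodec definitions so native code can build and inspect formats using the same names and values. A small sketch of how they combine with an AMessage-based format (the chosen values are illustrative):

    #include <media/stagefright/MediaCodecConstants.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Describe a 720p AVC encode using the shared key and value constants.
    sp<AMessage> makeAvcFormat() {
        sp<AMessage> format = new AMessage;
        format->setString(KEY_MIME, MIMETYPE_VIDEO_AVC);
        format->setInt32(KEY_WIDTH, 1280);
        format->setInt32(KEY_HEIGHT, 720);
        format->setInt32(KEY_BIT_RATE, 2000000);
        format->setInt32(KEY_FRAME_RATE, 30);
        format->setInt32(KEY_BITRATE_MODE, BITRATE_MODE_VBR);
        format->setInt32(KEY_PROFILE, AVCProfileHigh);
        format->setInt32(KEY_LEVEL, AVCLevel41);
        return format;
    }
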
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index fb9f5bd..d5f4b35 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -22,7 +22,6 @@
 
 #include <media/IMediaExtractor.h>
 #include <media/MediaExtractor.h>
-#include <utils/List.h>
 
 namespace android {
 
@@ -40,15 +39,15 @@
 
 private:
     static Mutex gPluginMutex;
-    static std::shared_ptr<List<sp<ExtractorPlugin>>> gPlugins;
+    static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
     static bool gPluginsRegistered;
 
     static void RegisterExtractorsInApk(
-            const char *apkPath, List<sp<ExtractorPlugin>> &pluginList);
+            const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractorsInSystem(
-            const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList);
+            const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractor(
-            const sp<ExtractorPlugin> &plugin, List<sp<ExtractorPlugin>> &pluginList);
+            const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
 
     static MediaExtractor::CreatorFunc sniff(DataSourceBase *source,
             float *confidence, void **meta, MediaExtractor::FreeMetaFunc *freeMeta,
diff --git a/media/libstagefright/include/media/stagefright/PersistentSurface.h b/media/libstagefright/include/media/stagefright/PersistentSurface.h
index d8b75a2..49b36c9 100644
--- a/media/libstagefright/include/media/stagefright/PersistentSurface.h
+++ b/media/libstagefright/include/media/stagefright/PersistentSurface.h
@@ -18,22 +18,34 @@
 
 #define PERSISTENT_SURFACE_H_
 
-#include <gui/IGraphicBufferProducer.h>
 #include <android/IGraphicBufferSource.h>
-#include <media/stagefright/foundation/ABase.h>
 #include <binder/Parcel.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/HybridInterface.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <media/stagefright/foundation/ABase.h>
+
+using android::hidl::base::V1_0::IBase;
 
 namespace android {
 
 struct PersistentSurface : public RefBase {
     PersistentSurface() {}
 
+    // create an OMX persistent surface
     PersistentSurface(
             const sp<IGraphicBufferProducer>& bufferProducer,
             const sp<IGraphicBufferSource>& bufferSource) :
         mBufferProducer(bufferProducer),
         mBufferSource(bufferSource) { }
 
+    // create a HIDL persistent surface
+    PersistentSurface(
+            const sp<IGraphicBufferProducer>& bufferProducer,
+            const sp<IBase>& hidlTarget) :
+        mBufferProducer(bufferProducer),
+        mHidlTarget(hidlTarget) { }
+
     sp<IGraphicBufferProducer> getBufferProducer() const {
         return mBufferProducer;
     }
@@ -42,9 +54,25 @@
         return mBufferSource;
     }
 
+    sp<IBase> getHidlTarget() const {
+        return mHidlTarget;
+    }
+
     status_t writeToParcel(Parcel *parcel) const {
         parcel->writeStrongBinder(IInterface::asBinder(mBufferProducer));
+        // this can handle null
         parcel->writeStrongBinder(IInterface::asBinder(mBufferSource));
+        // write hidl target
+        if (mHidlTarget != nullptr) {
+            HalToken token;
+            bool result = createHalToken(mHidlTarget, &token);
+            parcel->writeBool(result);
+            if (result) {
+                parcel->writeByteArray(token.size(), token.data());
+            }
+        } else {
+            parcel->writeBool(false);
+        }
         return NO_ERROR;
     }
 
@@ -53,12 +81,24 @@
                 parcel->readStrongBinder());
         mBufferSource = interface_cast<IGraphicBufferSource>(
                 parcel->readStrongBinder());
+        // read hidl target
+        bool haveHidlTarget = parcel->readBool();
+        if (haveHidlTarget) {
+            std::vector<uint8_t> tokenVector;
+            parcel->readByteVector(&tokenVector);
+            HalToken token = HalToken(tokenVector);
+            mHidlTarget = retrieveHalInterface(token);
+            deleteHalToken(token);
+        } else {
+            mHidlTarget.clear();
+        }
         return NO_ERROR;
     }
 
 private:
     sp<IGraphicBufferProducer> mBufferProducer;
     sp<IGraphicBufferSource> mBufferSource;
+    sp<IBase> mHidlTarget;
 
     DISALLOW_EVIL_CONSTRUCTORS(PersistentSurface);
 };
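
A minimal usage sketch (not part of the patch) of the new HIDL-aware PersistentSurface, showing how the added constructor and the HAL-token path in writeToParcel()/readFromParcel() fit together. The `producer` (sp<IGraphicBufferProducer>) and `hidlTarget` (sp<IBase>) are assumed to have been obtained elsewhere.

    // Construct with a HIDL target instead of an OMX IGraphicBufferSource.
    sp<PersistentSurface> surface = new PersistentSurface(producer, hidlTarget);

    Parcel parcel;
    surface->writeToParcel(&parcel);    // writes the producer, a null OMX source, and a HAL token

    parcel.setDataPosition(0);
    sp<PersistentSurface> copy = new PersistentSurface();
    copy->readFromParcel(&parcel);      // recreates the HIDL interface from the token
    sp<IBase> target = copy->getHidlTarget();  // null if the token could not be created or resolved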
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 3eb98f3..3e6942b 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -62,6 +62,7 @@
         "libmedia_omx",
         "libstagefright_foundation",
         "libstagefright_xmlparser",
+        "libutils",
     ],
 
     cflags: [
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index ff58eb6..7d2c2dd 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -354,7 +354,7 @@
       mQuirks(0),
       mBufferIDCount(0),
       mRestorePtsFailed(false),
-      mMaxTimestampGapUs(-1ll),
+      mMaxTimestampGapUs(0ll),
       mPrevOriginalTimeUs(-1ll),
       mPrevModifiedTimeUs(-1ll)
 {
@@ -686,6 +686,7 @@
 
     CLOG_CONFIG(setPortMode, "%s(%d), port %d", asString(mode), mode, portIndex);
 
+    status_t err = OK;
     switch (mode) {
     case IOMX::kPortModeDynamicANWBuffer:
     {
@@ -694,17 +695,19 @@
                 CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
                         "not setting port mode to %s(%d) on output",
                         asString(mode), mode);
-                return StatusFromOMXError(OMX_ErrorUnsupportedIndex);
+                err = StatusFromOMXError(OMX_ErrorUnsupportedIndex);
+                break;
             }
 
-            status_t err = enableNativeBuffers_l(
+            err = enableNativeBuffers_l(
                     portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
             if (err != OK) {
-                return err;
+                break;
             }
         }
         (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
-        return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL);
+        err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL);
+        break;
     }
 
     case IOMX::kPortModeDynamicNativeHandle:
@@ -712,13 +715,15 @@
         if (portIndex != kPortIndexInput) {
             CLOG_ERROR(setPortMode, BAD_VALUE,
                     "%s(%d) mode is only supported on input port", asString(mode), mode);
-            return BAD_VALUE;
+            err = BAD_VALUE;
+            break;
         }
         (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
         (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
 
         MetadataBufferType metaType = kMetadataBufferTypeNativeHandleSource;
-        return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, &metaType);
+        err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, &metaType);
+        break;
     }
 
     case IOMX::kPortModePresetSecureBuffer:
@@ -726,7 +731,8 @@
         // Allow on both input and output.
         (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
         (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
-        return enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_TRUE);
+        err = enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_TRUE);
+        break;
     }
 
     case IOMX::kPortModePresetANWBuffer:
@@ -734,7 +740,8 @@
         if (portIndex != kPortIndexOutput) {
             CLOG_ERROR(setPortMode, BAD_VALUE,
                     "%s(%d) mode is only supported on output port", asString(mode), mode);
-            return BAD_VALUE;
+            err = BAD_VALUE;
+            break;
         }
 
         // Check if we're simulating legacy mode with metadata mode,
@@ -743,7 +750,7 @@
             if (storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL) == OK) {
                 CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
                         "metdata mode enabled successfully");
-                return OK;
+                break;
             }
 
             CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
@@ -754,15 +761,15 @@
 
         // Disable secure buffer and enable graphic buffer
         (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
-        status_t err = enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+        err = enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
         if (err != OK) {
-            return err;
+            break;
         }
 
         // Not running experiment, or metadata is not supported.
         // Disable metadata mode and use legacy mode.
         (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
-        return OK;
+        break;
     }
 
     case IOMX::kPortModePresetByteBuffer:
@@ -771,15 +778,19 @@
         (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
         (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
         (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
-        return OK;
-    }
-
-    default:
         break;
     }
 
-    CLOG_ERROR(setPortMode, BAD_VALUE, "invalid port mode %d", mode);
-    return BAD_VALUE;
+    default:
+        CLOG_ERROR(setPortMode, BAD_VALUE, "invalid port mode %d", mode);
+        err = BAD_VALUE;
+        break;
+    }
+
+    if (err == OK) {
+        mPortMode[portIndex] = mode;
+    }
+    return err;
 }
 
 status_t OMXNodeInstance::enableNativeBuffers_l(
@@ -1057,28 +1068,51 @@
     }
 
     switch (omxBuffer.mBufferType) {
-        case OMXBuffer::kBufferTypePreset:
+        case OMXBuffer::kBufferTypePreset: {
+            if (mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer
+                    && mPortMode[portIndex] != IOMX::kPortModeDynamicNativeHandle) {
+                break;
+            }
             return useBuffer_l(portIndex, NULL, NULL, buffer);
+        }
 
-        case OMXBuffer::kBufferTypeSharedMem:
+        case OMXBuffer::kBufferTypeSharedMem: {
+            if (mPortMode[portIndex] != IOMX::kPortModePresetByteBuffer
+                    && mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer) {
+                break;
+            }
             return useBuffer_l(portIndex, omxBuffer.mMem, NULL, buffer);
+        }
 
-        case OMXBuffer::kBufferTypeANWBuffer:
+        case OMXBuffer::kBufferTypeANWBuffer: {
+            if (mPortMode[portIndex] != IOMX::kPortModePresetANWBuffer) {
+                break;
+            }
             return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
+        }
 
         case OMXBuffer::kBufferTypeHidlMemory: {
+                if (mPortMode[portIndex] != IOMX::kPortModePresetByteBuffer
+                        && mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer
+                        && mPortMode[portIndex] != IOMX::kPortModeDynamicNativeHandle) {
+                    break;
+                }
                 sp<IHidlMemory> hidlMemory = mapMemory(omxBuffer.mHidlMemory);
                 if (hidlMemory == nullptr) {
                     ALOGE("OMXNodeInstance useBuffer() failed to map memory");
                     return NO_MEMORY;
                 }
                 return useBuffer_l(portIndex, NULL, hidlMemory, buffer);
-            }
+        }
         default:
+            return BAD_VALUE;
             break;
     }
 
-    return BAD_VALUE;
+    ALOGE("b/77486542 : bufferType = %d vs. portMode = %d",
+          omxBuffer.mBufferType, mPortMode[portIndex]);
+    android_errorWriteLog(0x534e4554, "77486542");
+    return INVALID_OPERATION;
 }
 
 status_t OMXNodeInstance::useBuffer_l(
@@ -1514,6 +1548,11 @@
         android_errorWriteLog(0x534e4554, "35467458");
         return BAD_VALUE;
     }
+    if (mPortMode[portIndex] != IOMX::kPortModePresetSecureBuffer) {
+        ALOGE("b/77486542");
+        android_errorWriteLog(0x534e4554, "77486542");
+        return INVALID_OPERATION;
+    }
     BufferMeta *buffer_meta = new BufferMeta(portIndex);
 
     OMX_BUFFERHEADERTYPE *header;
@@ -1843,7 +1882,9 @@
         return BAD_VALUE;
     }
 
-    mMaxTimestampGapUs = (int64_t)((OMX_PARAM_U32TYPE*)params)->nU32;
+    // The incoming value is an int32_t carried in an OMX_U32.
+    // Cast to int32_t first, then widen to int64_t.
+    mMaxTimestampGapUs = (int32_t)((OMX_PARAM_U32TYPE*)params)->nU32;
 
     return OK;
 }
@@ -1867,12 +1908,26 @@
         ALOGV("IN  timestamp: %lld -> %lld",
             static_cast<long long>(originalTimeUs),
             static_cast<long long>(timestamp));
+    } else if (mMaxTimestampGapUs < 0ll) {
+        /*
+         * Apply a fixed timestamp gap between adjacent frames.
+         *
+         * This is used in scenarios like still-image capture, where frame
+         * timestamps may move forward or backward. Some encoders silently
+         * drop a frame whose timestamp goes backward (or stays unchanged).
+         */
+        if (mPrevOriginalTimeUs >= 0ll) {
+            timestamp = mPrevModifiedTimeUs - mMaxTimestampGapUs;
+        }
+        ALOGV("IN  timestamp: %lld -> %lld",
+            static_cast<long long>(originalTimeUs),
+            static_cast<long long>(timestamp));
     }
 
     mPrevOriginalTimeUs = originalTimeUs;
     mPrevModifiedTimeUs = timestamp;
 
-    if (mMaxTimestampGapUs > 0ll && !mRestorePtsFailed) {
+    if (mMaxTimestampGapUs != 0ll && !mRestorePtsFailed) {
         mOriginalTimeUs.add(timestamp, originalTimeUs);
     }
 
@@ -1905,7 +1960,7 @@
 void OMXNodeInstance::codecBufferFilled(omx_message &msg) {
     Mutex::Autolock autoLock(mLock);
 
-    if (mMaxTimestampGapUs <= 0ll || mRestorePtsFailed) {
+    if (mMaxTimestampGapUs == 0ll || mRestorePtsFailed) {
         return;
     }
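
A simplified standalone sketch (an assumption-laden free function, not the actual OMXNodeInstance member, with locking and the PTS-restore map omitted) of the fixed-gap branch added to getCodecTimestamp() above: when the configured gap is negative, every frame after the first is stamped previousModified + |gap|, so capture timestamps that jump backward still reach the encoder strictly increasing.

    int64_t applyFixedGap(int64_t originalUs, int64_t gapUs /* < 0 */,
                          int64_t &prevOriginalUs, int64_t &prevModifiedUs) {
        int64_t timestamp = originalUs;
        if (prevOriginalUs >= 0ll) {
            timestamp = prevModifiedUs - gapUs;   // gapUs is negative, so this adds |gapUs|
        }
        prevOriginalUs = originalUs;
        prevModifiedUs = timestamp;
        return timestamp;
    }
    // With gapUs = -33333, inputs 100, 90, 120 (us) come out as 100, 33433, 66766.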
 
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 4946ada..1f3e8c1 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -34,7 +34,12 @@
     const char *mRole;
 
 } kComponents[] = {
+    // two choices for aac decoding.
+    // configurable in media/libstagefright/data/media_codecs_google_audio.xml
+    // default implementation
     { "OMX.google.aac.decoder", "aacdec", "audio_decoder.aac" },
+    // alternate implementation
+    { "OMX.google.xaac.decoder", "xaacdec", "audio_decoder.aac" },
     { "OMX.google.aac.encoder", "aacenc", "audio_encoder.aac" },
     { "OMX.google.amrnb.decoder", "amrdec", "audio_decoder.amrnb" },
     { "OMX.google.amrnb.encoder", "amrnbenc", "audio_encoder.amrnb" },
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index cd0f75c..672a37c 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -154,12 +154,12 @@
     outDef->format.video.nFrameWidth = outputBufferWidth();
     outDef->format.video.nFrameHeight = outputBufferHeight();
     outDef->format.video.eColorFormat = mOutputFormat;
-    outDef->format.video.nStride = outDef->format.video.nFrameWidth;
     outDef->format.video.nSliceHeight = outDef->format.video.nFrameHeight;
 
     int32_t bpp = (mOutputFormat == OMX_COLOR_FormatYUV420Planar16) ? 2 : 1;
+    outDef->format.video.nStride = outDef->format.video.nFrameWidth * bpp;
     outDef->nBufferSize =
-        (outDef->format.video.nStride * outDef->format.video.nSliceHeight * bpp * 3) / 2;
+            (outDef->format.video.nStride * outDef->format.video.nSliceHeight * 3) / 2;
 
     OMX_PARAM_PORTDEFINITIONTYPE *inDef = &editPortInfo(kInputPortIndex)->mDef;
     inDef->format.video.nFrameWidth = mWidth;
@@ -651,7 +651,6 @@
                 return OMX_ErrorBadPortIndex;
             }
 
-            mOutputFormat = OMX_COLOR_FormatYUV420Planar16;
             mHdrStaticInfo = hdrStaticInfoParams->sInfo;
             updatePortDefinitions(false);
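
A quick worked check of the stride change above, with illustrative numbers that are not from the patch: for a 1920x1080 stream using OMX_COLOR_FormatYUV420Planar16 (e.g. 10-bit content stored in 16-bit samples), bpp is 2, so the reported row stride now matches the true byte stride while the total buffer size is unchanged.

    int32_t width = 1920, height = 1080;
    int32_t bpp = 2;                                 // OMX_COLOR_FormatYUV420Planar16
    int32_t stride = width * bpp;                    // 3840 bytes per row (was misreported as 1920)
    int32_t bufferSize = (stride * height * 3) / 2;  // 6220800 bytes, same total as before the change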
 
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
index baa7b81..5a46b26 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
@@ -22,6 +22,8 @@
 
 #include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
 #include <android/hardware/media/omx/1.0/IOmx.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
 
 namespace android {
 
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h b/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
index c436121..a761ef6 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
@@ -288,6 +288,21 @@
 
     bool handleDataSpaceChanged(omx_message &msg);
 
+    /*
+     * Set the max pts gap between frames.
+     *
+     * When the pts gap number is positive, it specifies the maximum allowed pts gap
+     * between two adjacent frames. If two frames are further apart than this, their
+     * timestamps are modified to meet the limit before the frames reach the encoder.
+     *
+     * When the pts gap number is negative, the original timestamps are always
+     * rewritten so that all adjacent frames have the same pts gap, equal to the
+     * absolute value of the passed-in number. This is typically used when the client
+     * wants to make sure every frame is captured even when the source may deliver
+     * frames with out-of-order timestamps.
+     *
+     * Timestamps will be restored to the original when the output is sent back to the client.
+     */
     status_t setMaxPtsGapUs(const void *params, size_t size);
     int64_t getCodecTimestamp(OMX_TICKS timestamp);
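
A hedged sketch of how a caller could request the fixed-gap behavior described above. Per the cast in OMXNodeInstance.cpp, the value travels as an OMX_PARAM_U32TYPE whose nU32 carries a signed 32-bit microsecond count; `instance` is an assumed OMXNodeInstance pointer, InitOMXParams is assumed to be the usual stagefright helper that zeroes and sizes the struct, and the index plumbing around setParameter is omitted.

    OMX_PARAM_U32TYPE params;
    InitOMXParams(&params);                      // assumed helper
    params.nU32 = (OMX_U32)(int32_t)-33333;      // negative => fixed 33.333 ms gap between frames
    status_t err = instance->setMaxPtsGapUs(&params, sizeof(params));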
 
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 952b907..f25fc71 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -127,27 +127,45 @@
     mControl.reset();
 }
 
-int MtpFfsHandle::doAsync(void* data, size_t len, bool read) {
-    struct io_event ioevs[1];
-    if (len > AIO_BUF_LEN) {
-        LOG(ERROR) << "Mtp read/write too large " << len;
-        errno = EINVAL;
-        return -1;
+int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
+    struct io_event ioevs[AIO_BUFS_MAX];
+    size_t total = 0;
+
+    while (total < len) {
+        size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
+        int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
+        for (int i = 0; i < num_bufs; i++) {
+            mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
+        }
+        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
+        if (ret < 0) return -1;
+        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
+        if (ret < 0) return -1;
+        total += ret;
+        if (static_cast<size_t>(ret) < this_len) break;
     }
-    mIobuf[0].buf[0] = reinterpret_cast<unsigned char*>(data);
-    if (iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, len, read) == -1)
-        return -1;
-    int ret = waitEvents(&mIobuf[0], 1, ioevs, nullptr);
-    mIobuf[0].buf[0] = mIobuf[0].bufs.data();
-    return ret;
+
+    int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
+    if (len % packet_size == 0 && zero_packet) {
+        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
+        if (ret < 0) return -1;
+        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
+        if (ret < 0) return -1;
+    }
+
+    for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
+        mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
+    }
+    return total;
 }
 
 int MtpFfsHandle::read(void* data, size_t len) {
-    return doAsync(data, len, true);
+    // Zero-length packets are handled by receiveFile()
+    return doAsync(data, len, true, false);
 }
 
 int MtpFfsHandle::write(const void* data, size_t len) {
-    return doAsync(const_cast<void*>(data), len, false);
+    return doAsync(const_cast<void*>(data), len, false, true);
 }
 
 int MtpFfsHandle::handleEvent() {
@@ -570,7 +588,8 @@
     if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
                     sizeof(mtp_data_header), init_read_len, offset))
             != init_read_len) return -1;
-    if (write(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len) == -1)
+    if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
+                false, false /* zlps are handled below */) == -1)
         return -1;
     file_length -= init_read_len;
     offset += init_read_len;
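
An illustrative helper (not in the patch) that mirrors the chunking math of the new doAsync(): each loop iteration fills up to AIO_BUFS_MAX buffers of AIO_BUF_LEN bytes, and a trailing zero-length packet is queued only when requested and the total length is an exact multiple of the endpoint packet size.

    #include <algorithm>

    size_t countAioBuffers(size_t len, bool zero_packet, size_t packet_size) {
        size_t bufs = 0;
        for (size_t total = 0; total < len; ) {
            size_t this_len = std::min(len - total,
                                       static_cast<size_t>(AIO_BUF_LEN) * AIO_BUFS_MAX);
            bufs += this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
            total += this_len;               // assumes every submission completes fully
        }
        if (zero_packet && len % packet_size == 0) {
            bufs += 1;                       // the terminating zero-length packet
        }
        return bufs;
    }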
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index 24a8bd5..fe343f7 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -48,7 +48,7 @@
     void closeEndpoints();
     void advise(int fd);
     int handleControlRequest(const struct usb_ctrlrequest *request);
-    int doAsync(void* data, size_t len, bool read);
+    int doAsync(void* data, size_t len, bool read, bool zero_packet);
     int handleEvent();
     void cancelTransaction();
     void doSendEvent(mtp_event me);
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 86d59dd..ccddd6e 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -809,7 +809,7 @@
     uint64_t finalsize = sstat.st_size;
     ALOGV("Sent a file over MTP. Time: %f s, Size: %" PRIu64 ", Rate: %f bytes/s",
             diff.count(), finalsize, ((double) finalsize) / diff.count());
-    close(mfr.fd);
+    closeObjFd(mfr.fd, filePath);
     return result;
 }
 
@@ -887,7 +887,7 @@
         else
             result = MTP_RESPONSE_GENERAL_ERROR;
     }
-    close(mfr.fd);
+    closeObjFd(mfr.fd, filePath);
     return result;
 }
 
@@ -1043,7 +1043,7 @@
 
     if (info.mStorageID == storageID) {
         ALOGV("Moving file from %s to %s", (const char*)fromPath, (const char*)path);
-        if (rename(fromPath, path)) {
+        if (renameTo(fromPath, path)) {
             PLOG(ERROR) << "rename() failed from " << fromPath << " to " << path;
             result = MTP_RESPONSE_GENERAL_ERROR;
         }
@@ -1226,7 +1226,7 @@
     }
 
     fstat(mfr.fd, &sstat);
-    close(mfr.fd);
+    closeObjFd(mfr.fd, mSendObjectFilePath);
 
     if (ret < 0) {
         ALOGE("Mtp receive file got error %s", strerror(errno));
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 51cfd7d..8564576 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -36,6 +36,13 @@
 
 constexpr unsigned long FILE_COPY_SIZE = 262144;
 
+static void access_ok(const char *path) {
+    if (access(path, F_OK) == -1) {
+        // Ignore the result. Failure can be common after a delete, where the
+        // metadata has already been updated through other paths.
+    }
+}
+
 /*
 DateTime strings follow a compatible subset of the definition found in ISO 8601, and
 take the form of a Unicode string formatted as: "YYYYMMDDThhmmss.s". In this
@@ -101,6 +108,7 @@
     } else {
         chown((const char *)path, getuid(), FILE_GROUP);
     }
+    access_ok(path);
     return ret;
 }
 
@@ -181,6 +189,7 @@
     LOG(DEBUG) << "Copied a file with MTP. Time: " << diff.count() << " s, Size: " << length <<
         ", Rate: " << ((double) length) / diff.count() << " bytes/s";
     chown(toPath, getuid(), FILE_GROUP);
+    access_ok(toPath);
     return ret == -1 ? -1 : 0;
 }
 
@@ -212,6 +221,7 @@
         } else {
             success = unlink(childPath.c_str());
         }
+        access_ok(childPath.c_str());
         if (success == -1)
             PLOG(ERROR) << "Deleting path " << childPath << " failed";
     }
@@ -236,7 +246,22 @@
     }
     if (success == -1)
         PLOG(ERROR) << "Deleting path " << path << " failed";
+    access_ok(path);
     return success == 0;
 }
 
+int renameTo(const char *oldPath, const char *newPath) {
+    int ret = rename(oldPath, newPath);
+    access_ok(oldPath);
+    access_ok(newPath);
+    return ret;
+}
+
+// Closes the fd, then calls access(2) on the path so the underlying
+// filesystems pick up the update.
+void closeObjFd(int fd, const char *path) {
+    close(fd);
+    access_ok(path);
+}
+
 }  // namespace android
diff --git a/media/mtp/MtpUtils.h b/media/mtp/MtpUtils.h
index 744546b..21f5df0 100644
--- a/media/mtp/MtpUtils.h
+++ b/media/mtp/MtpUtils.h
@@ -34,7 +34,9 @@
 int copyRecursive(const char *fromPath, const char *toPath);
 int copyFile(const char *fromPath, const char *toPath);
 bool deletePath(const char* path);
+int renameTo(const char *oldPath, const char *newPath);
 
+void closeObjFd(int fd, const char *path);
 }; // namespace android
 
 #endif // _MTP_UTILS_H
diff --git a/media/mtp/tests/MtpFfsHandle_test.cpp b/media/mtp/tests/MtpFfsHandle_test.cpp
index d11fe07..c9c9e62 100644
--- a/media/mtp/tests/MtpFfsHandle_test.cpp
+++ b/media/mtp/tests/MtpFfsHandle_test.cpp
@@ -123,6 +123,21 @@
     EXPECT_STREQ(buf, dummyDataStr.c_str());
 }
 
+TYPED_TEST(MtpFfsHandleTest, testReadLarge) {
+    std::stringstream ss;
+    int size = TEST_PACKET_SIZE * MED_MULT;
+    char buf[size + 1];
+    buf[size] = '\0';
+
+    for (int i = 0; i < MED_MULT; i++)
+        ss << dummyDataStr;
+
+    EXPECT_EQ(write(this->bulk_out, ss.str().c_str(), size), size);
+    EXPECT_EQ(this->handle->read(buf, size), size);
+
+    EXPECT_STREQ(buf, ss.str().c_str());
+}
+
 TYPED_TEST(MtpFfsHandleTest, testWrite) {
     char buf[TEST_PACKET_SIZE + 1];
     buf[TEST_PACKET_SIZE] = '\0';
@@ -131,6 +146,21 @@
     EXPECT_STREQ(buf, dummyDataStr.c_str());
 }
 
+TYPED_TEST(MtpFfsHandleTest, testWriteLarge) {
+    std::stringstream ss;
+    int size = TEST_PACKET_SIZE * MED_MULT;
+    char buf[size + 1];
+    buf[size] = '\0';
+
+    for (int i = 0; i < MED_MULT; i++)
+        ss << dummyDataStr;
+
+    EXPECT_EQ(this->handle->write(ss.str().c_str(), size), size);
+    EXPECT_EQ(read(this->bulk_in, buf, size), size);
+
+    EXPECT_STREQ(buf, ss.str().c_str());
+}
+
 TYPED_TEST(MtpFfsHandleTest, testReceiveFileEmpty) {
     std::stringstream ss;
     mtp_file_range mfr;
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index fe08ab9..6d10f1c 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -224,7 +224,7 @@
 
 static bool findId(AMediaDrm *mObj, const AMediaDrmByteArray &id, List<idvec_t>::iterator &iter) {
     for (iter = mObj->mIds.begin(); iter != mObj->mIds.end(); ++iter) {
-        if (iter->array() == id.ptr && iter->size() == id.length) {
+        if (id.length == iter->size() && memcmp(iter->array(), id.ptr, iter->size()) == 0) {
             return true;
         }
     }
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 19df760..38e12e3 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -516,8 +516,6 @@
     int32_t bottom;
 } AImageCropRect;
 
-#if __ANDROID_API__ >= 24
-
 /**
  * Return the image back to the system and delete the AImage object from memory.
  *
@@ -529,7 +527,7 @@
  *
  * @param image The {@link AImage} to be deleted.
  */
-void AImage_delete(AImage* image);
+void AImage_delete(AImage* image) __INTRODUCED_IN(24);
 
 /**
  * Query the width of the input {@link AImage}.
@@ -543,7 +541,7 @@
  *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
  *                 image has been deleted.</li></ul>
  */
-media_status_t AImage_getWidth(const AImage* image, /*out*/int32_t* width);
+media_status_t AImage_getWidth(const AImage* image, /*out*/int32_t* width) __INTRODUCED_IN(24);
 
 /**
  * Query the height of the input {@link AImage}.
@@ -557,7 +555,7 @@
  *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
  *                 image has been deleted.</li></ul>
  */
-media_status_t AImage_getHeight(const AImage* image, /*out*/int32_t* height);
+media_status_t AImage_getHeight(const AImage* image, /*out*/int32_t* height) __INTRODUCED_IN(24);
 
 /**
  * Query the format of the input {@link AImage}.
@@ -573,7 +571,7 @@
  *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
  *                 image has been deleted.</li></ul>
  */
-media_status_t AImage_getFormat(const AImage* image, /*out*/int32_t* format);
+media_status_t AImage_getFormat(const AImage* image, /*out*/int32_t* format) __INTRODUCED_IN(24);
 
 /**
  * Query the cropped rectangle of the input {@link AImage}.
@@ -590,7 +588,7 @@
  *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
  *                 image has been deleted.</li></ul>
  */
-media_status_t AImage_getCropRect(const AImage* image, /*out*/AImageCropRect* rect);
+media_status_t AImage_getCropRect(const AImage* image, /*out*/AImageCropRect* rect) __INTRODUCED_IN(24);
 
 /**
  * Query the timestamp of the input {@link AImage}.
@@ -614,7 +612,7 @@
  *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
  *                 image has been deleted.</li></ul>
  */
-media_status_t AImage_getTimestamp(const AImage* image, /*out*/int64_t* timestampNs);
+media_status_t AImage_getTimestamp(const AImage* image, /*out*/int64_t* timestampNs) __INTRODUCED_IN(24);
 
 /**
  * Query the number of planes of the input {@link AImage}.
@@ -632,7 +630,7 @@
  *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
  *                 image has been deleted.</li></ul>
  */
-media_status_t AImage_getNumberOfPlanes(const AImage* image, /*out*/int32_t* numPlanes);
+media_status_t AImage_getNumberOfPlanes(const AImage* image, /*out*/int32_t* numPlanes) __INTRODUCED_IN(24);
 
 /**
  * Query the pixel stride of the input {@link AImage}.
@@ -660,7 +658,7 @@
  *                 for CPU access.</li></ul>
  */
 media_status_t AImage_getPlanePixelStride(
-        const AImage* image, int planeIdx, /*out*/int32_t* pixelStride);
+        const AImage* image, int planeIdx, /*out*/int32_t* pixelStride) __INTRODUCED_IN(24);
 
 /**
  * Query the row stride of the input {@link AImage}.
@@ -687,7 +685,7 @@
  *                 for CPU access.</li></ul>
  */
 media_status_t AImage_getPlaneRowStride(
-        const AImage* image, int planeIdx, /*out*/int32_t* rowStride);
+        const AImage* image, int planeIdx, /*out*/int32_t* rowStride) __INTRODUCED_IN(24);
 
 /**
  * Get the data pointer of the input image for direct application access.
@@ -712,11 +710,7 @@
  */
 media_status_t AImage_getPlaneData(
         const AImage* image, int planeIdx,
-        /*out*/uint8_t** data, /*out*/int* dataLength);
-
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 26
+        /*out*/uint8_t** data, /*out*/int* dataLength) __INTRODUCED_IN(24);
 
 /**
  * Return the image back to the system and delete the AImage object from memory asynchronously.
@@ -732,7 +726,7 @@
  *
  * @see sync.h
  */
-void AImage_deleteAsync(AImage* image, int releaseFenceFd);
+void AImage_deleteAsync(AImage* image, int releaseFenceFd) __INTRODUCED_IN(26);
 
 /**
  * Get the hardware buffer handle of the input image intended for GPU and/or hardware access.
@@ -760,9 +754,7 @@
  *
  * @see AImageReader_ImageCallback
  */
-media_status_t AImage_getHardwareBuffer(const AImage* image, /*out*/AHardwareBuffer** buffer);
-
-#endif /* __ANDROID_API__ >= 26 */
+media_status_t AImage_getHardwareBuffer(const AImage* image, /*out*/AHardwareBuffer** buffer) __INTRODUCED_IN(26);
 
 __END_DECLS
 
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index 571410b..eb1a44a 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -50,8 +50,6 @@
  */
 typedef struct AImageReader AImageReader;
 
-#if __ANDROID_API__ >= 24
-
 /**
  * Create a new reader for images of the desired size and format.
  *
@@ -88,7 +86,7 @@
  */
 media_status_t AImageReader_new(
         int32_t width, int32_t height, int32_t format, int32_t maxImages,
-        /*out*/AImageReader** reader);
+        /*out*/AImageReader** reader) __INTRODUCED_IN(24);
 
 /**
  * Delete an {@link AImageReader} and return all images generated by this reader to system.
@@ -100,7 +98,7 @@
  *
  * @param reader The image reader to be deleted.
  */
-void AImageReader_delete(AImageReader* reader);
+void AImageReader_delete(AImageReader* reader) __INTRODUCED_IN(24);
 
 /**
  * Get a {@link ANativeWindow} that can be used to produce {@link AImage} for this image reader.
@@ -114,7 +112,7 @@
  *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
  *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or window is NULL.</li></ul>
  */
-media_status_t AImageReader_getWindow(AImageReader* reader, /*out*/ANativeWindow** window);
+media_status_t AImageReader_getWindow(AImageReader* reader, /*out*/ANativeWindow** window) __INTRODUCED_IN(24);
 
 /**
  * Query the default width of the {@link AImage} generated by this reader, in pixels.
@@ -130,7 +128,7 @@
  *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
  *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or width is NULL.</li></ul>
  */
-media_status_t AImageReader_getWidth(const AImageReader* reader, /*out*/int32_t* width);
+media_status_t AImageReader_getWidth(const AImageReader* reader, /*out*/int32_t* width) __INTRODUCED_IN(24);
 
 /**
  * Query the default height of the {@link AImage} generated by this reader, in pixels.
@@ -146,7 +144,7 @@
  *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
  *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or height is NULL.</li></ul>
  */
-media_status_t AImageReader_getHeight(const AImageReader* reader, /*out*/int32_t* height);
+media_status_t AImageReader_getHeight(const AImageReader* reader, /*out*/int32_t* height) __INTRODUCED_IN(24);
 
 /**
  * Query the format of the {@link AImage} generated by this reader.
@@ -159,7 +157,7 @@
  *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
  *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or format is NULL.</li></ul>
  */
-media_status_t AImageReader_getFormat(const AImageReader* reader, /*out*/int32_t* format);
+media_status_t AImageReader_getFormat(const AImageReader* reader, /*out*/int32_t* format) __INTRODUCED_IN(24);
 
 /**
  * Query the maximum number of concurrently acquired {@link AImage}s of this reader.
@@ -172,7 +170,7 @@
  *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
  *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or maxImages is NULL.</li></ul>
  */
-media_status_t AImageReader_getMaxImages(const AImageReader* reader, /*out*/int32_t* maxImages);
+media_status_t AImageReader_getMaxImages(const AImageReader* reader, /*out*/int32_t* maxImages) __INTRODUCED_IN(24);
 
 /**
  * Acquire the next {@link AImage} from the image reader's queue.
@@ -208,7 +206,7 @@
  *
  * @see AImageReader_acquireLatestImage
  */
-media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image);
+media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image) __INTRODUCED_IN(24);
 
 /**
 
@@ -252,7 +250,7 @@
  *
  * @see AImageReader_acquireNextImage
  */
-media_status_t AImageReader_acquireLatestImage(AImageReader* reader, /*out*/AImage** image);
+media_status_t AImageReader_acquireLatestImage(AImageReader* reader, /*out*/AImage** image) __INTRODUCED_IN(24);
 
 
 /**
@@ -296,11 +294,7 @@
  *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader is NULL.</li></ul>
  */
 media_status_t AImageReader_setImageListener(
-        AImageReader* reader, AImageReader_ImageListener* listener);
-
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 26
+        AImageReader* reader, AImageReader_ImageListener* listener) __INTRODUCED_IN(24);
 
 /**
  * AImageReader constructor similar to {@link AImageReader_new} that takes an additional parameter
@@ -365,7 +359,7 @@
  */
 media_status_t AImageReader_newWithUsage(
         int32_t width, int32_t height, int32_t format, uint64_t usage, int32_t maxImages,
-        /*out*/ AImageReader** reader);
+        /*out*/ AImageReader** reader) __INTRODUCED_IN(26);
 
 /**
  * Acquire the next {@link AImage} from the image reader's queue asynchronously.
@@ -384,7 +378,7 @@
  * @see sync_get_fence_info
  */
 media_status_t AImageReader_acquireNextImageAsync(
-        AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd);
+        AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd) __INTRODUCED_IN(26);
 
 /**
  * Acquire the latest {@link AImage} from the image reader's queue asynchronously, dropping older
@@ -404,7 +398,7 @@
  * @see sync_get_fence_info
  */
 media_status_t AImageReader_acquireLatestImageAsync(
-        AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd);
+        AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd) __INTRODUCED_IN(26);
 /**
  * Signature of the callback which is called when {@link AImageReader} is about to remove a buffer.
  *
@@ -459,9 +453,7 @@
  * @see AImage_getHardwareBuffer
  */
 media_status_t AImageReader_setBufferRemovedListener(
-        AImageReader* reader, AImageReader_BufferRemovedListener* listener);
-
-#endif /* __ANDROID_API__ >= 26 */
+        AImageReader* reader, AImageReader_BufferRemovedListener* listener) __INTRODUCED_IN(26);
 
 __END_DECLS
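
A hedged usage sketch for the AImageReader/AImage entry points annotated above (API 24 and up). Error handling is trimmed, and AIMAGE_FORMAT_YUV_420_888 is assumed to be the format constant declared in NdkImage.h.

    static void drainOneImage() {
        AImageReader* reader = nullptr;
        if (AImageReader_new(640, 480, AIMAGE_FORMAT_YUV_420_888, /*maxImages*/ 2, &reader) != AMEDIA_OK) {
            return;
        }
        ANativeWindow* window = nullptr;
        AImageReader_getWindow(reader, &window);   // hand this window to the producer (e.g. a camera)

        AImage* image = nullptr;
        if (AImageReader_acquireNextImage(reader, &image) == AMEDIA_OK) {
            uint8_t* data = nullptr;
            int len = 0;
            AImage_getPlaneData(image, /*planeIdx*/ 0, &data, &len);  // Y plane for YUV_420_888
            // ... consume data[0..len) ...
            AImage_delete(image);                  // return the buffer to the reader
        }
        AImageReader_delete(reader);
    }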
 
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index f4a51d0..b329b39 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -14,6 +14,15 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Media
+ * @{
+ */
+
+/**
+ * @file NdkMediaCodec.h
+ */
+
 /*
  * This file defines an NDK API.
  * Do not remove methods.
@@ -112,30 +121,28 @@
       AMediaCodecOnAsyncError           onAsyncError;
 };
 
-#if __ANDROID_API__ >= 21
-
 /**
  * Create codec by name. Use this if you know the exact codec you want to use.
  * When configuring, you will need to specify whether to use the codec as an
  * encoder or decoder.
  */
-AMediaCodec* AMediaCodec_createCodecByName(const char *name);
+AMediaCodec* AMediaCodec_createCodecByName(const char *name) __INTRODUCED_IN(21);
 
 /**
  * Create codec by mime type. Most applications will use this, specifying a
  * mime type obtained from media extractor.
  */
-AMediaCodec* AMediaCodec_createDecoderByType(const char *mime_type);
+AMediaCodec* AMediaCodec_createDecoderByType(const char *mime_type) __INTRODUCED_IN(21);
 
 /**
  * Create encoder by mime type.
  */
-AMediaCodec* AMediaCodec_createEncoderByType(const char *mime_type);
+AMediaCodec* AMediaCodec_createEncoderByType(const char *mime_type) __INTRODUCED_IN(21);
 
 /**
  * delete the codec and free its resources
  */
-media_status_t AMediaCodec_delete(AMediaCodec*);
+media_status_t AMediaCodec_delete(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * Configure the codec. For decoding you would typically get the format from an extractor.
@@ -145,43 +152,43 @@
         const AMediaFormat* format,
         ANativeWindow* surface,
         AMediaCrypto *crypto,
-        uint32_t flags);
+        uint32_t flags) __INTRODUCED_IN(21);
 
 /**
  * Start the codec. A codec must be configured before it can be started, and must be started
  * before buffers can be sent to it.
  */
-media_status_t AMediaCodec_start(AMediaCodec*);
+media_status_t AMediaCodec_start(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * Stop the codec.
  */
-media_status_t AMediaCodec_stop(AMediaCodec*);
+media_status_t AMediaCodec_stop(AMediaCodec*) __INTRODUCED_IN(21);
 
 /*
  * Flush the codec's input and output. All indices previously returned from calls to
  * AMediaCodec_dequeueInputBuffer and AMediaCodec_dequeueOutputBuffer become invalid.
  */
-media_status_t AMediaCodec_flush(AMediaCodec*);
+media_status_t AMediaCodec_flush(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * Get an input buffer. The specified buffer index must have been previously obtained from
  * dequeueInputBuffer, and not yet queued.
  */
-uint8_t* AMediaCodec_getInputBuffer(AMediaCodec*, size_t idx, size_t *out_size);
+uint8_t* AMediaCodec_getInputBuffer(AMediaCodec*, size_t idx, size_t *out_size) __INTRODUCED_IN(21);
 
 /**
  * Get an output buffer. The specified buffer index must have been previously obtained from
  * dequeueOutputBuffer, and not yet queued.
  */
-uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec*, size_t idx, size_t *out_size);
+uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec*, size_t idx, size_t *out_size) __INTRODUCED_IN(21);
 
 /**
  * Get the index of the next available input buffer. An app will typically use this with
  * getInputBuffer() to get a pointer to the buffer, then copy the data to be encoded or decoded
  * into the buffer before passing it to the codec.
  */
-ssize_t AMediaCodec_dequeueInputBuffer(AMediaCodec*, int64_t timeoutUs);
+ssize_t AMediaCodec_dequeueInputBuffer(AMediaCodec*, int64_t timeoutUs) __INTRODUCED_IN(21);
 
 /*
  * __USE_FILE_OFFSET64 changes the type of off_t in LP32, which changes the ABI
@@ -212,7 +219,7 @@
  */
 media_status_t AMediaCodec_queueInputBuffer(AMediaCodec*, size_t idx,
                                             _off_t_compat offset, size_t size,
-                                            uint64_t time, uint32_t flags);
+                                            uint64_t time, uint32_t flags) __INTRODUCED_IN(21);
 
 /**
  * Send the specified buffer to the codec for processing.
@@ -220,7 +227,7 @@
 media_status_t AMediaCodec_queueSecureInputBuffer(AMediaCodec*, size_t idx,
                                                   _off_t_compat offset,
                                                   AMediaCodecCryptoInfo*,
-                                                  uint64_t time, uint32_t flags);
+                                                  uint64_t time, uint32_t flags) __INTRODUCED_IN(21);
 
 #undef _off_t_compat
 
@@ -228,21 +235,21 @@
  * Get the index of the next available buffer of processed data.
  */
 ssize_t AMediaCodec_dequeueOutputBuffer(AMediaCodec*, AMediaCodecBufferInfo *info,
-        int64_t timeoutUs);
-AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*);
+        int64_t timeoutUs) __INTRODUCED_IN(21);
+AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*) __INTRODUCED_IN(21);
 
 /**
  * Get format of the buffer. The specified buffer index must have been previously obtained from
  * dequeueOutputBuffer.
  */
-AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index);
+AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index) __INTRODUCED_IN(21);
 
 /**
  * If you are done with a buffer, use this call to return the buffer to
  * the codec. If you previously specified a surface when configuring this
  * video decoder you can optionally render the buffer.
  */
-media_status_t AMediaCodec_releaseOutputBuffer(AMediaCodec*, size_t idx, bool render);
+media_status_t AMediaCodec_releaseOutputBuffer(AMediaCodec*, size_t idx, bool render) __INTRODUCED_IN(21);
 
 /**
  * Dynamically sets the output surface of a codec.
@@ -254,7 +261,7 @@
  *
  * For more details, see the Java documentation for MediaCodec.setOutputSurface.
  */
-media_status_t AMediaCodec_setOutputSurface(AMediaCodec*, ANativeWindow* surface);
+media_status_t AMediaCodec_setOutputSurface(AMediaCodec*, ANativeWindow* surface) __INTRODUCED_IN(21);
 
 /**
  * If you are done with a buffer, use this call to update its surface timestamp
@@ -265,9 +272,7 @@
  * For more details, see the Java documentation for MediaCodec.releaseOutputBuffer.
  */
 media_status_t AMediaCodec_releaseOutputBufferAtTime(
-        AMediaCodec *mData, size_t idx, int64_t timestampNs);
-
-#if __ANDROID_API__ >= 26
+        AMediaCodec *mData, size_t idx, int64_t timestampNs) __INTRODUCED_IN(21);
 
 /**
  * Creates a Surface that can be used as the input to encoder, in place of input buffers
@@ -281,7 +286,7 @@
  * For more details, see the Java documentation for MediaCodec.createInputSurface.
  */
 media_status_t AMediaCodec_createInputSurface(
-        AMediaCodec *mData, ANativeWindow **surface);
+        AMediaCodec *mData, ANativeWindow **surface) __INTRODUCED_IN(26);
 
 /**
  * Creates a persistent Surface that can be used as the input to encoder
@@ -297,7 +302,7 @@
  * For more details, see the Java documentation for MediaCodec.createPersistentInputSurface.
  */
 media_status_t AMediaCodec_createPersistentInputSurface(
-        ANativeWindow **surface);
+        ANativeWindow **surface) __INTRODUCED_IN(26);
 
 /**
  * Set a persistent-surface that can be used as the input to encoder, in place of input buffers
@@ -310,7 +315,7 @@
  * For more details, see the Java documentation for MediaCodec.setInputSurface.
  */
 media_status_t AMediaCodec_setInputSurface(
-        AMediaCodec *mData, ANativeWindow *surface);
+        AMediaCodec *mData, ANativeWindow *surface) __INTRODUCED_IN(26);
 
 /**
  * Signal additional parameters to the codec instance.
@@ -321,7 +326,7 @@
  * NOTE: Some of these parameter changes may silently fail to apply.
  */
 media_status_t AMediaCodec_setParameters(
-        AMediaCodec *mData, const AMediaFormat* params);
+        AMediaCodec *mData, const AMediaFormat* params) __INTRODUCED_IN(26);
 
 /**
  * Signals end-of-stream on input. Equivalent to submitting an empty buffer with
@@ -337,23 +342,19 @@
  *
  * For more details, see the Java documentation for MediaCodec.signalEndOfInputStream.
  */
-media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData);
-
-#endif /* __ANDROID_API__ >= 26 */
-
-#if __ANDROID_API__ >= 28
+media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData) __INTRODUCED_IN(26);
 
 /**
  * Get the component name. If the codec was created by createDecoderByType
  * or createEncoderByType, which component is chosen is not known beforehand.
  * Caller shall call AMediaCodec_releaseName to free the returned pointer.
  */
-media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name);
+media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name) __INTRODUCED_IN(28);
 
 /**
  * Free the memory pointed by name which is returned by AMediaCodec_getName.
  */
-void AMediaCodec_releaseName(AMediaCodec*, char* name);
+void AMediaCodec_releaseName(AMediaCodec*, char* name) __INTRODUCED_IN(28);
 
 /**
  * Set an asynchronous callback for actionable AMediaCodec events.
@@ -377,34 +378,32 @@
 media_status_t AMediaCodec_setAsyncNotifyCallback(
         AMediaCodec*,
         AMediaCodecOnAsyncNotifyCallback callback,
-        void *userdata);
+        void *userdata) __INTRODUCED_IN(28);
 
 /**
  * Release the crypto if applicable.
  */
-media_status_t AMediaCodec_releaseCrypto(AMediaCodec*);
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec*) __INTRODUCED_IN(28);
 
 /**
  * Call this after AMediaCodec_configure() returns successfully to get the input
  * format accepted by the codec. Do this to determine what optional configuration
  * parameters were supported by the codec.
  */
-AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*);
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*) __INTRODUCED_IN(28);
 
 /**
  * Returns true if the codec cannot proceed further, but can be recovered by stopping,
  * configuring, and starting again.
  */
-bool AMediaCodecActionCode_isRecoverable(int32_t actionCode);
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) __INTRODUCED_IN(28);
 
 /**
  * Returns true if the codec error is a transient issue, perhaps due to
  * resource constraints, and that the method (or encoding/decoding) may be
  * retried at a later time.
  */
-bool AMediaCodecActionCode_isTransient(int32_t actionCode);
-
-#endif /* __ANDROID_API__ >= 28 */
+bool AMediaCodecActionCode_isTransient(int32_t actionCode) __INTRODUCED_IN(28);
 
 typedef enum {
     AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
@@ -438,54 +437,54 @@
         uint8_t iv[16],
         cryptoinfo_mode_t mode,
         size_t *clearbytes,
-        size_t *encryptedbytes);
+        size_t *encryptedbytes) __INTRODUCED_IN(21);
 
 /**
  * delete an AMediaCodecCryptoInfo created previously with AMediaCodecCryptoInfo_new, or
  * obtained from AMediaExtractor
  */
-media_status_t AMediaCodecCryptoInfo_delete(AMediaCodecCryptoInfo*);
+media_status_t AMediaCodecCryptoInfo_delete(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
 
 /**
  * Set the crypto pattern on an AMediaCryptoInfo object
  */
 void AMediaCodecCryptoInfo_setPattern(
         AMediaCodecCryptoInfo *info,
-        cryptoinfo_pattern_t *pattern);
+        cryptoinfo_pattern_t *pattern) __INTRODUCED_IN(21);
 
 /**
  * The number of subsamples that make up the buffer's contents.
  */
-size_t AMediaCodecCryptoInfo_getNumSubSamples(AMediaCodecCryptoInfo*);
+size_t AMediaCodecCryptoInfo_getNumSubSamples(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
 
 /**
  * A 16-byte opaque key
  */
-media_status_t AMediaCodecCryptoInfo_getKey(AMediaCodecCryptoInfo*, uint8_t *dst);
+media_status_t AMediaCodecCryptoInfo_getKey(AMediaCodecCryptoInfo*, uint8_t *dst) __INTRODUCED_IN(21);
 
 /**
  * A 16-byte initialization vector
  */
-media_status_t AMediaCodecCryptoInfo_getIV(AMediaCodecCryptoInfo*, uint8_t *dst);
+media_status_t AMediaCodecCryptoInfo_getIV(AMediaCodecCryptoInfo*, uint8_t *dst) __INTRODUCED_IN(21);
 
 /**
  * The type of encryption that has been applied,
  * one of AMEDIACODECRYPTOINFO_MODE_CLEAR or AMEDIACODECRYPTOINFO_MODE_AES_CTR.
  */
-cryptoinfo_mode_t AMediaCodecCryptoInfo_getMode(AMediaCodecCryptoInfo*);
+cryptoinfo_mode_t AMediaCodecCryptoInfo_getMode(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
 
 /**
  * The number of leading unencrypted bytes in each subsample.
  */
-media_status_t AMediaCodecCryptoInfo_getClearBytes(AMediaCodecCryptoInfo*, size_t *dst);
+media_status_t AMediaCodecCryptoInfo_getClearBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
 
 /**
  * The number of trailing encrypted bytes in each subsample.
  */
-media_status_t AMediaCodecCryptoInfo_getEncryptedBytes(AMediaCodecCryptoInfo*, size_t *dst);
-
-#endif /* __ANDROID_API__ >= 21 */
+media_status_t AMediaCodecCryptoInfo_getEncryptedBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
 
 __END_DECLS
 
 #endif //_NDK_MEDIA_CODEC_H
+
+/** @} */
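
A hedged synchronous decode-loop sketch using the AMediaCodec calls annotated above (API 21 and up). Format creation, end-of-stream flags, and INFO_* housekeeping are trimmed; `format` (an AMediaFormat*) and fillWithSampleData() are assumed app-side pieces, and "video/avc" is only an example mime type.

    AMediaCodec* codec = AMediaCodec_createDecoderByType("video/avc");
    AMediaCodec_configure(codec, format, /*surface*/ nullptr, /*crypto*/ nullptr, /*flags*/ 0);
    AMediaCodec_start(codec);

    ssize_t inIdx = AMediaCodec_dequeueInputBuffer(codec, /*timeoutUs*/ 10000);
    if (inIdx >= 0) {
        size_t capacity = 0;
        uint8_t* in = AMediaCodec_getInputBuffer(codec, inIdx, &capacity);
        size_t written = fillWithSampleData(in, capacity);   // assumed app-side helper
        AMediaCodec_queueInputBuffer(codec, inIdx, /*offset*/ 0, written, /*timeUs*/ 0, /*flags*/ 0);
    }

    AMediaCodecBufferInfo info;
    ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(codec, &info, /*timeoutUs*/ 10000);
    if (outIdx >= 0) {
        AMediaCodec_releaseOutputBuffer(codec, outIdx, /*render*/ false);
    }

    AMediaCodec_stop(codec);
    AMediaCodec_delete(codec);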
diff --git a/media/ndk/include/media/NdkMediaCrypto.h b/media/ndk/include/media/NdkMediaCrypto.h
index d31dbdc..b673adc 100644
--- a/media/ndk/include/media/NdkMediaCrypto.h
+++ b/media/ndk/include/media/NdkMediaCrypto.h
@@ -14,6 +14,14 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Media
+ * @{
+ */
+
+/**
+ * @file NdkMediaCrypto.h
+ */
 
 /*
  * This file defines an NDK API.
@@ -39,18 +47,16 @@
 
 typedef uint8_t AMediaUUID[16];
 
-#if __ANDROID_API__ >= 21
+bool AMediaCrypto_isCryptoSchemeSupported(const AMediaUUID uuid) __INTRODUCED_IN(21);
 
-bool AMediaCrypto_isCryptoSchemeSupported(const AMediaUUID uuid);
+bool AMediaCrypto_requiresSecureDecoderComponent(const char *mime) __INTRODUCED_IN(21);
 
-bool AMediaCrypto_requiresSecureDecoderComponent(const char *mime);
+AMediaCrypto* AMediaCrypto_new(const AMediaUUID uuid, const void *initData, size_t initDataSize) __INTRODUCED_IN(21);
 
-AMediaCrypto* AMediaCrypto_new(const AMediaUUID uuid, const void *initData, size_t initDataSize);
-
-void AMediaCrypto_delete(AMediaCrypto* crypto);
-
-#endif /* __ANDROID_API__ >= 21 */
+void AMediaCrypto_delete(AMediaCrypto* crypto) __INTRODUCED_IN(21);
 
 __END_DECLS
 
 #endif // _NDK_MEDIA_CRYPTO_H
+
+/** @} */
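
A short hedged sketch of the AMediaCrypto calls above (API 21 and up). The zeroed UUID is a placeholder for a real 16-byte scheme UUID, and the null init data is only to keep the example compact.

    AMediaUUID uuid = {0};                        // placeholder: fill with the real scheme UUID
    if (AMediaCrypto_isCryptoSchemeSupported(uuid)) {
        AMediaCrypto* crypto = AMediaCrypto_new(uuid, /*initData*/ nullptr, /*initDataSize*/ 0);
        bool secure = AMediaCrypto_requiresSecureDecoderComponent("video/avc");
        // ... pass crypto to AMediaCodec_configure(), honoring secure when picking a decoder ...
        AMediaCrypto_delete(crypto);
    }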
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 9e2e351..3a4373c 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -38,8 +38,6 @@
 struct AMediaDataSource;
 typedef struct AMediaDataSource AMediaDataSource;
 
-#if __ANDROID_API__ >= 28
-
 /*
  * AMediaDataSource's callbacks will be invoked on an implementation-defined thread
  * or thread pool. No guarantees are provided about which thread(s) will be used for
@@ -84,19 +82,19 @@
  * Create new media data source. Returns NULL if memory allocation
  * for the new data source object fails.
  */
-AMediaDataSource* AMediaDataSource_new();
+AMediaDataSource* AMediaDataSource_new() __INTRODUCED_IN(28);
 
 /**
  * Delete a previously created media data source.
  */
-void AMediaDataSource_delete(AMediaDataSource*);
+void AMediaDataSource_delete(AMediaDataSource*) __INTRODUCED_IN(28);
 
 /**
  * Set a user-provided opaque handle. This opaque handle is passed as
  * the first argument to the data source callbacks.
  */
 void AMediaDataSource_setUserdata(
-        AMediaDataSource*, void *userdata);
+        AMediaDataSource*, void *userdata) __INTRODUCED_IN(28);
 
 /**
  * Set a custom callback for supplying random access media data to the
@@ -111,7 +109,7 @@
  */
 void AMediaDataSource_setReadAt(
         AMediaDataSource*,
-        AMediaDataSourceReadAt);
+        AMediaDataSourceReadAt) __INTRODUCED_IN(28);
 
 /**
  * Set a custom callback for supplying the size of the data source to the
@@ -122,7 +120,7 @@
  */
 void AMediaDataSource_setGetSize(
         AMediaDataSource*,
-        AMediaDataSourceGetSize);
+        AMediaDataSourceGetSize) __INTRODUCED_IN(28);
 
 /**
  * Set a custom callback to receive signal from the NDK media framework
@@ -133,9 +131,7 @@
  */
 void AMediaDataSource_setClose(
         AMediaDataSource*,
-        AMediaDataSourceClose);
-
-#endif  /*__ANDROID_API__ >= 28 */
+        AMediaDataSourceClose) __INTRODUCED_IN(28);
 
 __END_DECLS
 
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 9e9f4c3..24c0d6d 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -14,6 +14,15 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Media
+ * @{
+ */
+
+/**
+ * @file NdkMediaDrm.h
+ */
+
 /*
  * This file defines an NDK API.
  * Do not remove methods.
@@ -78,8 +87,6 @@
 typedef void (*AMediaDrmEventListener)(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         AMediaDrmEventType eventType, int extra, const uint8_t *data, size_t dataSize);
 
-#if __ANDROID_API__ >= 21
-
 /**
  * Query if the given scheme identified by its UUID is supported on this device, and
  * whether the drm plugin is able to handle the media container format specified by mimeType.
@@ -88,25 +95,27 @@
  * mimeType is the MIME type of the media container, e.g. "video/mp4".  If mimeType
  * is not known or required, it can be provided as NULL.
  */
-bool AMediaDrm_isCryptoSchemeSupported(const uint8_t *uuid, const char *mimeType);
+bool AMediaDrm_isCryptoSchemeSupported(const uint8_t *uuid,
+        const char *mimeType) __INTRODUCED_IN(21);
 
 /**
  * Create a MediaDrm instance from a UUID
  * uuid identifies the universal unique ID of the crypto scheme. uuid must be 16 bytes.
  */
-AMediaDrm* AMediaDrm_createByUUID(const uint8_t *uuid);
+AMediaDrm* AMediaDrm_createByUUID(const uint8_t *uuid) __INTRODUCED_IN(21);
 
 /**
  * Release a MediaDrm object
  */
-void AMediaDrm_release(AMediaDrm *);
+void AMediaDrm_release(AMediaDrm *) __INTRODUCED_IN(21);
 
 /**
  * Register a callback to be invoked when an event occurs
  *
  * listener is the callback that will be invoked on event
  */
-media_status_t AMediaDrm_setOnEventListener(AMediaDrm *, AMediaDrmEventListener listener);
+media_status_t AMediaDrm_setOnEventListener(AMediaDrm *,
+        AMediaDrmEventListener listener) __INTRODUCED_IN(21);
 
 /**
  * Open a new session with the MediaDrm object.  A session ID is returned.
@@ -114,13 +123,15 @@
  * returns MEDIADRM_NOT_PROVISIONED_ERROR if provisioning is needed
  * returns MEDIADRM_RESOURCE_BUSY_ERROR if required resources are in use
  */
-media_status_t AMediaDrm_openSession(AMediaDrm *, AMediaDrmSessionId *sessionId);
+media_status_t AMediaDrm_openSession(AMediaDrm *,
+        AMediaDrmSessionId *sessionId) __INTRODUCED_IN(21);
 
 /**
  * Close a session on the MediaDrm object that was previously opened
  * with AMediaDrm_openSession.
  */
-media_status_t AMediaDrm_closeSession(AMediaDrm *, const AMediaDrmSessionId *sessionId);
+media_status_t AMediaDrm_closeSession(AMediaDrm *,
+        const AMediaDrmSessionId *sessionId) __INTRODUCED_IN(21);
 
 typedef enum AMediaDrmKeyType {
     /**
@@ -199,7 +210,7 @@
 media_status_t AMediaDrm_getKeyRequest(AMediaDrm *, const AMediaDrmScope *scope,
         const uint8_t *init, size_t initSize, const char *mimeType, AMediaDrmKeyType keyType,
         const AMediaDrmKeyValue *optionalParameters, size_t numOptionalParameters,
-        const uint8_t **keyRequest, size_t *keyRequestSize);
+        const uint8_t **keyRequest, size_t *keyRequestSize) __INTRODUCED_IN(21);
 
 /**
  * A key response is received from the license server by the app, then it is
@@ -219,7 +230,8 @@
  */
 
 media_status_t AMediaDrm_provideKeyResponse(AMediaDrm *, const AMediaDrmScope *scope,
-        const uint8_t *response, size_t responseSize, AMediaDrmKeySetId *keySetId);
+        const uint8_t *response, size_t responseSize,
+        AMediaDrmKeySetId *keySetId) __INTRODUCED_IN(21);
 
 /**
  * Restore persisted offline keys into a new session.  keySetId identifies the
@@ -229,14 +241,15 @@
  * keySetId identifies the saved key set to restore
  */
 media_status_t AMediaDrm_restoreKeys(AMediaDrm *, const AMediaDrmSessionId *sessionId,
-        const AMediaDrmKeySetId *keySetId);
+        const AMediaDrmKeySetId *keySetId) __INTRODUCED_IN(21);
 
 /**
  * Remove the current keys from a session.
  *
  * keySetId identifies keys to remove
  */
-media_status_t AMediaDrm_removeKeys(AMediaDrm *, const AMediaDrmSessionId *keySetId);
+media_status_t AMediaDrm_removeKeys(AMediaDrm *,
+        const AMediaDrmSessionId *keySetId) __INTRODUCED_IN(21);
 
 /**
  * Request an informative description of the key status for the session.  The status is
@@ -252,7 +265,7 @@
  * and numPairs will be set to the number of pairs available.
  */
 media_status_t AMediaDrm_queryKeyStatus(AMediaDrm *, const AMediaDrmSessionId *sessionId,
-        AMediaDrmKeyValue *keyValuePairs, size_t *numPairs);
+        AMediaDrmKeyValue *keyValuePairs, size_t *numPairs) __INTRODUCED_IN(21);
 
 
 /**
@@ -271,7 +284,7 @@
  *       the next call to getProvisionRequest.
  */
 media_status_t AMediaDrm_getProvisionRequest(AMediaDrm *, const uint8_t **provisionRequest,
-        size_t *provisionRequestSize, const char **serverUrl);
+        size_t *provisionRequestSize, const char **serverUrl) __INTRODUCED_IN(21);
 
 
 /**
@@ -286,7 +299,7 @@
  * server rejected the request
  */
 media_status_t AMediaDrm_provideProvisionResponse(AMediaDrm *,
-        const uint8_t *response, size_t responseSize);
+        const uint8_t *response, size_t responseSize) __INTRODUCED_IN(21);
 
 
 /**
@@ -311,7 +324,7 @@
  * number required.
  */
 media_status_t AMediaDrm_getSecureStops(AMediaDrm *,
-        AMediaDrmSecureStop *secureStops, size_t *numSecureStops);
+        AMediaDrmSecureStop *secureStops, size_t *numSecureStops) __INTRODUCED_IN(21);
 
 /**
  * Process the SecureStop server response message ssRelease.  After authenticating
@@ -320,7 +333,7 @@
  * ssRelease is the server response indicating which secure stops to release
  */
 media_status_t AMediaDrm_releaseSecureStops(AMediaDrm *,
-        const AMediaDrmSecureStop *ssRelease);
+        const AMediaDrmSecureStop *ssRelease) __INTRODUCED_IN(21);
 
 /**
  * String property name: identifies the maker of the DRM engine plugin
@@ -353,7 +366,7 @@
  * will remain valid until the next call to AMediaDrm_getPropertyString.
  */
 media_status_t AMediaDrm_getPropertyString(AMediaDrm *, const char *propertyName,
-        const char **propertyValue);
+        const char **propertyValue) __INTRODUCED_IN(21);
 
 /**
  * Byte array property name: the device unique identifier is established during
@@ -368,19 +381,19 @@
  * will remain valid until the next call to AMediaDrm_getPropertyByteArray.
  */
 media_status_t AMediaDrm_getPropertyByteArray(AMediaDrm *, const char *propertyName,
-        AMediaDrmByteArray *propertyValue);
+        AMediaDrmByteArray *propertyValue) __INTRODUCED_IN(21);
 
 /**
  * Set a DRM engine plugin String property value.
  */
 media_status_t AMediaDrm_setPropertyString(AMediaDrm *, const char *propertyName,
-        const char *value);
+        const char *value) __INTRODUCED_IN(21);
 
 /**
  * Set a DRM engine plugin byte array property value.
  */
 media_status_t AMediaDrm_setPropertyByteArray(AMediaDrm *, const char *propertyName,
-        const uint8_t *value, size_t valueSize);
+        const uint8_t *value, size_t valueSize) __INTRODUCED_IN(21);
 
 /**
  * In addition to supporting decryption of DASH Common Encrypted Media, the
@@ -409,7 +422,7 @@
  */
 media_status_t AMediaDrm_encrypt(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *cipherAlgorithm, uint8_t *keyId, uint8_t *iv,
-        const uint8_t *input, uint8_t *output, size_t dataSize);
+        const uint8_t *input, uint8_t *output, size_t dataSize) __INTRODUCED_IN(21);
 
 /*
  * Decrypt the data referenced by input of length dataSize using algorithm specified
@@ -420,7 +433,7 @@
  */
 media_status_t AMediaDrm_decrypt(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *cipherAlgorithm, uint8_t *keyId, uint8_t *iv,
-        const uint8_t *input, uint8_t *output, size_t dataSize);
+        const uint8_t *input, uint8_t *output, size_t dataSize) __INTRODUCED_IN(21);
 
 /*
  * Generate a signature using the specified macAlgorithm over the message data
@@ -433,7 +446,7 @@
  */
 media_status_t AMediaDrm_sign(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *macAlgorithm, uint8_t *keyId, uint8_t *message, size_t messageSize,
-        uint8_t *signature, size_t *signatureSize);
+        uint8_t *signature, size_t *signatureSize) __INTRODUCED_IN(21);
 
 /*
  * Perform a signature verification using the specified macAlgorithm over the message
@@ -444,10 +457,10 @@
  */
 media_status_t AMediaDrm_verify(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         const char *macAlgorithm, uint8_t *keyId, const uint8_t *message, size_t messageSize,
-        const uint8_t *signature, size_t signatureSize);
-
-#endif /* __ANDROID_API__ >= 21 */
+        const uint8_t *signature, size_t signatureSize) __INTRODUCED_IN(21);
 
 __END_DECLS
 
 #endif //_NDK_MEDIA_DRM_H
+
+/** @} */
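
For context, the AMediaDrm entry points above follow a create/query/release pattern. Below is a minimal sketch of querying plugin properties (illustration only, not part of this change); it assumes the AMediaDrm_createByUUID, AMediaDrm_isCryptoSchemeSupported and AMediaDrm_release entry points declared earlier in this header, and uses the Widevine scheme UUID purely as an example.

    #include <cstdio>
    #include <media/NdkMediaDrm.h>

    // Widevine UUID, used here only as an example scheme.
    static const uint8_t kWidevineUuid[16] = {
        0xed, 0xef, 0x8b, 0xa9, 0x79, 0xd6, 0x4a, 0xce,
        0xa3, 0xc8, 0x27, 0xdc, 0xd5, 0x1d, 0x21, 0xed};

    void dumpDrmPluginInfo() {
        if (!AMediaDrm_isCryptoSchemeSupported(kWidevineUuid, "video/mp4")) {
            return;  // scheme (or container type) not supported on this device
        }
        AMediaDrm *drm = AMediaDrm_createByUUID(kWidevineUuid);
        if (drm == nullptr) return;

        // "vendor" is the string property documented above as identifying the
        // maker of the DRM engine plugin; "version" identifies its version.
        const char *value = nullptr;
        if (AMediaDrm_getPropertyString(drm, "vendor", &value) == AMEDIA_OK) {
            printf("DRM vendor: %s\n", value);   // valid until the next call
        }
        if (AMediaDrm_getPropertyString(drm, "version", &value) == AMEDIA_OK) {
            printf("DRM version: %s\n", value);
        }
        AMediaDrm_release(drm);
    }
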
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index 13aacc9..75f4605 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -14,6 +14,14 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Media
+ * @{
+ */
+
+/**
+ * @file NdkMediaError.h
+ */
 
 /*
  * This file defines an NDK API.
@@ -79,3 +87,5 @@
 __END_DECLS
 
 #endif // _NDK_MEDIA_ERROR_H
+
+/** @} */
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index 1d295e4..9f60891 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -14,6 +14,14 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Media
+ * @{
+ */
+
+/**
+ * @file NdkMediaExtractor.h
+ */
 
 /*
  * This file defines an NDK API.
@@ -41,48 +49,44 @@
 struct AMediaExtractor;
 typedef struct AMediaExtractor AMediaExtractor;
 
-#if __ANDROID_API__ >= 21
-
 /**
  * Create new media extractor
  */
-AMediaExtractor* AMediaExtractor_new();
+AMediaExtractor* AMediaExtractor_new() __INTRODUCED_IN(21);
 
 /**
  * Delete a previously created media extractor
  */
-media_status_t AMediaExtractor_delete(AMediaExtractor*);
+media_status_t AMediaExtractor_delete(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  *  Set the file descriptor from which the extractor will read.
  */
 media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor*, int fd, off64_t offset,
-        off64_t length);
+        off64_t length) __INTRODUCED_IN(21);
 
 /**
  * Set the URI from which the extractor will read.
  */
-media_status_t AMediaExtractor_setDataSource(AMediaExtractor*, const char *location);
+media_status_t AMediaExtractor_setDataSource(AMediaExtractor*,
+        const char *location) __INTRODUCED_IN(21);
         // TODO support headers
 
-#if __ANDROID_API__ >= 28
-
 /**
  * Set the custom data source implementation from which the extractor will read.
  */
-media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*, AMediaDataSource *src);
-
-#endif /* __ANDROID_API__ >= 28 */
+media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*,
+        AMediaDataSource *src) __INTRODUCED_IN(28);
 
 /**
  * Return the number of tracks in the previously specified media file
  */
-size_t AMediaExtractor_getTrackCount(AMediaExtractor*);
+size_t AMediaExtractor_getTrackCount(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Return the format of the specified track. The caller must free the returned format
  */
-AMediaFormat* AMediaExtractor_getTrackFormat(AMediaExtractor*, size_t idx);
+AMediaFormat* AMediaExtractor_getTrackFormat(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
 
 /**
  * Select the specified track. Subsequent calls to readSampleData, getSampleTrackIndex and
@@ -90,41 +94,42 @@
  * Selecting the same track multiple times has no effect; the track is
  * only selected once.
  */
-media_status_t AMediaExtractor_selectTrack(AMediaExtractor*, size_t idx);
+media_status_t AMediaExtractor_selectTrack(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
 
 /**
  * Unselect the specified track. Subsequent calls to readSampleData, getSampleTrackIndex and
  * getSampleTime only retrieve information for the subset of tracks selected.
  */
-media_status_t AMediaExtractor_unselectTrack(AMediaExtractor*, size_t idx);
+media_status_t AMediaExtractor_unselectTrack(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
 
 /**
  * Read the current sample.
  */
-ssize_t AMediaExtractor_readSampleData(AMediaExtractor*, uint8_t *buffer, size_t capacity);
+ssize_t AMediaExtractor_readSampleData(AMediaExtractor*,
+        uint8_t *buffer, size_t capacity) __INTRODUCED_IN(21);
 
 /**
  * Read the current sample's flags.
  */
-uint32_t AMediaExtractor_getSampleFlags(AMediaExtractor*); // see definitions below
+uint32_t AMediaExtractor_getSampleFlags(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Returns the track index the current sample originates from (or -1
  * if no more samples are available)
  */
-int AMediaExtractor_getSampleTrackIndex(AMediaExtractor*);
+int AMediaExtractor_getSampleTrackIndex(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Returns the current sample's presentation time in microseconds.
  * or -1 if no more samples are available.
  */
-int64_t AMediaExtractor_getSampleTime(AMediaExtractor*);
+int64_t AMediaExtractor_getSampleTime(AMediaExtractor*) __INTRODUCED_IN(21);
 
 /**
  * Advance to the next sample. Returns false if no more sample data
  * is available (end of stream).
  */
-bool AMediaExtractor_advance(AMediaExtractor*);
+bool AMediaExtractor_advance(AMediaExtractor*) __INTRODUCED_IN(21);
 
 typedef enum {
     AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC,
@@ -135,7 +140,8 @@
 /**
  *
  */
-media_status_t AMediaExtractor_seekTo(AMediaExtractor*, int64_t seekPosUs, SeekMode mode);
+media_status_t AMediaExtractor_seekTo(AMediaExtractor*,
+        int64_t seekPosUs, SeekMode mode) __INTRODUCED_IN(21);
 
 /**
  * mapping of crypto scheme uuid to the scheme specific data for that scheme
@@ -157,18 +163,16 @@
 /**
  * Get the PSSH info if present.
  */
-PsshInfo* AMediaExtractor_getPsshInfo(AMediaExtractor*);
+PsshInfo* AMediaExtractor_getPsshInfo(AMediaExtractor*) __INTRODUCED_IN(21);
 
 
-AMediaCodecCryptoInfo *AMediaExtractor_getSampleCryptoInfo(AMediaExtractor *);
+AMediaCodecCryptoInfo *AMediaExtractor_getSampleCryptoInfo(AMediaExtractor *) __INTRODUCED_IN(21);
 
 enum {
     AMEDIAEXTRACTOR_SAMPLE_FLAG_SYNC = 1,
     AMEDIAEXTRACTOR_SAMPLE_FLAG_ENCRYPTED = 2,
 };
 
-#if __ANDROID_API__ >= 28
-
 /**
  * Returns the format of the extractor. The caller must free the returned format
  * using AMediaFormat_delete(format).
@@ -176,7 +180,7 @@
  * This function will always return a format; however, the format could be empty
  * (no key-value pairs) if the media container does not provide format information.
  */
-AMediaFormat* AMediaExtractor_getFileFormat(AMediaExtractor*);
+AMediaFormat* AMediaExtractor_getFileFormat(AMediaExtractor*) __INTRODUCED_IN(28);
 
 /**
  * Returns the size of the current sample in bytes, or -1 when no samples are
@@ -188,7 +192,7 @@
  * AMediaExtractor_readSampleData(ex, buf, sampleSize);
  *
  */
-ssize_t AMediaExtractor_getSampleSize(AMediaExtractor*);
+ssize_t AMediaExtractor_getSampleSize(AMediaExtractor*) __INTRODUCED_IN(28);
 
 /**
  * Returns the duration of cached media samples downloaded from a network data source
@@ -201,7 +205,7 @@
  * cached duration cannot be calculated (bitrate, duration, and file size information
  * not available).
  */
-int64_t AMediaExtractor_getCachedDuration(AMediaExtractor *);
+int64_t AMediaExtractor_getCachedDuration(AMediaExtractor *) __INTRODUCED_IN(28);
 
 /**
  * Read the current sample's metadata format into |fmt|. Examples of sample metadata are
@@ -212,18 +216,11 @@
  * Existing key-value pairs in |fmt| would be removed if this API returns AMEDIA_OK.
  * The contents of |fmt| is undefined if this API returns AMEDIA_ERROR_*.
  */
-media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex, AMediaFormat *fmt);
-
-#endif /* __ANDROID_API__ >= 28 */
-
-#if __ANDROID_API__ >= 29
-
-media_status_t AMediaExtractor_disconnect(AMediaExtractor *ex);
-
-#endif /* __ANDROID_API__ >= 29 */
-
-#endif /* __ANDROID_API__ >= 21 */
+media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex,
+        AMediaFormat *fmt) __INTRODUCED_IN(28);
 
 __END_DECLS
 
 #endif // _NDK_MEDIA_EXTRACTOR_H
+
+/** @} */
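
To make the call order of the extractor API above concrete, here is a minimal read loop (illustration only, not part of the change). The file path, the fixed 1 MiB sample buffer, and the abbreviated error handling are assumptions.

    #include <cstdint>
    #include <fcntl.h>
    #include <unistd.h>
    #include <vector>
    #include <media/NdkMediaExtractor.h>
    #include <media/NdkMediaFormat.h>

    // Iterates over the samples of track 0 of a local media file.
    bool dumpSamples(const char *path) {
        int fd = open(path, O_RDONLY);
        if (fd < 0) return false;
        off64_t length = lseek64(fd, 0, SEEK_END);

        AMediaExtractor *ex = AMediaExtractor_new();
        if (AMediaExtractor_setDataSourceFd(ex, fd, 0, length) != AMEDIA_OK ||
            AMediaExtractor_getTrackCount(ex) == 0) {
            AMediaExtractor_delete(ex);
            close(fd);
            return false;
        }

        AMediaFormat *fmt = AMediaExtractor_getTrackFormat(ex, 0);
        const char *mime = nullptr;
        AMediaFormat_getString(fmt, AMEDIAFORMAT_KEY_MIME, &mime);
        // ... pick a decoder based on 'mime' ...
        AMediaFormat_delete(fmt);   // caller owns the returned format

        AMediaExtractor_selectTrack(ex, 0);
        std::vector<uint8_t> buf(1 << 20);   // assumed large enough for one sample
        do {
            ssize_t n = AMediaExtractor_readSampleData(ex, buf.data(), buf.size());
            if (n < 0) break;                          // no more sample data
            int64_t ptsUs = AMediaExtractor_getSampleTime(ex);
            uint32_t flags = AMediaExtractor_getSampleFlags(ex);
            // ... feed (buf.data(), n, ptsUs, flags) to a decoder or muxer ...
            (void)ptsUs; (void)flags;
        } while (AMediaExtractor_advance(ex));

        AMediaExtractor_delete(ex);
        close(fd);
        return true;
    }
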
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 687054e..8f37f7b 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -14,6 +14,15 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Media
+ * @{
+ */
+
+/**
+ * @file NdkMediaFormat.h
+ */
+
 /*
  * This file defines an NDK API.
  * Do not remove methods.
@@ -37,130 +46,126 @@
 struct AMediaFormat;
 typedef struct AMediaFormat AMediaFormat;
 
-#if __ANDROID_API__ >= 21
-
-AMediaFormat *AMediaFormat_new();
-media_status_t AMediaFormat_delete(AMediaFormat*);
+AMediaFormat *AMediaFormat_new() __INTRODUCED_IN(21);
+media_status_t AMediaFormat_delete(AMediaFormat*) __INTRODUCED_IN(21);
 
 /**
  * Human readable representation of the format. The returned string is owned by the format,
  * and remains valid until the next call to toString, or until the format is deleted.
  */
-const char* AMediaFormat_toString(AMediaFormat*);
+const char* AMediaFormat_toString(AMediaFormat*) __INTRODUCED_IN(21);
 
-bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out);
-bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out);
-bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out);
-bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out);
+bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out) __INTRODUCED_IN(21);
+bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out) __INTRODUCED_IN(21);
+bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out) __INTRODUCED_IN(21);
+bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out) __INTRODUCED_IN(21);
 /**
  * The returned data is owned by the format and remains valid as long as the named entry
  * is part of the format.
  */
-bool AMediaFormat_getBuffer(AMediaFormat*, const char *name, void** data, size_t *size);
+bool AMediaFormat_getBuffer(AMediaFormat*, const char *name, void** data, size_t *size) __INTRODUCED_IN(21);
 /**
  * The returned string is owned by the format, and remains valid until the next call to getString,
  * or until the format is deleted.
  */
-bool AMediaFormat_getString(AMediaFormat*, const char *name, const char **out);
+bool AMediaFormat_getString(AMediaFormat*, const char *name, const char **out) __INTRODUCED_IN(21);
 
 
-void AMediaFormat_setInt32(AMediaFormat*, const char* name, int32_t value);
-void AMediaFormat_setInt64(AMediaFormat*, const char* name, int64_t value);
-void AMediaFormat_setFloat(AMediaFormat*, const char* name, float value);
+void AMediaFormat_setInt32(AMediaFormat*, const char* name, int32_t value) __INTRODUCED_IN(21);
+void AMediaFormat_setInt64(AMediaFormat*, const char* name, int64_t value) __INTRODUCED_IN(21);
+void AMediaFormat_setFloat(AMediaFormat*, const char* name, float value) __INTRODUCED_IN(21);
 /**
  * The provided string is copied into the format.
  */
-void AMediaFormat_setString(AMediaFormat*, const char* name, const char* value);
+void AMediaFormat_setString(AMediaFormat*, const char* name, const char* value) __INTRODUCED_IN(21);
 /**
  * The provided data is copied into the format.
  */
-void AMediaFormat_setBuffer(AMediaFormat*, const char* name, void* data, size_t size);
+void AMediaFormat_setBuffer(AMediaFormat*, const char* name, void* data, size_t size) __INTRODUCED_IN(21);
 
 
 
 /**
  * XXX should these be ints/enums that we look up in a table as needed?
  */
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
-extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
-extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
-extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
-extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
-extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
-extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
-extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
-extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
-extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
-extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
-extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
-extern const char* AMEDIAFORMAT_KEY_CSD;
-extern const char* AMEDIAFORMAT_KEY_CSD_0;
-extern const char* AMEDIAFORMAT_KEY_CSD_1;
-extern const char* AMEDIAFORMAT_KEY_CSD_2;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_DURATION;
-extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
-extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS;
-extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
-extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
-extern const char* AMEDIAFORMAT_KEY_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
-extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
-extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
-extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
-extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
-extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
-extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
-extern const char* AMEDIAFORMAT_KEY_LATENCY;
-extern const char* AMEDIAFORMAT_KEY_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
-extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_MIME;
-extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA;
-extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
-extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
-extern const char* AMEDIAFORMAT_KEY_PRIORITY;
-extern const char* AMEDIAFORMAT_KEY_PROFILE;
-extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
-extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
-extern const char* AMEDIAFORMAT_KEY_ROTATION;
-extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_SEI;
-extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_STRIDE;
-extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID;
-extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
-extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_TIME_US;
-extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
-extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_BIT_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_0 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_1 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_2 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DURATION __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FRAME_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_HEIGHT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_IS_ADTS __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_LANGUAGE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_LATENCY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MIME __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PRIORITY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PROFILE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_ROTATION __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_SEI __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_STRIDE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TIME_US __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_WIDTH __INTRODUCED_IN(21);
 
-#endif /* __ANDROID_API__ >= 21 */
-
-#if __ANDROID_API__ >= 28
-bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out);
+bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out) __INTRODUCED_IN(28);
 bool AMediaFormat_getRect(AMediaFormat*, const char *name,
-                          int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
+        int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) __INTRODUCED_IN(28);
 
-void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value);
-void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value);
+void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value) __INTRODUCED_IN(28);
+void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value) __INTRODUCED_IN(28);
 void AMediaFormat_setRect(AMediaFormat*, const char* name,
-                          int32_t left, int32_t top, int32_t right, int32_t bottom);
-#endif /* __ANDROID_API__ >= 28 */
+        int32_t left, int32_t top, int32_t right, int32_t bottom) __INTRODUCED_IN(28);
 
 __END_DECLS
 
 #endif // _NDK_MEDIA_FORMAT_H
+
+/** @} */
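
A short round trip through the getters and setters above, illustrating the ownership rules spelled out in the comments (sketch only; the MIME string and numeric values are arbitrary):

    #include <media/NdkMediaFormat.h>

    // Builds a bare-bones audio format description and reads the values back.
    void formatRoundTrip() {
        AMediaFormat *fmt = AMediaFormat_new();

        AMediaFormat_setString(fmt, AMEDIAFORMAT_KEY_MIME, "audio/mp4a-latm"); // AAC
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_SAMPLE_RATE, 48000);
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_CHANNEL_COUNT, 2);
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_BIT_RATE, 128000);

        int32_t rate = 0;
        const char *mime = nullptr;
        if (AMediaFormat_getInt32(fmt, AMEDIAFORMAT_KEY_SAMPLE_RATE, &rate) &&
            AMediaFormat_getString(fmt, AMEDIAFORMAT_KEY_MIME, &mime)) {
            // 'mime' stays valid only until the next getString call or until
            // the format is deleted, as documented above.
        }

        // toString() is handy for logging; the returned string is owned by fmt.
        const char *dump = AMediaFormat_toString(fmt);
        (void)dump;

        AMediaFormat_delete(fmt);
    }
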
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 1ecd1ca..75c70ed 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -14,6 +14,14 @@
  * limitations under the License.
  */
 
+/**
+ * @addtogroup Media
+ * @{
+ */
+
+/**
+ * @file NdkMediaMuxer.h
+ */
 
 /*
  * This file defines an NDK API.
@@ -45,17 +53,15 @@
     AMEDIAMUXER_OUTPUT_FORMAT_WEBM   = 1,
 } OutputFormat;
 
-#if __ANDROID_API__ >= 21
-
 /**
  * Create new media muxer
  */
-AMediaMuxer* AMediaMuxer_new(int fd, OutputFormat format);
+AMediaMuxer* AMediaMuxer_new(int fd, OutputFormat format) __INTRODUCED_IN(21);
 
 /**
  * Delete a previously created media muxer
  */
-media_status_t AMediaMuxer_delete(AMediaMuxer*);
+media_status_t AMediaMuxer_delete(AMediaMuxer*) __INTRODUCED_IN(21);
 
 /**
  * Set and store the geodata (latitude and longitude) in the output file.
@@ -68,7 +74,8 @@
  * Latitude must be in the range [-90, 90].
  * Longitude must be in the range [-180, 180].
  */
-media_status_t AMediaMuxer_setLocation(AMediaMuxer*, float latitude, float longitude);
+media_status_t AMediaMuxer_setLocation(AMediaMuxer*,
+        float latitude, float longitude) __INTRODUCED_IN(21);
 
 /**
  * Sets the orientation hint for output video playback.
@@ -82,26 +89,26 @@
  * The angle is specified in degrees, clockwise.
  * The supported angles are 0, 90, 180, and 270 degrees.
  */
-media_status_t AMediaMuxer_setOrientationHint(AMediaMuxer*, int degrees);
+media_status_t AMediaMuxer_setOrientationHint(AMediaMuxer*, int degrees) __INTRODUCED_IN(21);
 
 /**
  * Adds a track with the specified format.
  * Returns the index of the new track or a negative value in case of failure,
  * which can be interpreted as a media_status_t.
  */
-ssize_t AMediaMuxer_addTrack(AMediaMuxer*, const AMediaFormat* format);
+ssize_t AMediaMuxer_addTrack(AMediaMuxer*, const AMediaFormat* format) __INTRODUCED_IN(21);
 
 /**
  * Start the muxer. Should be called after AMediaMuxer_addTrack and
  * before AMediaMuxer_writeSampleData.
  */
-media_status_t AMediaMuxer_start(AMediaMuxer*);
+media_status_t AMediaMuxer_start(AMediaMuxer*) __INTRODUCED_IN(21);
 
 /**
  * Stops the muxer.
  * Once the muxer stops, it cannot be restarted.
  */
-media_status_t AMediaMuxer_stop(AMediaMuxer*);
+media_status_t AMediaMuxer_stop(AMediaMuxer*) __INTRODUCED_IN(21);
 
 /**
  * Writes an encoded sample into the muxer.
@@ -111,10 +118,11 @@
  * by the encoder.)
  */
 media_status_t AMediaMuxer_writeSampleData(AMediaMuxer *muxer,
-        size_t trackIdx, const uint8_t *data, const AMediaCodecBufferInfo *info);
-
-#endif /* __ANDROID_API__ >= 21 */
+        size_t trackIdx, const uint8_t *data,
+        const AMediaCodecBufferInfo *info) __INTRODUCED_IN(21);
 
 __END_DECLS
 
 #endif // _NDK_MEDIA_MUXER_H
+
+/** @} */
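
For orientation, a minimal muxing sketch using the declarations above (illustration only). The output path, the single pre-encoded sample, and the assumption that 'format' already carries the track's codec-specific data are all hypothetical.

    #include <fcntl.h>
    #include <unistd.h>
    #include <media/NdkMediaCodec.h>   // AMediaCodecBufferInfo
    #include <media/NdkMediaMuxer.h>

    // Writes one already-encoded sample into an MP4 file.
    bool writeOneSample(const AMediaFormat *format,
                        const uint8_t *sample, size_t sampleSize, int64_t ptsUs) {
        int fd = open("/sdcard/out.mp4", O_CREAT | O_TRUNC | O_WRONLY, 0644);
        if (fd < 0) return false;

        AMediaMuxer *mux = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
        ssize_t track = AMediaMuxer_addTrack(mux, format);
        if (track < 0) {  // negative return is a media_status_t error code
            AMediaMuxer_delete(mux);
            close(fd);
            return false;
        }

        AMediaMuxer_start(mux);   // after addTrack, before writeSampleData

        AMediaCodecBufferInfo info{};
        info.offset = 0;
        info.size = static_cast<int32_t>(sampleSize);
        info.presentationTimeUs = ptsUs;
        info.flags = 0;   // sync-sample flagging omitted for brevity
        AMediaMuxer_writeSampleData(mux, static_cast<size_t>(track), sample, &info);

        AMediaMuxer_stop(mux);
        AMediaMuxer_delete(mux);
        close(fd);
        return true;
    }
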
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index fb56694..d828d6a 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -32,55 +32,66 @@
     AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
-    AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+    AMEDIAFORMAT_KEY_AAC_PROFILE; # var introduced=21
     AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
     AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
     AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
-    AMEDIAFORMAT_KEY_BIT_RATE; # var
+    AMEDIAFORMAT_KEY_BIT_RATE; # var introduced=21
     AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
-    AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
-    AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
-    AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+    AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var introduced=21
+    AMEDIAFORMAT_KEY_CHANNEL_MASK; # var introduced=21
+    AMEDIAFORMAT_KEY_COLOR_FORMAT; # var introduced=21
     AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
     AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_0; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_1; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_2; # var introduced=28
     AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
-    AMEDIAFORMAT_KEY_DURATION; # var
-    AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
-    AMEDIAFORMAT_KEY_FRAME_RATE; # var
+    AMEDIAFORMAT_KEY_DISPLAY_HEIGHT; # var introduced=28
+    AMEDIAFORMAT_KEY_DISPLAY_WIDTH; # var introduced=28
+    AMEDIAFORMAT_KEY_DURATION; # var introduced=21
+    AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var introduced=21
+    AMEDIAFORMAT_KEY_FRAME_RATE; # var introduced=21
     AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
     AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
     AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
-    AMEDIAFORMAT_KEY_HEIGHT; # var
+    AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
     AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
-    AMEDIAFORMAT_KEY_IS_ADTS; # var
-    AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
-    AMEDIAFORMAT_KEY_IS_DEFAULT; # var
-    AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
-    AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
-    AMEDIAFORMAT_KEY_LANGUAGE; # var
+    AMEDIAFORMAT_KEY_IS_ADTS; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_DEFAULT; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var introduced=21
+    AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
+    AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
     AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
     AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
-    AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
-    AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
-    AMEDIAFORMAT_KEY_MAX_WIDTH; # var
-    AMEDIAFORMAT_KEY_MIME; # var
+    AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
+    AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
+    AMEDIAFORMAT_KEY_MAX_WIDTH; # var introduced=21
+    AMEDIAFORMAT_KEY_MIME; # var introduced=21
+    AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
     AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
     AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
     AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
     AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
-    AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
-    AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+    AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
+    AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
     AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
-    AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+    AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
+    AMEDIAFORMAT_KEY_SEI; # var introduced=28
     AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
-    AMEDIAFORMAT_KEY_STRIDE; # var
+    AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
+    AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID; # var introduced=28
     AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
+    AMEDIAFORMAT_KEY_TIME_US; # var introduced=28
+    AMEDIAFORMAT_KEY_TRACK_INDEX; # var introduced=28
     AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
-    AMEDIAFORMAT_KEY_WIDTH; # var
+    AMEDIAFORMAT_KEY_WIDTH; # var introduced=21
     AMediaCodecActionCode_isRecoverable; # introduced=28
     AMediaCodecActionCode_isTransient; # introduced=28
     AMediaCodecCryptoInfo_delete;
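
The "introduced=" annotations above, together with the __INTRODUCED_IN() attributes that replace the old "#if __ANDROID_API__ >= N" guards in the headers, move API-level gating from compile time to link and run time. One common way for code whose minSdkVersion is below the introducing level to call a newer symbol is to resolve it dynamically; the sketch below is one such pattern, not something this change mandates, and the compat wrapper name is made up.

    #include <dlfcn.h>
    #include <media/NdkMediaExtractor.h>

    // AMediaExtractor_getFileFormat is introduced=28; resolving it at run time
    // lets the same binary keep loading on API 21 devices.
    using GetFileFormatFn = AMediaFormat *(*)(AMediaExtractor *);

    AMediaFormat *getFileFormatCompat(AMediaExtractor *ex) {
        static GetFileFormatFn fn = [] {
            void *lib = dlopen("libmediandk.so", RTLD_NOW);
            return lib ? reinterpret_cast<GetFileFormatFn>(
                                 dlsym(lib, "AMediaExtractor_getFileFormat"))
                       : static_cast<GetFileFormatFn>(nullptr);
        }();
        return fn != nullptr ? fn(ex) : nullptr;   // nullptr on pre-28 devices
    }
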
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index d6dae5b..de8e46a 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -21,9 +21,11 @@
         "MemoryLeakTrackUtil.cpp",
         "ProcessInfo.cpp",
         "SchedulingPolicyService.cpp",
+        "ServiceUtilities.cpp",
     ],
     shared_libs: [
         "libbinder",
+        "libcutils",
         "liblog",
         "libutils",
         "libmemunreachable",
diff --git a/media/utils/ISchedulingPolicyService.cpp b/media/utils/ISchedulingPolicyService.cpp
index 22fbc97..b210404 100644
--- a/media/utils/ISchedulingPolicyService.cpp
+++ b/media/utils/ISchedulingPolicyService.cpp
@@ -25,6 +25,7 @@
 // Keep in sync with frameworks/base/core/java/android/os/ISchedulingPolicyService.aidl
 enum {
     REQUEST_PRIORITY_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
+    REQUEST_CPUSET_BOOST,
 };
 
 // ----------------------------------------------------------------------
@@ -60,6 +61,23 @@
         }
         return reply.readInt32();
     }
+
+    virtual int requestCpusetBoost(bool enable, const sp<IInterface>& client)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISchedulingPolicyService::getInterfaceDescriptor());
+        data.writeInt32(enable);
+        data.writeStrongBinder(IInterface::asBinder(client));
+        status_t status = remote()->transact(REQUEST_CPUSET_BOOST, data, &reply, 0);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        // fail on exception: force binder reconnection
+        if (reply.readExceptionCode() != 0) {
+            return DEAD_OBJECT;
+        }
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(SchedulingPolicyService, "android.os.ISchedulingPolicyService");
@@ -71,6 +89,7 @@
 {
     switch (code) {
     case REQUEST_PRIORITY_TRANSACTION:
+    case REQUEST_CPUSET_BOOST:
         // Not reached
         return NO_ERROR;
         break;
diff --git a/media/utils/ISchedulingPolicyService.h b/media/utils/ISchedulingPolicyService.h
index 1015677..e4f7c0d 100644
--- a/media/utils/ISchedulingPolicyService.h
+++ b/media/utils/ISchedulingPolicyService.h
@@ -29,6 +29,7 @@
     virtual int         requestPriority(/*pid_t*/int32_t pid, /*pid_t*/int32_t tid,
                                         int32_t prio, bool isForApp, bool asynchronous) = 0;
 
+    virtual int         requestCpusetBoost(bool enable, const sp<IInterface>& client) = 0;
 };
 
 class BnSchedulingPolicyService : public BnInterface<ISchedulingPolicyService>
diff --git a/media/utils/SchedulingPolicyService.cpp b/media/utils/SchedulingPolicyService.cpp
index d7055ef..4e9792f 100644
--- a/media/utils/SchedulingPolicyService.cpp
+++ b/media/utils/SchedulingPolicyService.cpp
@@ -59,4 +59,31 @@
     return ret;
 }
 
+int requestCpusetBoost(bool enable, const sp<IInterface> &client)
+{
+    int ret;
+    sMutex.lock();
+    sp<ISchedulingPolicyService> sps = sSchedulingPolicyService;
+    sMutex.unlock();
+    if (sps == 0) {
+        sp<IBinder> binder = defaultServiceManager()->checkService(_scheduling_policy);
+        if (binder == 0) {
+            return DEAD_OBJECT;
+        }
+        sps = interface_cast<ISchedulingPolicyService>(binder);
+        sMutex.lock();
+        sSchedulingPolicyService = sps;
+        sMutex.unlock();
+    }
+    ret = sps->requestCpusetBoost(enable, client);
+    if (ret != DEAD_OBJECT) {
+        return ret;
+    }
+    ALOGW("SchedulingPolicyService died");
+    sMutex.lock();
+    sSchedulingPolicyService.clear();
+    sMutex.unlock();
+    return ret;
+}
+
 }   // namespace android
diff --git a/services/audioflinger/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
similarity index 84%
rename from services/audioflinger/ServiceUtilities.cpp
rename to media/utils/ServiceUtilities.cpp
index aa267ea..0d50be0 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -14,12 +14,13 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "ServiceUtilities"
+
 #include <binder/AppOpsManager.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/PermissionCache.h>
-#include <private/android_filesystem_config.h>
-#include "ServiceUtilities.h"
+#include "mediautils/ServiceUtilities.h"
 
 /* When performing permission checks we do not use permission cache for
  * runtime permissions (protection level dangerous) as they may change at
@@ -32,24 +33,6 @@
 
 static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
 
-// Not valid until initialized by AudioFlinger constructor.  It would have to be
-// re-initialized if the process containing AudioFlinger service forks (which it doesn't).
-// This is often used to validate binder interface calls within audioserver
-// (e.g. AudioPolicyManager to AudioFlinger).
-pid_t getpid_cached;
-
-// A trusted calling UID may specify the client UID as part of a binder interface call.
-// otherwise the calling UID must be equal to the client UID.
-bool isTrustedCallingUid(uid_t uid) {
-    switch (uid) {
-    case AID_MEDIA:
-    case AID_AUDIOSERVER:
-        return true;
-    default:
-        return false;
-    }
-}
-
 static String16 resolveCallingPackage(PermissionController& permissionController,
         const String16& opPackageName, uid_t uid) {
     if (opPackageName.size() > 0) {
@@ -71,16 +54,11 @@
     return packages[0];
 }
 
-static inline bool isAudioServerOrRoot(uid_t uid) {
-    // AID_ROOT is OK for command-line tests.  Native unforked audioserver always OK.
-    return uid == AID_ROOT || uid == AID_AUDIOSERVER ;
-}
-
 static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
         uid_t uid, bool start) {
     // Okay to not track in app ops as audio server is us and if
     // device is rooted security model is considered compromised.
-    if (isAudioServerOrRoot(uid)) return true;
+    if (isAudioServerOrRootUid(uid)) return true;
 
     // We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
     // may open a record track on behalf of a client.  Note that pid may be a tid.
@@ -127,7 +105,7 @@
 void finishRecording(const String16& opPackageName, uid_t uid) {
     // Okay to not track in app ops as audio server is us and if
     // device is rooted security model is considered compromised.
-    if (isAudioServerOrRoot(uid)) return;
+    if (isAudioServerOrRootUid(uid)) return;
 
     PermissionController permissionController;
     String16 resolvedOpPackageName = resolveCallingPackage(
@@ -142,7 +120,7 @@
 }
 
 bool captureAudioOutputAllowed(pid_t pid, uid_t uid) {
-    if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+    if (isAudioServerOrRootUid(uid)) return true;
     static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
     bool ok = PermissionCache::checkPermission(sCaptureAudioOutput, pid, uid);
     if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
@@ -163,7 +141,8 @@
 }
 
 bool settingsAllowed() {
-    if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+    // given this is a permission check, could this be isAudioServerOrRootUid()?
+    if (isAudioServerUid(IPCThreadState::self()->getCallingUid())) return true;
     static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
     // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
     bool ok = PermissionCache::checkCallingPermission(sAudioSettings);
@@ -180,7 +159,6 @@
 }
 
 bool dumpAllowed() {
-    // don't optimize for same pid, since mediaserver never dumps itself
     static const String16 sDump("android.permission.DUMP");
     // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
     bool ok = PermissionCache::checkCallingPermission(sDump);
@@ -196,4 +174,29 @@
     return ok;
 }
 
+status_t checkIMemory(const sp<IMemory>& iMemory)
+{
+    if (iMemory == 0) {
+        ALOGE("%s check failed: NULL IMemory pointer", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    sp<IMemoryHeap> heap = iMemory->getMemory();
+    if (heap == 0) {
+        ALOGE("%s check failed: NULL heap pointer", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    off_t size = lseek(heap->getHeapID(), 0, SEEK_END);
+    lseek(heap->getHeapID(), 0, SEEK_SET);
+
+    if (iMemory->pointer() == NULL || size < (off_t)iMemory->size()) {
+        ALOGE("%s check failed: pointer %p size %zu fd size %u",
+              __FUNCTION__, iMemory->pointer(), iMemory->size(), (uint32_t)size);
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
 } // namespace android
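
The checkIMemory() helper added above validates a client-supplied IMemory (non-null pointer, non-null heap, claimed size no larger than the backing fd) before a service touches the shared memory. A hedged sketch of a caller; the surrounding method name is hypothetical.

    #include <binder/IMemory.h>
    #include <mediautils/ServiceUtilities.h>

    namespace android {

    // Hypothetical service entry point that accepts a shared-memory buffer
    // from a client.
    status_t setSharedBuffer(const sp<IMemory>& buffer) {
        // Reject bad buffers before dereferencing anything.
        status_t status = checkIMemory(buffer);
        if (status != NO_ERROR) {
            return status;
        }
        // ... safe to read up to buffer->size() bytes from buffer->pointer() ...
        return NO_ERROR;
    }

    }  // namespace android
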
diff --git a/media/utils/include/mediautils/SchedulingPolicyService.h b/media/utils/include/mediautils/SchedulingPolicyService.h
index 47d8734..a33539f 100644
--- a/media/utils/include/mediautils/SchedulingPolicyService.h
+++ b/media/utils/include/mediautils/SchedulingPolicyService.h
@@ -17,8 +17,11 @@
 #ifndef _ANDROID_SCHEDULING_POLICY_SERVICE_H
 #define _ANDROID_SCHEDULING_POLICY_SERVICE_H
 
+#include <utils/RefBase.h>
+
 namespace android {
 
+class IInterface;
 // Request elevated priority for thread tid, whose thread group leader must be pid.
 // The priority parameter is currently restricted to either 1 or 2.
 // The asynchronous parameter should be 'true' to return immediately,
@@ -26,6 +29,14 @@
 // The default value 'false' means to return after request has been enqueued and executed.
 int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool isForApp, bool asynchronous = false);
 
+// Request to move media.codec process between SP_FOREGROUND and SP_TOP_APP.
+// When 'enable' is 'true', server will attempt to move media.codec process
+// from SP_FOREGROUND into SP_TOP_APP cpuset. A valid 'client' must be provided
+// for the server to receive death notifications. When 'enable' is 'false', server
+// will attempt to move media.codec process back to the original cpuset, and
+// 'client' is ignored in this case.
+int requestCpusetBoost(bool enable, const sp<IInterface> &client);
+
 }   // namespace android
 
 #endif  // _ANDROID_SCHEDULING_POLICY_SERVICE_H
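
The comment above defines the contract for requestCpusetBoost(). The sketch below shows a caller toggling the boost around codec activity; everything except requestCpusetBoost() itself is illustrative, and the client binder is assumed to stay alive while the boost is enabled so the server can observe its death.

    #define LOG_TAG "CpusetBoostExample"

    #include <binder/IInterface.h>
    #include <mediautils/SchedulingPolicyService.h>
    #include <utils/Errors.h>
    #include <utils/Log.h>

    namespace android {

    // 'client' is typically the caller's own binder interface; it is only
    // used for death notification while the boost is active.
    void setCodecBoost(const sp<IInterface> &client, bool busy) {
        int err = requestCpusetBoost(busy, client);
        if (err != OK) {
            // DEAD_OBJECT means the scheduling policy service went away; the
            // wrapper drops its cached handle, so a later call reconnects.
            ALOGW("requestCpusetBoost(%d) failed: %d", busy, err);
        }
    }

    }  // namespace android
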
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
new file mode 100644
index 0000000..0911744
--- /dev/null
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include <binder/IMemory.h>
+#include <binder/PermissionController.h>
+#include <cutils/multiuser.h>
+#include <private/android_filesystem_config.h>
+
+namespace android {
+
+// Audio permission utilities
+
+// Used for calls that should originate from system services.
+// We allow that some services might have separate processes to
+// handle multiple users, e.g. u10_system, u10_bluetooth, u10_radio.
+static inline bool isServiceUid(uid_t uid) {
+    return multiuser_get_app_id(uid) < AID_APP_START;
+}
+
+// Used for calls that should originate from audioserver.
+static inline bool isAudioServerUid(uid_t uid) {
+    return uid == AID_AUDIOSERVER;
+}
+
+// Used for some permission checks.
+// AID_ROOT is OK for command-line tests.  Native audioserver always OK.
+static inline bool isAudioServerOrRootUid(uid_t uid) {
+    return uid == AID_AUDIOSERVER || uid == AID_ROOT;
+}
+
+// Used for calls that should come from system server or internal.
+// Note: system server is multiprocess for multiple users.  audioserver is not.
+static inline bool isAudioServerOrSystemServerUid(uid_t uid) {
+    return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
+}
+
+// Mediaserver may forward the client PID and UID as part of a binder interface call;
+// otherwise the calling UID must be equal to the client UID.
+static inline bool isAudioServerOrMediaServerUid(uid_t uid) {
+    switch (uid) {
+    case AID_MEDIA:
+    case AID_AUDIOSERVER:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
+void finishRecording(const String16& opPackageName, uid_t uid);
+bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
+bool captureHotwordAllowed(pid_t pid, uid_t uid);
+bool settingsAllowed();
+bool modifyAudioRoutingAllowed();
+bool dumpAllowed();
+bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
+status_t checkIMemory(const sp<IMemory>& iMemory);
+}
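
To show the intended use of the UID helpers above, here is a sketch of a binder-side guard in the spirit of the isTrustedCallingUid() check that this change replaces with isAudioServerOrMediaServerUid(); the wrapper function name is hypothetical.

    #include <binder/IPCThreadState.h>
    #include <mediautils/ServiceUtilities.h>

    namespace android {

    // Resolves the effective client UID for a binder call: a trusted caller
    // (audioserver or mediaserver) may act on behalf of another client UID,
    // while everyone else is pinned to their own calling UID.
    uid_t resolveClientUid(uid_t requestedClientUid) {
        const uid_t callingUid = IPCThreadState::self()->getCallingUid();
        if (isAudioServerOrMediaServerUid(callingUid)) {
            return requestedClientUid;   // trusted caller, honor the request
        }
        return callingUid;               // untrusted caller acts as itself
    }

    }  // namespace android
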
diff --git a/packages/MediaComponents/Android.mk b/packages/MediaComponents/Android.mk
index b0d8e7d..55a5424 100644
--- a/packages/MediaComponents/Android.mk
+++ b/packages/MediaComponents/Android.mk
@@ -14,59 +14,59 @@
 # limitations under the License.
 #
 
-LOCAL_PATH := $(call my-dir)
-
-ifneq ($(TARGET_BUILD_PDK),true)
-# Build MediaComponents only if this is not a PDK build.  MediaComponents won't
-# build in PDK builds because frameworks/base/core/java is not available but
-# IMediaSession2.aidl and IMediaController2.aidl are using classes from
-# frameworks/base/core/java.
-
-include $(CLEAR_VARS)
-
-LOCAL_PACKAGE_NAME := MediaComponents
-LOCAL_MODULE_OWNER := google
-
-# TODO: create a separate key for this package.
-LOCAL_CERTIFICATE := platform
-
-# TODO: Use System SDK once public APIs are approved
-# LOCAL_SDK_VERSION := system_current
-LOCAL_PRIVATE_PLATFORM_APIS := true
-
-LOCAL_SRC_FILES := \
-    $(call all-java-files-under, src) \
-    $(call all-Iaidl-files-under, src)
-
-LOCAL_PROGUARD_FLAG_FILES := proguard.cfg
-
-LOCAL_MULTILIB := first
-
-LOCAL_JAVA_LIBRARIES += android-support-annotations
-
-# To embed native libraries in package, uncomment the lines below.
-#LOCAL_MODULE_TAGS := samples
-#LOCAL_JNI_SHARED_LIBRARIES := \
-#    libaacextractor \
-#    libamrextractor \
-#    libflacextractor \
-#    libmidiextractor \
-#    libmkvextractor \
-#    libmp3extractor \
-#    libmp4extractor \
-#    libmpeg2extractor \
-#    liboggextractor \
-#    libwavextractor \
-
-# TODO: Remove dependency with other support libraries.
-LOCAL_STATIC_ANDROID_LIBRARIES += \
-    android-support-v4 \
-    android-support-v7-appcompat \
-    android-support-v7-palette
-LOCAL_USE_AAPT2 := true
-
-include $(BUILD_PACKAGE)
-
-endif  # ifneq ($(TARGET_BUILD_PDK),true)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
+# This package is excluded from build for now since APIs using this apk became hidden.
+#
+#LOCAL_PATH := $(call my-dir)
+#ifneq ($(TARGET_BUILD_PDK),true) # Build MediaComponents only if this is not a PDK build.  MediaComponents won't
+## build in PDK builds because frameworks/base/core/java is not available but
+## IMediaSession2.aidl and IMediaController2.aidl are using classes from
+## frameworks/base/core/java.
+#
+#include $(CLEAR_VARS)
+#
+#LOCAL_PACKAGE_NAME := MediaComponents
+#LOCAL_MODULE_OWNER := google
+#
+## TODO: create a separate key for this package.
+#LOCAL_CERTIFICATE := platform
+#
+## TODO: Use System SDK once public APIs are approved
+## LOCAL_SDK_VERSION := system_current
+#LOCAL_PRIVATE_PLATFORM_APIS := true
+#
+#LOCAL_SRC_FILES := \
+#    $(call all-java-files-under, src) \
+#    $(call all-Iaidl-files-under, src)
+#
+#LOCAL_PROGUARD_FLAG_FILES := proguard.cfg
+#
+#LOCAL_MULTILIB := first
+#
+#LOCAL_JAVA_LIBRARIES += androidx.annotation_annotation
+#
+## To embed native libraries in package, uncomment the lines below.
+##LOCAL_MODULE_TAGS := samples
+##LOCAL_JNI_SHARED_LIBRARIES := \
+##    libaacextractor \
+##    libamrextractor \
+##    libflacextractor \
+##    libmidiextractor \
+##    libmkvextractor \
+##    libmp3extractor \
+##    libmp4extractor \
+##    libmpeg2extractor \
+##    liboggextractor \
+##    libwavextractor \
+#
+## TODO: Remove dependency with other support libraries.
+#LOCAL_STATIC_ANDROID_LIBRARIES += \
+#    androidx.legacy_legacy-support-v4 \
+#    androidx.appcompat_appcompat \
+#    androidx.palette_palette
+#LOCAL_USE_AAPT2 := true
+#
+#include $(BUILD_PACKAGE)
+#
+#endif  # ifneq ($(TARGET_BUILD_PDK),true)
+#
+#include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/packages/MediaComponents/AndroidManifest.xml b/packages/MediaComponents/AndroidManifest.xml
index 061ae44..50fdca1 100644
--- a/packages/MediaComponents/AndroidManifest.xml
+++ b/packages/MediaComponents/AndroidManifest.xml
@@ -8,6 +8,7 @@
         android:label="Media Components Update"
         android:multiArch="true"
         android:allowBackup="false"
+        android:hasCode="false"
         android:extractNativeLibs="false">
     </application>
 
diff --git a/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
index b304471..f6f7be5 100644
--- a/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
+++ b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
@@ -169,7 +169,7 @@
             android:layout_height="wrap_content"
             android:fillViewport="true"
             android:scrollIndicators="top|bottom">
-            <android.support.v7.widget.ButtonBarLayout
+            <androidx.appcompat.widget.ButtonBarLayout
                 android:layout_width="match_parent"
                 android:layout_height="wrap_content"
                 android:gravity="bottom"
@@ -184,7 +184,7 @@
                     style="?android:attr/buttonBarNeutralButtonStyle"
                     android:layout_width="wrap_content"
                     android:layout_height="wrap_content"/>
-                <android.support.v4.widget.Space
+                <androidx.legacy.widget.Space
                     android:id="@+id/spacer"
                     android:layout_width="0dp"
                     android:layout_height="0dp"
@@ -200,7 +200,7 @@
                     style="?android:attr/buttonBarPositiveButtonStyle"
                     android:layout_width="wrap_content"
                     android:layout_height="wrap_content"/>
-            </android.support.v7.widget.ButtonBarLayout>
+            </androidx.appcompat.widget.ButtonBarLayout>
         </ScrollView>
     </LinearLayout>
 </FrameLayout>
diff --git a/packages/MediaComponents/res/layout/mr_controller_volume_item.xml b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
index a89058b..12d85ae 100644
--- a/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
+++ b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
@@ -40,7 +40,7 @@
                        android:layout_marginBottom="8dp"
                        android:scaleType="fitCenter"
                        android:src="?attr/mediaRouteAudioTrackDrawable" />
-            <android.support.v7.app.MediaRouteVolumeSlider
+            <androidx.mediarouter.app.MediaRouteVolumeSlider
                 android:id="@+id/mr_volume_slider"
                 android:layout_width="fill_parent"
                 android:layout_height="40dp"
diff --git a/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
index c909099..0327beb 100644
--- a/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
@@ -19,7 +19,6 @@
 import android.content.Context;
 import android.media.MediaBrowser2;
 import android.media.MediaBrowser2.BrowserCallback;
-import android.media.MediaController2;
 import android.media.MediaItem2;
 import android.media.SessionToken2;
 import android.media.update.MediaBrowser2Provider;
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Impl.java b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
index 249365a..2883087 100644
--- a/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
@@ -16,7 +16,6 @@
 
 package com.android.media;
 
-import static android.media.SessionCommand2.COMMAND_CODE_SET_VOLUME;
 import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_ADD_ITEM;
 import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REMOVE_ITEM;
 import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REPLACE_ITEM;
@@ -30,6 +29,7 @@
 import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_MEDIA_ID;
 import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_SEARCH;
 import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_URI;
+import static android.media.SessionCommand2.COMMAND_CODE_SET_VOLUME;
 
 import android.app.PendingIntent;
 import android.content.ComponentName;
@@ -44,11 +44,11 @@
 import android.media.MediaMetadata2;
 import android.media.MediaPlaylistAgent.RepeatMode;
 import android.media.MediaPlaylistAgent.ShuffleMode;
-import android.media.SessionCommand2;
 import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
 import android.media.MediaSessionService2;
 import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
 import android.media.SessionToken2;
 import android.media.update.MediaController2Provider;
 import android.net.Uri;
@@ -58,10 +58,11 @@
 import android.os.RemoteException;
 import android.os.ResultReceiver;
 import android.os.UserHandle;
-import android.support.annotation.GuardedBy;
 import android.text.TextUtils;
 import android.util.Log;
 
+import androidx.annotation.GuardedBy;
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Executor;
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Stub.java b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
index 2cfc5df..ece4a00 100644
--- a/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
@@ -21,8 +21,8 @@
 import android.media.MediaController2;
 import android.media.MediaItem2;
 import android.media.MediaMetadata2;
-import android.media.SessionCommand2;
 import android.media.MediaSession2.CommandButton;
+import android.media.SessionCommand2;
 import android.media.SessionCommandGroup2;
 import android.os.Bundle;
 import android.os.ResultReceiver;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
index 4ec6042..72ecf54 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
@@ -44,13 +44,13 @@
 import android.media.MediaPlaylistAgent.PlaylistEventCallback;
 import android.media.MediaSession2;
 import android.media.MediaSession2.Builder;
-import android.media.SessionCommand2;
 import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
 import android.media.MediaSession2.ControllerInfo;
 import android.media.MediaSession2.OnDataSourceMissingHelper;
 import android.media.MediaSession2.SessionCallback;
 import android.media.MediaSessionService2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
 import android.media.SessionToken2;
 import android.media.VolumeProvider2;
 import android.media.session.MediaSessionManager;
@@ -60,10 +60,11 @@
 import android.os.Parcelable;
 import android.os.Process;
 import android.os.ResultReceiver;
-import android.support.annotation.GuardedBy;
 import android.text.TextUtils;
 import android.util.Log;
 
+import androidx.annotation.GuardedBy;
+
 import java.lang.ref.WeakReference;
 import java.lang.reflect.Field;
 import java.util.ArrayList;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
index ec657d7..11ccd9f 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
@@ -22,11 +22,11 @@
 import android.media.MediaItem2;
 import android.media.MediaLibraryService2.LibraryRoot;
 import android.media.MediaMetadata2;
-import android.media.SessionCommand2;
 import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
 import android.media.MediaSession2.ControllerInfo;
 import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
 import android.media.VolumeProvider2;
 import android.net.Uri;
 import android.os.Binder;
@@ -35,13 +35,14 @@
 import android.os.IBinder;
 import android.os.RemoteException;
 import android.os.ResultReceiver;
-import android.support.annotation.GuardedBy;
-import android.support.annotation.NonNull;
 import android.text.TextUtils;
 import android.util.ArrayMap;
 import android.util.Log;
 import android.util.SparseArray;
 
+import androidx.annotation.GuardedBy;
+import androidx.annotation.NonNull;
+
 import com.android.media.MediaLibraryService2Impl.MediaLibrarySessionImpl;
 import com.android.media.MediaSession2Impl.CommandButtonImpl;
 import com.android.media.MediaSession2Impl.CommandGroupImpl;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
index c33eb65..d975839 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
@@ -20,7 +20,6 @@
 
 import android.app.Notification;
 import android.app.NotificationManager;
-import android.content.Context;
 import android.content.Intent;
 import android.media.MediaPlayerBase;
 import android.media.MediaPlayerBase.PlayerEventCallback;
@@ -31,9 +30,10 @@
 import android.media.SessionToken2.TokenType;
 import android.media.update.MediaSessionService2Provider;
 import android.os.IBinder;
-import android.support.annotation.GuardedBy;
 import android.util.Log;
 
+import androidx.annotation.GuardedBy;
+
 // TODO(jaewan): Need a test for session service itself.
 public class MediaSessionService2Impl implements MediaSessionService2Provider {
 
diff --git a/packages/MediaComponents/src/com/android/media/Rating2Impl.java b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
index d558129..e2b9f0a 100644
--- a/packages/MediaComponents/src/com/android/media/Rating2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
@@ -18,7 +18,6 @@
 
 import static android.media.Rating2.*;
 
-import android.content.Context;
 import android.media.Rating2;
 import android.media.Rating2.Style;
 import android.media.update.Rating2Provider;
diff --git a/packages/MediaComponents/src/com/android/media/RoutePlayer.java b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
index 9450d34..ebff0e2 100644
--- a/packages/MediaComponents/src/com/android/media/RoutePlayer.java
+++ b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
@@ -23,7 +23,8 @@
 import android.net.Uri;
 import android.os.Build;
 import android.os.Bundle;
-import android.support.annotation.RequiresApi;
+
+import androidx.annotation.RequiresApi;
 
 import com.android.support.mediarouter.media.MediaItemStatus;
 import com.android.support.mediarouter.media.MediaRouter;
@@ -33,8 +34,6 @@
 import com.android.support.mediarouter.media.RemotePlaybackClient.SessionActionCallback;
 import com.android.support.mediarouter.media.RemotePlaybackClient.StatusCallback;
 
-import java.util.Map;
-
 @RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
 public class RoutePlayer extends MediaSession.Callback {
     public static final long PLAYBACK_ACTIONS = PlaybackState.ACTION_PAUSE
diff --git a/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
index a5cf8c4..f792712 100644
--- a/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
@@ -16,9 +16,9 @@
 
 package com.android.media;
 
+import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
 import static android.media.SessionToken2.TYPE_SESSION;
 import static android.media.SessionToken2.TYPE_SESSION_SERVICE;
-import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
 
 import android.content.Context;
 import android.content.Intent;
diff --git a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
index a4d55d7..97d3927 100644
--- a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
+++ b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
@@ -16,12 +16,8 @@
 
 package com.android.media.subtitle;
 
-import java.util.Locale;
-import java.util.Vector;
-
 import android.content.Context;
 import android.media.MediaFormat;
-import android.media.MediaPlayer2;
 import android.media.MediaPlayer2.TrackInfo;
 import android.os.Handler;
 import android.os.Looper;
@@ -30,6 +26,9 @@
 
 import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
 
+import java.util.Locale;
+import java.util.Vector;
+
 // Note: This is forked from android.media.SubtitleController since P
 /**
  * The subtitle controller provides the architecture to display subtitles for a
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiFactory.java b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
index d7be549..f75b75e 100644
--- a/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
+++ b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
@@ -31,13 +31,13 @@
 import android.media.MediaMetadata2;
 import android.media.MediaPlaylistAgent;
 import android.media.MediaSession2;
-import android.media.SessionCommand2;
-import android.media.SessionCommandGroup2;
 import android.media.MediaSession2.ControllerInfo;
 import android.media.MediaSession2.SessionCallback;
 import android.media.MediaSessionService2;
 import android.media.MediaSessionService2.MediaNotification;
 import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
 import android.media.SessionToken2;
 import android.media.VolumeProvider2;
 import android.media.update.MediaBrowser2Provider;
@@ -59,11 +59,12 @@
 import android.media.update.VolumeProvider2Provider;
 import android.os.Bundle;
 import android.os.IInterface;
-import android.support.annotation.Nullable;
 import android.util.AttributeSet;
 import android.widget.MediaControlView2;
 import android.widget.VideoView2;
 
+import androidx.annotation.Nullable;
+
 import com.android.media.IMediaController2;
 import com.android.media.MediaBrowser2Impl;
 import com.android.media.MediaController2Impl;
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiHelper.java b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
index ad8bb48..dc5e5e2 100644
--- a/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
+++ b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
@@ -18,21 +18,21 @@
 
 import android.annotation.Nullable;
 import android.content.Context;
-import android.content.ContextWrapper;
 import android.content.pm.ApplicationInfo;
 import android.content.pm.PackageManager.NameNotFoundException;
 import android.content.res.Resources;
 import android.content.res.Resources.Theme;
 import android.content.res.XmlResourceParser;
-import android.support.annotation.GuardedBy;
-import android.support.v4.widget.Space;
-import android.support.v7.widget.ButtonBarLayout;
 import android.util.AttributeSet;
 import android.view.ContextThemeWrapper;
 import android.view.LayoutInflater;
 import android.view.View;
 import android.view.ViewGroup;
 
+import androidx.annotation.GuardedBy;
+import androidx.appcompat.widget.ButtonBarLayout;
+import androidx.legacy.widget.Space;
+
 import com.android.support.mediarouter.app.MediaRouteButton;
 import com.android.support.mediarouter.app.MediaRouteExpandCollapseButton;
 import com.android.support.mediarouter.app.MediaRouteVolumeSlider;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
index d3e8d47..98c0d17 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
@@ -19,11 +19,12 @@
 import android.annotation.NonNull;
 import android.annotation.Nullable;
 import android.content.Context;
-import android.support.v4.view.ActionProvider;
 import android.util.Log;
 import android.view.View;
 import android.view.ViewGroup;
 
+import androidx.core.view.ActionProvider;
+
 import com.android.support.mediarouter.media.MediaRouteSelector;
 import com.android.support.mediarouter.media.MediaRouter;
 
@@ -48,7 +49,7 @@
  * <h3>Prerequisites</h3>
  * <p>
  * To use the media route action provider, the activity must be a subclass of
- * {@link AppCompatActivity} from the <code>android.support.v7.appcompat</code>
+ * {@link AppCompatActivity} from the <code>androidx.appcompat.appcompat</code>
  * support library.  Refer to support library documentation for details.
  * </p>
  *
@@ -65,7 +66,7 @@
  *     &lt;item android:id="@+id/media_route_menu_item"
  *         android:title="@string/media_route_menu_title"
  *         app:showAsAction="always"
- *         app:actionProviderClass="android.support.v7.app.MediaRouteActionProvider"/>
+ *         app:actionProviderClass="androidx.mediarouter.app.MediaRouteActionProvider"/>
  * &lt;/menu>
  * </pre><p>
  * Then configure the menu and set the route selector for the chooser.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
index fde8a63..e82fcb9 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
@@ -28,14 +28,15 @@
 import android.graphics.drawable.AnimationDrawable;
 import android.graphics.drawable.Drawable;
 import android.os.AsyncTask;
-import android.support.v4.graphics.drawable.DrawableCompat;
-import android.support.v7.widget.TooltipCompat;
 import android.util.AttributeSet;
 import android.util.Log;
 import android.util.SparseArray;
 import android.view.SoundEffectConstants;
 import android.view.View;
 
+import androidx.appcompat.widget.TooltipCompat;
+import androidx.core.graphics.drawable.DrawableCompat;
+
 import com.android.media.update.ApiHelper;
 import com.android.media.update.R;
 import com.android.support.mediarouter.media.MediaRouteSelector;
@@ -70,7 +71,7 @@
  * <h3>Prerequisites</h3>
  * <p>
  * To use the media route button, the activity must be a subclass of
- * {@link FragmentActivity} from the <code>android.support.v4</code>
+ * {@link FragmentActivity} from the <code>androidx.core</code>
  * support library.  Refer to support library documentation for details.
  * </p>
  *
@@ -81,9 +82,9 @@
     private static final String TAG = "MediaRouteButton";
 
     private static final String CHOOSER_FRAGMENT_TAG =
-            "android.support.v7.mediarouter:MediaRouteChooserDialogFragment";
+            "androidx.mediarouter.media.outer:MediaRouteChooserDialogFragment";
     private static final String CONTROLLER_FRAGMENT_TAG =
-            "android.support.v7.mediarouter:MediaRouteControllerDialogFragment";
+            "androidx.mediarouter.media.outer:MediaRouteControllerDialogFragment";
 
     private final MediaRouter mRouter;
     private final MediaRouterCallback mCallback;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
index cac64d9..f24028a 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
@@ -16,13 +16,14 @@
 
 package com.android.support.mediarouter.app;
 
-import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTED;
-import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTING;
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo
+        .CONNECTION_STATE_CONNECTED;
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo
+        .CONNECTION_STATE_CONNECTING;
 
 import android.annotation.NonNull;
 import android.app.Dialog;
 import android.content.Context;
-import android.content.res.Resources;
 import android.content.res.TypedArray;
 import android.graphics.drawable.Drawable;
 import android.net.Uri;
@@ -30,12 +31,10 @@
 import android.os.Handler;
 import android.os.Message;
 import android.os.SystemClock;
-import android.support.v7.app.AppCompatDialog;
 import android.text.TextUtils;
 import android.util.Log;
 import android.view.ContextThemeWrapper;
 import android.view.Gravity;
-import android.view.LayoutInflater;
 import android.view.View;
 import android.view.ViewGroup;
 import android.widget.AdapterView;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
index 060cfca..f6c1d2f 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
@@ -40,9 +40,6 @@
 import android.support.v4.media.session.MediaControllerCompat;
 import android.support.v4.media.session.MediaSessionCompat;
 import android.support.v4.media.session.PlaybackStateCompat;
-import android.support.v4.util.ObjectsCompat;
-import android.support.v4.view.accessibility.AccessibilityEventCompat;
-import android.support.v7.graphics.Palette;
 import android.text.TextUtils;
 import android.util.Log;
 import android.view.ContextThemeWrapper;
@@ -72,11 +69,15 @@
 import android.widget.SeekBar;
 import android.widget.TextView;
 
+import androidx.core.util.ObjectsCompat;
+import androidx.core.view.accessibility.AccessibilityEventCompat;
+import androidx.palette.graphics.Palette;
+
 import com.android.media.update.ApiHelper;
 import com.android.media.update.R;
+import com.android.support.mediarouter.app.OverlayListView.OverlayObject;
 import com.android.support.mediarouter.media.MediaRouteSelector;
 import com.android.support.mediarouter.media.MediaRouter;
-import com.android.support.mediarouter.app.OverlayListView.OverlayObject;
 
 import java.io.BufferedInputStream;
 import java.io.IOException;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
index a9eaf39..b5ee63e 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
@@ -16,7 +16,7 @@
 
 package com.android.support.mediarouter.app;
 
-import android.support.annotation.NonNull;
+import androidx.annotation.NonNull;
 
 /**
  * The media route dialog factory is responsible for creating the media route
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
index 02ee118..52aecd88 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
@@ -17,10 +17,11 @@
 package com.android.support.mediarouter.app;
 
 import android.os.Bundle;
-import android.support.v4.app.Fragment;
 
-import com.android.support.mediarouter.media.MediaRouter;
+import androidx.fragment.app.Fragment;
+
 import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
 
 /**
  * Media route discovery fragment.
@@ -34,7 +35,7 @@
  * provide the {@link MediaRouter} callback to register.
  * </p><p>
  * Note that the discovery callback makes the application be connected with all the
- * {@link android.support.v7.media.MediaRouteProviderService media route provider services}
+ * {@link androidx.mediarouter.media.MediaRouteProviderService media route provider services}
  * while it is registered.
  * </p>
  */
@@ -114,7 +115,7 @@
     }
 
     /**
-     * Called to create the {@link android.support.v7.media.MediaRouter.Callback callback}
+     * Called to create the {@link androidx.mediarouter.media.MediaRouter.Callback callback}
      * that will be registered.
      * <p>
      * The default callback does nothing.  The application may override this method to
@@ -129,7 +130,7 @@
 
     /**
      * Called to prepare the callback flags that will be used when the
-     * {@link android.support.v7.media.MediaRouter.Callback callback} is registered.
+     * {@link androidx.mediarouter.media.MediaRouter.Callback callback} is registered.
      * <p>
      * The default implementation returns {@link MediaRouter#CALLBACK_FLAG_REQUEST_DISCOVERY}.
      * </p>
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
index 6a0a95a..dcca6a0 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
@@ -21,7 +21,6 @@
 import android.graphics.PorterDuff;
 import android.graphics.PorterDuffColorFilter;
 import android.graphics.drawable.AnimationDrawable;
-import android.support.v4.content.ContextCompat;
 import android.util.AttributeSet;
 import android.view.View;
 import android.widget.ImageButton;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
index 63f042f..b4bf8d1 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
@@ -19,12 +19,13 @@
 import android.content.Context;
 import android.content.res.TypedArray;
 import android.graphics.Color;
-import android.support.annotation.IntDef;
-import android.support.v4.graphics.ColorUtils;
 import android.util.TypedValue;
 import android.view.ContextThemeWrapper;
 import android.view.View;
 
+import androidx.annotation.IntDef;
+import androidx.core.graphics.ColorUtils;
+
 import com.android.media.update.R;
 
 import java.lang.annotation.Retention;
@@ -170,7 +171,7 @@
     private static boolean isLightTheme(Context context) {
         TypedValue value = new TypedValue();
         // TODO(sungsoo): Switch to com.android.internal.R.attr.isLightTheme
-        return context.getTheme().resolveAttribute(android.support.v7.appcompat.R.attr.isLightTheme,
+        return context.getTheme().resolveAttribute(androidx.appcompat.R.attr.isLightTheme,
                 value, true) && value.data != 0;
     }
 
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
index f8539bd..5a0bc95 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
@@ -20,7 +20,6 @@
 import android.hardware.display.DisplayManager;
 import android.os.Build;
 import android.os.Handler;
-import android.support.annotation.RequiresApi;
 import android.util.Log;
 import android.view.Display;
 
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
index 90ea2d5..92f608b 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
@@ -19,7 +19,8 @@
 import android.app.PendingIntent;
 import android.os.Bundle;
 import android.os.SystemClock;
-import android.support.v4.util.TimeUtils;
+
+import androidx.core.util.TimeUtils;
 
 /**
  * Describes the playback status of a media item.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
index 91a2e1a..7ea328c 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
@@ -23,7 +23,8 @@
 import android.content.Intent;
 import android.os.Handler;
 import android.os.Message;
-import android.support.v4.util.ObjectsCompat;
+
+import androidx.core.util.ObjectsCompat;
 
 import com.android.support.mediarouter.media.MediaRouter.ControlRequestCallback;
 
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
index 43cde10..a186fee 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
@@ -29,12 +29,14 @@
         .CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_ROUTE_CONTROL_REQUEST;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .CLIENT_MSG_SELECT_ROUTE;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_SET_DISCOVERY_REQUEST;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_SET_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .CLIENT_MSG_UNREGISTER;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_UNSELECT_ROUTE;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
@@ -51,9 +53,12 @@
         .SERVICE_MSG_GENERIC_FAILURE;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .SERVICE_MSG_GENERIC_SUCCESS;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_CURRENT;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .SERVICE_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .isValidRemoteMessenger;
 
 import android.app.Service;
 import android.content.Intent;
@@ -65,11 +70,12 @@
 import android.os.Message;
 import android.os.Messenger;
 import android.os.RemoteException;
-import android.support.annotation.VisibleForTesting;
-import android.support.v4.util.ObjectsCompat;
 import android.util.Log;
 import android.util.SparseArray;
 
+import androidx.annotation.VisibleForTesting;
+import androidx.core.util.ObjectsCompat;
+
 import java.lang.ref.WeakReference;
 import java.util.ArrayList;
 
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
index 5669b19..f20dcc0 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
@@ -17,8 +17,9 @@
 
 import android.content.IntentFilter;
 import android.os.Bundle;
-import android.support.annotation.NonNull;
-import android.support.annotation.Nullable;
+
+import androidx.annotation.NonNull;
+import androidx.annotation.Nullable;
 
 import java.util.ArrayList;
 import java.util.Arrays;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
index db0052e..4b56b19 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
@@ -33,15 +33,16 @@
 import android.os.Handler;
 import android.os.Looper;
 import android.os.Message;
-import android.support.v4.app.ActivityManagerCompat;
-import android.support.v4.hardware.display.DisplayManagerCompat;
-import android.support.v4.media.VolumeProviderCompat;
 import android.support.v4.media.session.MediaSessionCompat;
-import android.support.v4.util.Pair;
 import android.text.TextUtils;
 import android.util.Log;
 import android.view.Display;
 
+import androidx.core.app.ActivityManagerCompat;
+import androidx.core.hardware.display.DisplayManagerCompat;
+import androidx.core.util.Pair;
+import androidx.media.VolumeProviderCompat;
+
 import com.android.support.mediarouter.media.MediaRouteProvider.ProviderMetadata;
 import com.android.support.mediarouter.media.MediaRouteProvider.RouteController;
 
@@ -81,13 +82,13 @@
     static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
 
     /**
-     * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+     * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
      * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the reason the route
      * was unselected is unknown.
      */
     public static final int UNSELECT_REASON_UNKNOWN = 0;
     /**
-     * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+     * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
      * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
      * the disconnect button to disconnect and keep playing.
      * <p>
@@ -96,13 +97,13 @@
      */
     public static final int UNSELECT_REASON_DISCONNECTED = 1;
     /**
-     * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+     * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
      * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
      * the stop casting button.
      */
     public static final int UNSELECT_REASON_STOPPED = 2;
     /**
-     * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+     * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
      * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user selected
      * a different route.
      */
@@ -174,7 +175,7 @@
      * Applications should typically add a callback using this flag in the
      * {@link android.app.Activity activity's} {@link android.app.Activity#onStart onStart}
      * method and remove it in the {@link android.app.Activity#onStop onStop} method.
-     * The {@link android.support.v7.app.MediaRouteDiscoveryFragment} fragment may
+     * The {@link androidx.mediarouter.app.MediaRouteDiscoveryFragment} fragment may
      * also be used for this purpose.
      * </p><p class="note">
      * On {@link ActivityManager#isLowRamDevice low-RAM devices} this flag
@@ -182,7 +183,7 @@
      * {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
      * </p>
      *
-     * @see android.support.v7.app.MediaRouteDiscoveryFragment
+     * @see androidx.mediarouter.app.MediaRouteDiscoveryFragment
      */
     public static final int CALLBACK_FLAG_REQUEST_DISCOVERY = 1 << 2;
 
@@ -197,7 +198,7 @@
      * {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
      * </p>
      *
-     * @see android.support.v7.app.MediaRouteDiscoveryFragment
+     * @see androidx.mediarouter.app.MediaRouteDiscoveryFragment
      */
     public static final int CALLBACK_FLAG_FORCE_DISCOVERY = 1 << 3;
 
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
index 3206596..0e7514c 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
@@ -19,7 +19,8 @@
 import android.app.PendingIntent;
 import android.os.Bundle;
 import android.os.SystemClock;
-import android.support.v4.util.TimeUtils;
+
+import androidx.core.util.TimeUtils;
 
 /**
  * Describes the playback status of a media session.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
index 98e4e28..eacf1c8 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
@@ -29,17 +29,20 @@
         .CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_ROUTE_CONTROL_REQUEST;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .CLIENT_MSG_SELECT_ROUTE;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_SET_DISCOVERY_REQUEST;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_SET_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .CLIENT_MSG_UNREGISTER;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_UNSELECT_ROUTE;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .CLIENT_MSG_UPDATE_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .CLIENT_VERSION_CURRENT;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_DATA_ERROR;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .SERVICE_MSG_CONTROL_REQUEST_FAILED;
@@ -51,9 +54,11 @@
         .SERVICE_MSG_GENERIC_FAILURE;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
         .SERVICE_MSG_GENERIC_SUCCESS;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .SERVICE_MSG_REGISTERED;
 import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_1;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+        .isValidRemoteMessenger;
 
 import android.annotation.NonNull;
 import android.content.ComponentName;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
index 826449b..65c5518 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
@@ -18,7 +18,6 @@
 import android.content.Context;
 import android.media.AudioManager;
 import android.os.Build;
-import android.support.annotation.RequiresApi;
 
 import java.lang.ref.WeakReference;
 
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
index f6e1497..e76564e 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
@@ -22,9 +22,10 @@
 import android.content.IntentFilter;
 import android.net.Uri;
 import android.os.Bundle;
-import android.support.v4.util.ObjectsCompat;
 import android.util.Log;
 
+import androidx.core.util.ObjectsCompat;
+
 /**
  * A helper class for playing media on remote routes using the remote playback protocol
  * defined by {@link MediaControlIntent}.
@@ -867,11 +868,11 @@
 
     private final class ActionReceiver extends BroadcastReceiver {
         public static final String ACTION_ITEM_STATUS_CHANGED =
-                "android.support.v7.media.actions.ACTION_ITEM_STATUS_CHANGED";
+                "androidx.mediarouter.media.actions.ACTION_ITEM_STATUS_CHANGED";
         public static final String ACTION_SESSION_STATUS_CHANGED =
-                "android.support.v7.media.actions.ACTION_SESSION_STATUS_CHANGED";
+                "androidx.mediarouter.media.actions.ACTION_SESSION_STATUS_CHANGED";
         public static final String ACTION_MESSAGE_RECEIVED =
-                "android.support.v7.media.actions.ACTION_MESSAGE_RECEIVED";
+                "androidx.mediarouter.media.actions.ACTION_MESSAGE_RECEIVED";
 
         ActionReceiver() {
         }
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
index a38491f..53901a4 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
@@ -24,7 +24,6 @@
 import android.content.res.Resources;
 import android.media.AudioManager;
 import android.os.Build;
-import android.support.annotation.RequiresApi;
 import android.view.Display;
 
 import com.android.media.update.ApiHelper;
diff --git a/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
index 3aff150..ad85af4 100644
--- a/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
@@ -20,15 +20,13 @@
 import android.content.res.Resources;
 import android.graphics.Point;
 import android.media.MediaMetadata;
+import android.media.SessionToken2;
 import android.media.session.MediaController;
 import android.media.session.PlaybackState;
-import android.media.SessionToken2;
 import android.media.update.MediaControlView2Provider;
 import android.media.update.ViewGroupProvider;
 import android.os.Bundle;
-import android.support.annotation.Nullable;
 import android.util.AttributeSet;
-import android.util.Log;
 import android.view.Gravity;
 import android.view.MotionEvent;
 import android.view.View;
@@ -36,27 +34,28 @@
 import android.view.WindowManager;
 import android.widget.AdapterView;
 import android.widget.BaseAdapter;
-import android.widget.Button;
 import android.widget.ImageButton;
 import android.widget.ImageView;
 import android.widget.LinearLayout;
 import android.widget.ListView;
 import android.widget.MediaControlView2;
-import android.widget.ProgressBar;
 import android.widget.PopupWindow;
+import android.widget.ProgressBar;
 import android.widget.RelativeLayout;
 import android.widget.SeekBar;
 import android.widget.SeekBar.OnSeekBarChangeListener;
 import android.widget.TextView;
 
+import androidx.annotation.Nullable;
+
 import com.android.media.update.ApiHelper;
 import com.android.media.update.R;
 import com.android.support.mediarouter.app.MediaRouteButton;
-import com.android.support.mediarouter.media.MediaRouter;
 import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
 
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Formatter;
 import java.util.List;
 import java.util.Locale;
diff --git a/packages/MediaComponents/src/com/android/widget/SubtitleView.java b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
index 67b2cd1..db0ae33 100644
--- a/packages/MediaComponents/src/com/android/widget/SubtitleView.java
+++ b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
@@ -19,10 +19,11 @@
 import android.content.Context;
 import android.graphics.Canvas;
 import android.os.Looper;
-import android.support.annotation.Nullable;
 import android.util.AttributeSet;
 import android.widget.FrameLayout;
 
+import androidx.annotation.Nullable;
+
 import com.android.media.subtitle.SubtitleController.Anchor;
 import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
 
diff --git a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
index fc92e85..c9869c0 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
@@ -16,17 +16,18 @@
 
 package com.android.widget;
 
+import static android.widget.VideoView2.VIEW_TYPE_SURFACEVIEW;
+
 import android.content.Context;
 import android.graphics.Rect;
 import android.media.MediaPlayer2;
-import android.support.annotation.NonNull;
 import android.util.AttributeSet;
 import android.util.Log;
 import android.view.SurfaceHolder;
 import android.view.SurfaceView;
 import android.view.View;
 
-import static android.widget.VideoView2.VIEW_TYPE_SURFACEVIEW;
+import androidx.annotation.NonNull;
 
 class VideoSurfaceView extends SurfaceView implements VideoViewInterface, SurfaceHolder.Callback {
     private static final String TAG = "VideoSurfaceView";
diff --git a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
index 024a3aa..40fb046 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
@@ -16,18 +16,19 @@
 
 package com.android.widget;
 
+import static android.widget.VideoView2.VIEW_TYPE_TEXTUREVIEW;
+
 import android.content.Context;
 import android.graphics.SurfaceTexture;
 import android.media.MediaPlayer2;
-import android.support.annotation.NonNull;
-import android.support.annotation.RequiresApi;
 import android.util.AttributeSet;
 import android.util.Log;
 import android.view.Surface;
 import android.view.TextureView;
 import android.view.View;
 
-import static android.widget.VideoView2.VIEW_TYPE_TEXTUREVIEW;
+import androidx.annotation.NonNull;
+import androidx.annotation.RequiresApi;
 
 @RequiresApi(26)
 class VideoTextureView extends TextureView
diff --git a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
index 97279d6..ffb145a 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
@@ -28,30 +28,29 @@
 import android.media.AudioFocusRequest;
 import android.media.AudioManager;
 import android.media.DataSourceDesc;
+import android.media.MediaItem2;
 import android.media.MediaMetadata;
+import android.media.MediaMetadata2;
+import android.media.MediaMetadataRetriever;
 import android.media.MediaPlayer2;
 import android.media.MediaPlayer2.MediaPlayer2EventCallback;
 import android.media.MediaPlayer2.OnSubtitleDataListener;
 import android.media.MediaPlayer2Impl;
-import android.media.SubtitleData;
-import android.media.MediaItem2;
-import android.media.MediaMetadata2;
-import android.media.MediaMetadataRetriever;
 import android.media.Metadata;
 import android.media.PlaybackParams;
+import android.media.SessionToken2;
+import android.media.SubtitleData;
 import android.media.TimedText;
 import android.media.session.MediaController;
 import android.media.session.MediaController.PlaybackInfo;
 import android.media.session.MediaSession;
 import android.media.session.PlaybackState;
-import android.media.SessionToken2;
 import android.media.update.VideoView2Provider;
 import android.media.update.ViewGroupProvider;
 import android.net.Uri;
 import android.os.AsyncTask;
 import android.os.Bundle;
 import android.os.ResultReceiver;
-import android.support.annotation.Nullable;
 import android.util.AttributeSet;
 import android.util.DisplayMetrics;
 import android.util.Log;
@@ -66,6 +65,8 @@
 import android.widget.TextView;
 import android.widget.VideoView2;
 
+import androidx.annotation.Nullable;
+
 import com.android.internal.graphics.palette.Palette;
 import com.android.media.RoutePlayer;
 import com.android.media.subtitle.ClosedCaptionRenderer;
@@ -73,10 +74,10 @@
 import com.android.media.subtitle.SubtitleTrack;
 import com.android.media.update.ApiHelper;
 import com.android.media.update.R;
-import com.android.support.mediarouter.media.MediaItemStatus;
 import com.android.support.mediarouter.media.MediaControlIntent;
-import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.media.MediaItemStatus;
 import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
 
 import java.util.ArrayList;
 import java.util.List;
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 7419e64..c0aa477 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -2,24 +2,6 @@
 
 include $(CLEAR_VARS)
 
-LOCAL_SRC_FILES := \
-    ServiceUtilities.cpp
-
-# FIXME Move this library to frameworks/native
-LOCAL_MODULE := libserviceutility
-
-LOCAL_SHARED_LIBRARIES := \
-    libcutils \
-    libutils \
-    liblog \
-    libbinder
-
-LOCAL_CFLAGS := -Wall -Werror
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(CLEAR_VARS)
-
 LOCAL_SRC_FILES:=               \
     AudioFlinger.cpp            \
     Threads.cpp                 \
@@ -31,7 +13,8 @@
     PatchPanel.cpp              \
     StateQueue.cpp              \
     BufLog.cpp                  \
-    TypedLogger.cpp
+    TypedLogger.cpp             \
+    NBAIO_Tee.cpp               \
 
 LOCAL_C_INCLUDES := \
     frameworks/av/services/audiopolicy \
@@ -53,13 +36,13 @@
     libnbaio \
     libnblog \
     libpowermanager \
-    libserviceutility \
     libmediautils \
     libmemunreachable \
     libmedia_helper
 
 LOCAL_STATIC_LIBRARIES := \
     libcpustats \
+    libsndfile \
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
@@ -82,6 +65,7 @@
 LOCAL_CFLAGS += -fvisibility=hidden
 
 LOCAL_CFLAGS += -Werror -Wall
+LOCAL_SANITIZE := integer_overflow
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index b38d37f..53a4ce9 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -46,7 +46,7 @@
 #include <system/audio.h>
 
 #include "AudioFlinger.h"
-#include "ServiceUtilities.h"
+#include "NBAIO_Tee.h"
 
 #include <media/AudioResamplerPublic.h>
 
@@ -64,6 +64,7 @@
 #include <media/nbaio/PipeReader.h>
 #include <media/AudioParameter.h>
 #include <mediautils/BatteryNotifier.h>
+#include <mediautils/ServiceUtilities.h>
 #include <private/android_filesystem_config.h>
 
 //#define BUFLOG_NDEBUG 0
@@ -98,17 +99,6 @@
 
 uint32_t AudioFlinger::mScreenState;
 
-
-#ifdef TEE_SINK
-bool AudioFlinger::mTeeSinkInputEnabled = false;
-bool AudioFlinger::mTeeSinkOutputEnabled = false;
-bool AudioFlinger::mTeeSinkTrackEnabled = false;
-
-size_t AudioFlinger::mTeeSinkInputFrames = kTeeSinkInputFramesDefault;
-size_t AudioFlinger::mTeeSinkOutputFrames = kTeeSinkOutputFramesDefault;
-size_t AudioFlinger::mTeeSinkTrackFrames = kTeeSinkTrackFramesDefault;
-#endif
-
 // In order to avoid invalidating offloaded tracks each time a Visualizer is turned on and off
 // we define a minimum time during which a global effect is considered enabled.
 static const nsecs_t kMinGlobalEffectEnabletimeNs = seconds(7200);
@@ -158,6 +148,7 @@
       mTotalMemory(0),
       mClientSharedHeapSize(kMinimumClientSharedHeapSizeBytes),
       mGlobalEffectEnableTime(0),
+      mPatchPanel(this),
       mSystemReady(false)
 {
     // unsigned instead of audio_unique_id_use_t, because ++ operator is unavailable for enum
@@ -166,7 +157,6 @@
         mNextUniqueIds[use] = AUDIO_UNIQUE_ID_USE_MAX;
     }
 
-    getpid_cached = getpid();
     const bool doLog = property_get_bool("ro.test_harness", false);
     if (doLog) {
         mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
@@ -183,27 +173,6 @@
     mEffectsFactoryHal = EffectsFactoryHalInterface::create();
 
     mMediaLogNotifier->run("MediaLogNotifier");
-
-#ifdef TEE_SINK
-    char value[PROPERTY_VALUE_MAX];
-    (void) property_get("ro.debuggable", value, "0");
-    int debuggable = atoi(value);
-    int teeEnabled = 0;
-    if (debuggable) {
-        (void) property_get("af.tee", value, "0");
-        teeEnabled = atoi(value);
-    }
-    // FIXME symbolic constants here
-    if (teeEnabled & 1) {
-        mTeeSinkInputEnabled = true;
-    }
-    if (teeEnabled & 2) {
-        mTeeSinkOutputEnabled = true;
-    }
-    if (teeEnabled & 4) {
-        mTeeSinkTrackEnabled = true;
-    }
-#endif
 }
 
 void AudioFlinger::onFirstRef()
@@ -224,8 +193,6 @@
         }
     }
 
-    mPatchPanel = new PatchPanel(this);
-
     mMode = AUDIO_MODE_NORMAL;
 
     gAudioFlinger = this;
@@ -532,12 +499,7 @@
             dev->dump(fd);
         }
 
-#ifdef TEE_SINK
-        // dump the serially shared record tee sink
-        if (mRecordTeeSource != 0) {
-            dumpTee(fd, mRecordTeeSource, AUDIO_IO_HANDLE_NONE, 'C');
-        }
-#endif
+        mPatchPanel.dump(fd);
 
         BUFLOG_RESET;
 
@@ -545,6 +507,10 @@
             mLock.unlock();
         }
 
+#ifdef TEE_SINK
+        // NBAIO_Tee dump is safe to call outside of AF lock.
+        NBAIO_Tee::dumpAll(fd, "_DUMP");
+#endif
         // append a copy of media.log here by forwarding fd to it, but don't attempt
         // to lookup the service if it's not running, as it will block for a second
         if (sMediaLogServiceAsBinder != 0) {
@@ -664,7 +630,7 @@
     bool updatePid = (input.clientInfo.clientPid == -1);
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
     uid_t clientUid = input.clientInfo.clientUid;
-    if (!isTrustedCallingUid(callingUid)) {
+    if (!isAudioServerOrMediaServerUid(callingUid)) {
         ALOGW_IF(clientUid != callingUid,
                 "%s uid %d tried to pass itself off as %d",
                 __FUNCTION__, callingUid, clientUid);
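
This hunk (and the matching one in the record path further down) replaces the PID-based getpid_cached comparison with UID helpers pulled in via mediautils/ServiceUtilities.h. The helpers themselves are not part of this patch; the following is only a minimal sketch of the shape they plausibly take, assuming the AID_* constants from private/android_filesystem_config.h.

// Hypothetical sketch of the UID checks referenced by this patch
// (isAudioServerUid, isAudioServerOrMediaServerUid). The real definitions live
// in mediautils/ServiceUtilities.h; only the AID_* constants are assumed here.
#include <private/android_filesystem_config.h>
#include <unistd.h>

static inline bool isAudioServerUid(uid_t uid) {
    return uid == AID_AUDIOSERVER;
}

static inline bool isAudioServerOrMediaServerUid(uid_t uid) {
    switch (uid) {
    case AID_AUDIOSERVER:
    case AID_MEDIA:
        return true;
    default:
        return false;
    }
}
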
@@ -1000,14 +966,12 @@
 {
     ALOGV("AudioFlinger::setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
 
-    // TODO: Notify MmapThreads
-
     AutoMutex lock(mLock);
     for (size_t i = 0; i < mRecordThreads.size(); i++) {
-        sp<RecordThread> thread = mRecordThreads.valueAt(i);
-        if (thread != 0) {
-            thread->setRecordSilenced(uid, silenced);
-        }
+        mRecordThreads[i]->setRecordSilenced(uid, silenced);
+    }
+    for (size_t i = 0; i < mMmapThreads.size(); i++) {
+        mMmapThreads[i]->setRecordSilenced(uid, silenced);
     }
 }
 
@@ -1078,9 +1042,9 @@
         ALOGW("checkStreamType() invalid stream %d", stream);
         return BAD_VALUE;
     }
-    pid_t caller = IPCThreadState::self()->getCallingPid();
-    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && caller != getpid_cached) {
-        ALOGW("checkStreamType() pid %d cannot use internal stream type %d", caller, stream);
+    const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && !isAudioServerUid(callerUid)) {
+        ALOGW("checkStreamType() uid %d cannot use internal stream type %d", callerUid, stream);
         return PERMISSION_DENIED;
     }
 
@@ -1180,16 +1144,58 @@
     }
 }
 
+// Filter reserved keys from setParameters() before forwarding to audio HAL or acting upon.
+// Some keys are used for audio routing and audio path configuration and should be reserved for use
+// by audio policy and audio flinger for functional, privacy and security reasons.
+void AudioFlinger::filterReservedParameters(String8& keyValuePairs, uid_t callingUid)
+{
+    static const String8 kReservedParameters[] = {
+        String8(AudioParameter::keyRouting),
+        String8(AudioParameter::keySamplingRate),
+        String8(AudioParameter::keyFormat),
+        String8(AudioParameter::keyChannels),
+        String8(AudioParameter::keyFrameCount),
+        String8(AudioParameter::keyInputSource),
+        String8(AudioParameter::keyMonoOutput),
+        String8(AudioParameter::keyStreamConnect),
+        String8(AudioParameter::keyStreamDisconnect),
+        String8(AudioParameter::keyStreamSupportedFormats),
+        String8(AudioParameter::keyStreamSupportedChannels),
+        String8(AudioParameter::keyStreamSupportedSamplingRates),
+    };
+
+    if (isAudioServerUid(callingUid)) {
+        return; // no need to filter if audioserver.
+    }
+
+    AudioParameter param = AudioParameter(keyValuePairs);
+    String8 value;
+    for (auto& key : kReservedParameters) {
+        if (param.get(key, value) == NO_ERROR) {
+            ALOGW("%s: filtering key %s value %s from uid %d",
+                  __func__, key.string(), value.string(), callingUid);
+            param.remove(key);
+        }
+    }
+    keyValuePairs = param.toString();
+}
+
 status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
 {
-    ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
-            ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid());
+    ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d calling uid %d",
+            ioHandle, keyValuePairs.string(),
+            IPCThreadState::self()->getCallingPid(), IPCThreadState::self()->getCallingUid());
 
     // check calling permissions
     if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
 
+    String8 filteredKeyValuePairs = keyValuePairs;
+    filterReservedParameters(filteredKeyValuePairs, IPCThreadState::self()->getCallingUid());
+
+    ALOGV("%s: filtered keyvalue %s", __func__, filteredKeyValuePairs.string());
+
     // AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
     if (ioHandle == AUDIO_IO_HANDLE_NONE) {
         Mutex::Autolock _l(mLock);
@@ -1200,7 +1206,7 @@
             mHardwareStatus = AUDIO_HW_SET_PARAMETER;
             for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
                 sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
-                status_t result = dev->setParameters(keyValuePairs);
+                status_t result = dev->setParameters(filteredKeyValuePairs);
                 // return success if at least one audio device accepts the parameters as not all
                 // HALs are requested to support all parameters. If no audio device supports the
                 // requested parameters, the last error is reported.
@@ -1211,7 +1217,7 @@
             mHardwareStatus = AUDIO_HW_IDLE;
         }
         // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
-        AudioParameter param = AudioParameter(keyValuePairs);
+        AudioParameter param = AudioParameter(filteredKeyValuePairs);
         String8 value;
         if (param.get(String8(AudioParameter::keyBtNrec), value) == NO_ERROR) {
             bool btNrecIsOff = (value == AudioParameter::valueOff);
@@ -1244,16 +1250,16 @@
             }
         } else if (thread == primaryPlaybackThread_l()) {
             // indicate output device change to all input threads for pre processing
-            AudioParameter param = AudioParameter(keyValuePairs);
+            AudioParameter param = AudioParameter(filteredKeyValuePairs);
             int value;
             if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
                     (value != 0)) {
-                broacastParametersToRecordThreads_l(keyValuePairs);
+                broacastParametersToRecordThreads_l(filteredKeyValuePairs);
             }
         }
     }
     if (thread != 0) {
-        return thread->setParameters(keyValuePairs);
+        return thread->setParameters(filteredKeyValuePairs);
     }
     return BAD_VALUE;
 }
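
The setParameters() path above now runs every key/value string through filterReservedParameters() before it reaches any HAL. The toy program below illustrates the same filtering idea in plain standard C++; the key strings and the parsing are simplified stand-ins for illustration only, not the AudioParameter implementation.

// Self-contained illustration of reserved-key filtering on a
// "key=value;key=value" string, mirroring the intent of
// AudioFlinger::filterReservedParameters() above. Key names here are
// illustrative, not the AudioParameter constants.
#include <iostream>
#include <set>
#include <sstream>
#include <string>

static std::string filterReserved(const std::string& kvPairs, bool trustedCaller) {
    if (trustedCaller) {
        return kvPairs;  // audioserver-like callers keep every key
    }
    static const std::set<std::string> kReserved = {
        "routing", "sampling_rate", "format", "channels", "input_source"};
    std::ostringstream out;
    std::istringstream in(kvPairs);
    std::string pair;
    while (std::getline(in, pair, ';')) {
        const std::string key = pair.substr(0, pair.find('='));
        if (kReserved.count(key) == 0) {
            if (out.tellp() > 0) {
                out << ';';
            }
            out << pair;  // keep non-reserved pairs only
        }
    }
    return out.str();
}

int main() {
    // Prints "a2dp_suspended=true": "routing" is stripped for untrusted callers.
    std::cout << filterReserved("routing=2;a2dp_suspended=true", /*trustedCaller=*/false)
              << std::endl;
}
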
@@ -1593,7 +1599,7 @@
     bool updatePid = (input.clientInfo.clientPid == -1);
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
     uid_t clientUid = input.clientInfo.clientUid;
-    if (!isTrustedCallingUid(callingUid)) {
+    if (!isAudioServerOrMediaServerUid(callingUid)) {
         ALOGW_IF(clientUid != callingUid,
                 "%s uid %d tried to pass itself off as %d",
                 __FUNCTION__, callingUid, clientUid);
@@ -1647,6 +1653,7 @@
         recordTrack.clear();
         AudioSystem::releaseInput(portId);
         output.inputId = AUDIO_IO_HANDLE_NONE;
+        output.selectedDeviceId = input.selectedDeviceId;
         portId = AUDIO_PORT_HANDLE_NONE;
     }
     lStatus = AudioSystem::getInputForAttr(&input.attr, &output.inputId,
@@ -1841,7 +1848,7 @@
 status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory)
 {
     uid_t uid = IPCThreadState::self()->getCallingUid();
-    if (uid != AID_SYSTEM) {
+    if (!isAudioServerOrSystemServerUid(uid)) {
         return PERMISSION_DENIED;
     }
     Mutex::Autolock _l(mLock);
@@ -1886,6 +1893,28 @@
     return mClientSharedHeapSize;
 }
 
+status_t AudioFlinger::setAudioPortConfig(const struct audio_port_config *config)
+{
+    ALOGV(__func__);
+
+    audio_module_handle_t module;
+    if (config->type == AUDIO_PORT_TYPE_DEVICE) {
+        module = config->ext.device.hw_module;
+    } else {
+        module = config->ext.mix.hw_module;
+    }
+
+    Mutex::Autolock _l(mLock);
+    ssize_t index = mAudioHwDevs.indexOfKey(module);
+    if (index < 0) {
+        ALOGW("%s() bad hw module %d", __func__, module);
+        return BAD_VALUE;
+    }
+
+    AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(index);
+    return audioHwDevice->hwDevice()->setAudioPortConfig(config);
+}
+
 audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
 {
     Mutex::Autolock _l(mLock);
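
The setAudioPortConfig() added above picks the owning hardware module from either ext.device or ext.mix depending on the port type, then forwards the struct to that device's HAL. Below is a caller-side sketch of building a matching device port config; the field and constant names come from system/audio.h and the checks in the new function, while the chosen role value and the zero-initialization are assumptions for illustration.

// Hypothetical helper that builds an audio_port_config the new
// AudioFlinger::setAudioPortConfig() would accept for a device port.
// Only the fields inspected by the function above are populated.
#include <system/audio.h>
#include <cstring>

static audio_port_config makeDevicePortConfig(audio_module_handle_t module,
                                               audio_port_handle_t portId) {
    audio_port_config config;
    std::memset(&config, 0, sizeof(config));   // leave unused fields zeroed
    config.id = portId;
    config.role = AUDIO_PORT_ROLE_SINK;        // e.g. an output device
    config.type = AUDIO_PORT_TYPE_DEVICE;      // selects config.ext.device
    config.ext.device.hw_module = module;      // used to locate the AudioHwDevice
    return config;
}
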
@@ -2230,7 +2259,7 @@
     delete out;
 }
 
-void AudioFlinger::closeOutputInternal_l(const sp<PlaybackThread>& thread)
+void AudioFlinger::closeThreadInternal_l(const sp<PlaybackThread>& thread)
 {
     mPlaybackThreads.removeItem(thread->mId);
     thread->exit();
@@ -2306,6 +2335,9 @@
         return 0;
     }
 
+    // Some flags are specific to framework and must not leak to the HAL.
+    flags = static_cast<audio_input_flags_t>(flags & ~AUDIO_INPUT_FRAMEWORK_FLAGS);
+
     // Audio Policy can request a specific handle for hardware hotword.
     // The goal here is not to re-open an already opened input.
     // It is to use a pre-assigned I/O handle.
@@ -2363,55 +2395,6 @@
                     thread.get());
             return thread;
         } else {
-#ifdef TEE_SINK
-            // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
-            // or (re-)create if current Pipe is idle and does not match the new format
-            sp<NBAIO_Sink> teeSink;
-            enum {
-                TEE_SINK_NO,    // don't copy input
-                TEE_SINK_NEW,   // copy input using a new pipe
-                TEE_SINK_OLD,   // copy input using an existing pipe
-            } kind;
-            NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
-                    audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
-            if (!mTeeSinkInputEnabled) {
-                kind = TEE_SINK_NO;
-            } else if (!Format_isValid(format)) {
-                kind = TEE_SINK_NO;
-            } else if (mRecordTeeSink == 0) {
-                kind = TEE_SINK_NEW;
-            } else if (mRecordTeeSink->getStrongCount() != 1) {
-                kind = TEE_SINK_NO;
-            } else if (Format_isEqual(format, mRecordTeeSink->format())) {
-                kind = TEE_SINK_OLD;
-            } else {
-                kind = TEE_SINK_NEW;
-            }
-            switch (kind) {
-            case TEE_SINK_NEW: {
-                Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
-                size_t numCounterOffers = 0;
-                const NBAIO_Format offers[1] = {format};
-                ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
-                ALOG_ASSERT(index == 0);
-                PipeReader *pipeReader = new PipeReader(*pipe);
-                numCounterOffers = 0;
-                index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
-                ALOG_ASSERT(index == 0);
-                mRecordTeeSink = pipe;
-                mRecordTeeSource = pipeReader;
-                teeSink = pipe;
-                }
-                break;
-            case TEE_SINK_OLD:
-                teeSink = mRecordTeeSink;
-                break;
-            case TEE_SINK_NO:
-            default:
-                break;
-            }
-#endif
-
             // Start record thread
             // RecordThread requires both input and output device indication to forward to audio
             // pre processing modules
@@ -2421,9 +2404,6 @@
                                       primaryOutputDevice_l(),
                                       devices,
                                       mSystemReady
-#ifdef TEE_SINK
-                                      , teeSink
-#endif
                                       );
             mRecordThreads.add(*input, thread);
             ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
@@ -2523,7 +2503,7 @@
     delete in;
 }
 
-void AudioFlinger::closeInputInternal_l(const sp<RecordThread>& thread)
+void AudioFlinger::closeThreadInternal_l(const sp<RecordThread>& thread)
 {
     mRecordThreads.removeItem(thread->mId);
     closeInputFinish(thread);
@@ -2561,7 +2541,8 @@
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
     ALOGV("acquiring %d from %d, for %d", audioSession, caller, pid);
-    if (pid != -1 && (caller == getpid_cached)) {
+    const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+    if (pid != -1 && isAudioServerUid(callerUid)) { // check must match releaseAudioSessionId()
         caller = pid;
     }
 
@@ -2595,7 +2576,8 @@
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
     ALOGV("releasing %d from %d for %d", audioSession, caller, pid);
-    if (pid != -1 && (caller == getpid_cached)) {
+    const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+    if (pid != -1 && isAudioServerUid(callerUid)) { // check must match acquireAudioSessionId()
         caller = pid;
     }
     size_t num = mAudioSessionRefs.size();
@@ -2612,9 +2594,10 @@
             return;
         }
     }
-    // If the caller is mediaserver it is likely that the session being released was acquired
+    // If the caller is audioserver it is likely that the session being released was acquired
     // on behalf of a process not in notification clients and we ignore the warning.
-    ALOGW_IF(caller != getpid_cached, "session id %d not found for pid %d", audioSession, caller);
+    ALOGW_IF(!isAudioServerUid(callerUid),
+            "session id %d not found for pid %d", audioSession, caller);
 }
 
 bool AudioFlinger::isSessionAcquired_l(audio_session_t audioSession)
@@ -2922,7 +2905,7 @@
     effect_descriptor_t desc;
 
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
-    if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+    if (pid == -1 || !isAudioServerOrMediaServerUid(callingUid)) {
         const pid_t callingPid = IPCThreadState::self()->getCallingPid();
         ALOGW_IF(pid != -1 && pid != callingPid,
                  "%s uid %d pid %d tried to pass itself off as pid %d",
@@ -2945,8 +2928,8 @@
     }
 
     // Session AUDIO_SESSION_OUTPUT_STAGE is reserved for output stage effects
-    // that can only be created by audio policy manager (running in same process)
-    if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && getpid_cached != pid) {
+    // that can only be created by audio policy manager
+    if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && !isAudioServerUid(callingUid)) {
         lStatus = PERMISSION_DENIED;
         goto Exit;
     }
@@ -3326,140 +3309,6 @@
 }
 
 
-struct Entry {
-#define TEE_MAX_FILENAME 32 // %Y%m%d%H%M%S_%d.wav = 4+2+2+2+2+2+1+1+4+1 = 21
-    char mFileName[TEE_MAX_FILENAME];
-};
-
-int comparEntry(const void *p1, const void *p2)
-{
-    return strcmp(((const Entry *) p1)->mFileName, ((const Entry *) p2)->mFileName);
-}
-
-#ifdef TEE_SINK
-void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id, char suffix)
-{
-    NBAIO_Source *teeSource = source.get();
-    if (teeSource != NULL) {
-        // .wav rotation
-        // There is a benign race condition if 2 threads call this simultaneously.
-        // They would both traverse the directory, but the result would simply be
-        // failures at unlink() which are ignored.  It's also unlikely since
-        // normally dumpsys is only done by bugreport or from the command line.
-        char teePath[32+256];
-        strcpy(teePath, "/data/misc/audioserver");
-        size_t teePathLen = strlen(teePath);
-        DIR *dir = opendir(teePath);
-        teePath[teePathLen++] = '/';
-        if (dir != NULL) {
-#define TEE_MAX_SORT 20 // number of entries to sort
-#define TEE_MAX_KEEP 10 // number of entries to keep
-            struct Entry entries[TEE_MAX_SORT];
-            size_t entryCount = 0;
-            while (entryCount < TEE_MAX_SORT) {
-                struct dirent de;
-                struct dirent *result = NULL;
-                int rc = readdir_r(dir, &de, &result);
-                if (rc != 0) {
-                    ALOGW("readdir_r failed %d", rc);
-                    break;
-                }
-                if (result == NULL) {
-                    break;
-                }
-                if (result != &de) {
-                    ALOGW("readdir_r returned unexpected result %p != %p", result, &de);
-                    break;
-                }
-                // ignore non .wav file entries
-                size_t nameLen = strlen(de.d_name);
-                if (nameLen <= 4 || nameLen >= TEE_MAX_FILENAME ||
-                        strcmp(&de.d_name[nameLen - 4], ".wav")) {
-                    continue;
-                }
-                strcpy(entries[entryCount++].mFileName, de.d_name);
-            }
-            (void) closedir(dir);
-            if (entryCount > TEE_MAX_KEEP) {
-                qsort(entries, entryCount, sizeof(Entry), comparEntry);
-                for (size_t i = 0; i < entryCount - TEE_MAX_KEEP; ++i) {
-                    strcpy(&teePath[teePathLen], entries[i].mFileName);
-                    (void) unlink(teePath);
-                }
-            }
-        } else {
-            if (fd >= 0) {
-                dprintf(fd, "unable to rotate tees in %.*s: %s\n", (int) teePathLen, teePath,
-                        strerror(errno));
-            }
-        }
-        char teeTime[16];
-        struct timeval tv;
-        gettimeofday(&tv, NULL);
-        struct tm tm;
-        localtime_r(&tv.tv_sec, &tm);
-        strftime(teeTime, sizeof(teeTime), "%Y%m%d%H%M%S", &tm);
-        snprintf(&teePath[teePathLen], sizeof(teePath) - teePathLen, "%s_%d_%c.wav", teeTime, id,
-                suffix);
-        // if 2 dumpsys are done within 1 second, and rotation didn't work, then discard 2nd
-        int teeFd = open(teePath, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR);
-        if (teeFd >= 0) {
-            // FIXME use libsndfile
-            char wavHeader[44];
-            memcpy(wavHeader,
-                "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0",
-                sizeof(wavHeader));
-            NBAIO_Format format = teeSource->format();
-            unsigned channelCount = Format_channelCount(format);
-            uint32_t sampleRate = Format_sampleRate(format);
-            size_t frameSize = Format_frameSize(format);
-            wavHeader[22] = channelCount;       // number of channels
-            wavHeader[24] = sampleRate;         // sample rate
-            wavHeader[25] = sampleRate >> 8;
-            wavHeader[32] = frameSize;          // block alignment
-            wavHeader[33] = frameSize >> 8;
-            write(teeFd, wavHeader, sizeof(wavHeader));
-            size_t total = 0;
-            bool firstRead = true;
-#define TEE_SINK_READ 1024                      // frames per I/O operation
-            void *buffer = malloc(TEE_SINK_READ * frameSize);
-            for (;;) {
-                size_t count = TEE_SINK_READ;
-                ssize_t actual = teeSource->read(buffer, count);
-                bool wasFirstRead = firstRead;
-                firstRead = false;
-                if (actual <= 0) {
-                    if (actual == (ssize_t) OVERRUN && wasFirstRead) {
-                        continue;
-                    }
-                    break;
-                }
-                ALOG_ASSERT(actual <= (ssize_t)count);
-                write(teeFd, buffer, actual * frameSize);
-                total += actual;
-            }
-            free(buffer);
-            lseek(teeFd, (off_t) 4, SEEK_SET);
-            uint32_t temp = 44 + total * frameSize - 8;
-            // FIXME not big-endian safe
-            write(teeFd, &temp, sizeof(temp));
-            lseek(teeFd, (off_t) 40, SEEK_SET);
-            temp =  total * frameSize;
-            // FIXME not big-endian safe
-            write(teeFd, &temp, sizeof(temp));
-            close(teeFd);
-            if (fd >= 0) {
-                dprintf(fd, "tee copied to %s\n", teePath);
-            }
-        } else {
-            if (fd >= 0) {
-                dprintf(fd, "unable to create tee %s: %s\n", teePath, strerror(errno));
-            }
-        }
-    }
-}
-#endif
-
 // ----------------------------------------------------------------------------
 
 status_t AudioFlinger::onTransact(
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 0e2da4e..0276cad 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -19,8 +19,11 @@
 #define ANDROID_AUDIO_FLINGER_H
 
 #include "Configuration.h"
+#include <atomic>
+#include <mutex>
 #include <deque>
 #include <map>
+#include <vector>
 #include <stdint.h>
 #include <sys/types.h>
 #include <limits.h>
@@ -60,6 +63,7 @@
 #include <media/VolumeShaper.h>
 
 #include <audio_utils/SimpleLog.h>
+#include <audio_utils/TimestampVerifier.h>
 
 #include "FastCapture.h"
 #include "FastMixer.h"
@@ -68,6 +72,7 @@
 #include "AudioStreamOut.h"
 #include "SpdifStreamOut.h"
 #include "AudioHwDevice.h"
+#include "NBAIO_Tee.h"
 
 #include <powermanager/IPowerManager.h>
 
@@ -788,42 +793,16 @@
 
     // for use from destructor
     status_t    closeOutput_nonvirtual(audio_io_handle_t output);
-    void        closeOutputInternal_l(const sp<PlaybackThread>& thread);
+    void        closeThreadInternal_l(const sp<PlaybackThread>& thread);
     status_t    closeInput_nonvirtual(audio_io_handle_t input);
-    void        closeInputInternal_l(const sp<RecordThread>& thread);
+    void        closeThreadInternal_l(const sp<RecordThread>& thread);
     void        setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId);
 
     status_t    checkStreamType(audio_stream_type_t stream) const;
 
-#ifdef TEE_SINK
-    // all record threads serially share a common tee sink, which is re-created on format change
-    sp<NBAIO_Sink>   mRecordTeeSink;
-    sp<NBAIO_Source> mRecordTeeSource;
-#endif
+    void        filterReservedParameters(String8& keyValuePairs, uid_t callingUid);
 
 public:
-
-#ifdef TEE_SINK
-    // tee sink, if enabled by property, allows dumpsys to write most recent audio to .wav file
-    static void dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id, char suffix);
-
-    // whether tee sink is enabled by property
-    static bool mTeeSinkInputEnabled;
-    static bool mTeeSinkOutputEnabled;
-    static bool mTeeSinkTrackEnabled;
-
-    // runtime configured size of each tee sink pipe, in frames
-    static size_t mTeeSinkInputFrames;
-    static size_t mTeeSinkOutputFrames;
-    static size_t mTeeSinkTrackFrames;
-
-    // compile-time default size of tee sink pipes, in frames
-    // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes
-    static const size_t kTeeSinkInputFramesDefault = 0x200000;
-    static const size_t kTeeSinkOutputFramesDefault = 0x200000;
-    static const size_t kTeeSinkTrackFramesDefault = 0x200000;
-#endif
-
     // These methods read variables atomically without mLock,
     // though the variables are updated with mLock.
     bool    isLowRamDevice() const { return mIsLowRamDevice; }
@@ -838,7 +817,8 @@
 
     nsecs_t mGlobalEffectEnableTime;  // when a global effect was last enabled
 
-    sp<PatchPanel> mPatchPanel;
+    // protected by mLock
+    PatchPanel mPatchPanel;
     sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
 
     bool        mSystemReady;
diff --git a/services/audioflinger/BufLog.cpp b/services/audioflinger/BufLog.cpp
index 2780290..ae96036 100644
--- a/services/audioflinger/BufLog.cpp
+++ b/services/audioflinger/BufLog.cpp
@@ -24,6 +24,7 @@
 #include <pthread.h>
 #include <stdio.h>
 #include <string.h>
+#include <audio_utils/string.h>
 
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
 
@@ -117,7 +118,7 @@
     mByteCount = 0l;
     mPaused = false;
     if (tag != NULL) {
-        strncpy(mTag, tag, BUFLOGSTREAM_MAX_TAGSIZE);
+        (void)audio_utils_strlcpy(mTag, tag);
     } else {
         mTag[0] = 0;
     }
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index dcf223c..786c4af 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -31,9 +31,9 @@
 #include <media/AudioEffect.h>
 #include <media/audiohal/EffectHalInterface.h>
 #include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <mediautils/ServiceUtilities.h>
 
 #include "AudioFlinger.h"
-#include "ServiceUtilities.h"
 
 // ----------------------------------------------------------------------------
 
@@ -595,7 +595,8 @@
             (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
                     || mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
         // Older effects may require exact STEREO position mask.
-        if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) {
+        if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
+                && (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) {
             ALOGV("Overriding effect input channels %#x as STEREO", mConfig.inputCfg.channels);
             mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
         }
@@ -1814,7 +1815,7 @@
     bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
 
     snprintf(buffer, size, "\t\t\t%5d    %5d  %3s    %3s  %5u  %5u\n",
-            (mClient == 0) ? getpid_cached : mClient->pid(),
+            (mClient == 0) ? getpid() : mClient->pid(),
             mPriority,
             mHasControl ? "yes" : "no",
             locked ? "yes" : "no",
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index ef466a2..ad35264 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -47,7 +47,8 @@
 
 /*static*/ const FastMixerState FastMixer::sInitial;
 
-FastMixer::FastMixer() : FastThread("cycle_ms", "load_us"),
+FastMixer::FastMixer(audio_io_handle_t parentIoHandle)
+    : FastThread("cycle_ms", "load_us"),
     // mFastTrackNames
     // mGenerations
     mOutputSink(NULL),
@@ -66,8 +67,11 @@
     mTotalNativeFramesWritten(0),
     // timestamp
     mNativeFramesWrittenButNotPresented(0),   // the = 0 is to silence the compiler
-    mMasterMono(false)
+    mMasterMono(false),
+    mThreadIoHandle(parentIoHandle)
 {
+    (void)mThreadIoHandle; // prevent unused warning, see C++17 [[maybe_unused]]
+
     // FIXME pass sInitial as parameter to base class constructor, and make it static local
     mPrevious = &sInitial;
     mCurrent = &sInitial;
@@ -216,15 +220,14 @@
             mWarmupNsMax = LONG_MAX;
         }
         mMixerBufferState = UNDEFINED;
-#if !LOG_NDEBUG
-        for (unsigned i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
-            mFastTrackNames[i] = -1;
-        }
-#endif
         // we need to reconfigure all active tracks
         previousTrackMask = 0;
         mFastTracksGen = current->mFastTracksGen - 1;
         dumpState->mFrameCount = frameCount;
+#ifdef TEE_SINK
+        mTee.set(mFormat, NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
+        mTee.setId(std::string("_") + std::to_string(mThreadIoHandle) + "_F");
+#endif
     } else {
         previousTrackMask = previous->mTrackMask;
     }
@@ -245,9 +248,6 @@
             if (mMixer != NULL) {
                 mMixer->destroy(i);
             }
-#if !LOG_NDEBUG
-            mFastTrackNames[i] = -1;
-#endif
             // don't reset track dump state, since other side is ignoring it
             mGenerations[i] = fastTrack->mGeneration;
         }
@@ -259,7 +259,6 @@
             addedTracks &= ~(1 << i);
             const FastTrack* fastTrack = &current->mFastTracks[i];
             AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-            ALOG_ASSERT(bufferProvider != NULL && mFastTrackNames[i] == -1);
             if (mMixer != NULL) {
                 const int name = i; // for clarity, choose name as fast track index.
                 status_t status = mMixer->create(
@@ -337,13 +336,15 @@
 {
     // TODO: pass an ID parameter to indicate which time series we want to write to in NBLog.cpp
     // Or: pass both of these into a single call with a boolean
+    const FastMixerState * const current = (const FastMixerState *) mCurrent;
+    FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
+
     if (mIsWarm) {
         LOG_HIST_TS();
     } else {
+        dumpState->mTimestampVerifier.discontinuity();
         LOG_AUDIO_STATE();
     }
-    const FastMixerState * const current = (const FastMixerState *) mCurrent;
-    FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
     const FastMixerState::Command command = mCommand;
     const size_t frameCount = current->mFrameCount;
 
@@ -455,10 +456,9 @@
                     frameCount * Format_channelCount(mFormat));
         }
         // if non-NULL, then duplicate write() to this non-blocking sink
-        NBAIO_Sink* teeSink;
-        if ((teeSink = current->mTeeSink) != NULL) {
-            (void) teeSink->write(buffer, frameCount);
-        }
+#ifdef TEE_SINK
+        mTee.write(buffer, frameCount);
+#endif
         // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
         //       but this code should be modified to handle both non-blocking and blocking sinks
         dumpState->mWriteSequence++;
@@ -479,35 +479,47 @@
         mAttemptedWrite = true;
         // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
 
-        ExtendedTimestamp timestamp; // local
-        status_t status = mOutputSink->getTimestamp(timestamp);
-        if (status == NO_ERROR) {
-            const int64_t totalNativeFramesPresented =
-                    timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
-            if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
-                mNativeFramesWrittenButNotPresented =
-                    mTotalNativeFramesWritten - totalNativeFramesPresented;
-                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+        if (mIsWarm) {
+            ExtendedTimestamp timestamp; // local
+            status_t status = mOutputSink->getTimestamp(timestamp);
+            if (status == NO_ERROR) {
+                dumpState->mTimestampVerifier.add(
+                        timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+                        mSampleRate);
+                const int64_t totalNativeFramesPresented =
                         timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
-                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
-                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+                if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
+                    mNativeFramesWrittenButNotPresented =
+                        mTotalNativeFramesWritten - totalNativeFramesPresented;
+                    mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+                            timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+                            timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+                    // We don't compensate for the server vs. kernel time difference, and
+                    // we only update latency when we have valid info.
+                    dumpState->mLatencyMs =
+                            (double)mNativeFramesWrittenButNotPresented * 1000 / mSampleRate;
+                } else {
+                    // HAL reported that more frames were presented than were written
+                    mNativeFramesWrittenButNotPresented = 0;
+                    status = INVALID_OPERATION;
+                }
             } else {
-                // HAL reported that more frames were presented than were written
-                mNativeFramesWrittenButNotPresented = 0;
-                status = INVALID_OPERATION;
+                dumpState->mTimestampVerifier.error();
             }
-        }
-        if (status == NO_ERROR) {
-            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
-                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
-        } else {
-            // fetch server time if we can't get timestamp
-            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
-                    systemTime(SYSTEM_TIME_MONOTONIC);
-            // clear out kernel cached position as this may get rapidly stale
-            // if we never get a new valid timestamp
-            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
-            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+            if (status == NO_ERROR) {
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+                        mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+            } else {
+                // fetch server time if we can't get timestamp
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+                        systemTime(SYSTEM_TIME_MONOTONIC);
+                // clear out kernel cached position as this may get rapidly stale
+                // if we never get a new valid timestamp
+                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+            }
         }
     }
 }
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 235d23f..1c86d9a 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -22,6 +22,7 @@
 #include "StateQueue.h"
 #include "FastMixerState.h"
 #include "FastMixerDumpState.h"
+#include "NBAIO_Tee.h"
 
 namespace android {
 
@@ -32,7 +33,9 @@
 class FastMixer : public FastThread {
 
 public:
-            FastMixer();
+    /** The FastMixer constructor takes the parent MixerThread's I/O handle (id),
+        which is used for identification purposes. */
+    explicit FastMixer(audio_io_handle_t threadIoHandle);
     virtual ~FastMixer();
 
             FastMixerStateQueue* sq();
@@ -87,6 +90,11 @@
     // accessed without lock between multiple threads.
     std::atomic_bool mMasterMono;
     std::atomic_int_fast64_t mBoottimeOffset;
+
+    const audio_io_handle_t mThreadIoHandle; // parent thread id for debugging purposes
+#ifdef TEE_SINK
+    NBAIO_Tee       mTee;
+#endif
 };  // class FastMixer
 
 }   // namespace android
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 2e4fb8c..d60643c 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -68,11 +68,12 @@
     dprintf(fd, "  FastMixer command=%s writeSequence=%u framesWritten=%u\n"
                 "            numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
                 "            sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
-                "            mixPeriod=%.2f ms\n",
+                "            mixPeriod=%.2f ms latency=%.2f ms\n",
                 FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
                 mNumTracks, mWriteErrors, mUnderruns, mOverruns,
                 mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
-                mixPeriodSec * 1e3);
+                mixPeriodSec * 1e3, mLatencyMs);
+    dprintf(fd, "  FastMixer Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
 #ifdef FAST_THREAD_STATISTICS
     // find the interval of valid samples
     uint32_t bounds = mBounds;
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 8ef31d1..9b91cbc 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -18,6 +18,7 @@
 #define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
 
 #include <stdint.h>
+#include <audio_utils/TimestampVerifier.h>
 #include "Configuration.h"
 #include "FastThreadDumpState.h"
 #include "FastMixerState.h"
@@ -66,6 +67,7 @@
 
     void dump(int fd) const;    // should only be called on a stable copy, not the original
 
+    double   mLatencyMs = 0.;   // measured latency, default of 0 if no valid timestamp read.
     uint32_t mWriteSequence;    // incremented before and after each write()
     uint32_t mFramesWritten;    // total number of frames written successfully
     uint32_t mNumTracks;        // total number of active fast tracks
@@ -74,6 +76,9 @@
     size_t   mFrameCount;
     uint32_t mTrackMask;        // mask of active tracks
     FastTrackDump   mTracks[FastMixerState::kMaxFastTracks];
+
+    // For timestamp statistics.
+    TimestampVerifier<int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
 };
 
 }   // android
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index 36d8eef..b98842d 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -35,7 +35,7 @@
 FastMixerState::FastMixerState() : FastThreadState(),
     // mFastTracks
     mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0),
-    mFrameCount(0), mTeeSink(NULL)
+    mFrameCount(0)
 {
     int ok = pthread_once(&sMaxFastTracksOnce, sMaxFastTracksInit);
     if (ok != 0) {
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 2be1e91..c7fcbd8 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -77,9 +77,6 @@
         WRITE = 0x10,           // write to output sink
         MIX_WRITE = 0x18;       // mix tracks and write to output sink
 
-    // This might be a one-time configuration rather than per-state
-    NBAIO_Sink* mTeeSink;       // if non-NULL, then duplicate write()s to this non-blocking sink
-
     // never returns NULL; asserts if command is invalid
     static const char *commandToString(Command command);
 
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index a210a1b..968d5aa 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -28,6 +28,7 @@
                             audio_format_t format,
                             audio_channel_mask_t channelMask,
                             audio_session_t sessionId,
+                            bool isOut,
                             uid_t uid,
                             pid_t pid,
                             audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
@@ -39,10 +40,20 @@
                               audio_session_t triggerSession);
     virtual void        stop();
     virtual bool        isFastTrack() const { return false; }
+            bool        isDirect() const override { return true; }
 
-     static void        appendDumpHeader(String8& result);
+            void        appendDumpHeader(String8& result);
             void        appendDump(String8& result, bool active);
 
+                        // protected by MMapThread::mLock
+            void        setSilenced_l(bool silenced) { mSilenced = silenced;
+                                                       mSilencedNotified = false;}
+                        // protected by MMapThread::mLock
+            bool        isSilenced_l() const { return mSilenced; }
+                        // protected by MMapThread::mLock
+            bool        getAndSetSilencedNotified_l() { bool silencedNotified = mSilencedNotified;
+                                                        mSilencedNotified = true;
+                                                        return silencedNotified; }
 private:
     friend class MmapThread;
 
@@ -58,5 +69,7 @@
     virtual void onTimestamp(const ExtendedTimestamp &timestamp);
 
     pid_t mPid;
+    bool  mSilenced;            // protected by MMapThread::mLock
+    bool  mSilencedNotified;    // protected by MMapThread::mLock
 };  // end of Track
 
diff --git a/services/audioflinger/NBAIO_Tee.cpp b/services/audioflinger/NBAIO_Tee.cpp
new file mode 100644
index 0000000..53083d5
--- /dev/null
+++ b/services/audioflinger/NBAIO_Tee.cpp
@@ -0,0 +1,517 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "NBAIO_Tee"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <deque>
+#include <dirent.h>
+#include <future>
+#include <list>
+#include <vector>
+
+#include <audio_utils/format.h>
+#include <audio_utils/sndfile.h>
+#include <media/nbaio/PipeReader.h>
+
+#include "Configuration.h"
+#include "NBAIO_Tee.h"
+
+// Enabled with TEE_SINK in Configuration.h
+#ifdef TEE_SINK
+
+namespace android {
+
+/*
+ Tee filenames generated as follows:
+
+ "aftee_Date_ThreadId_C_reason.wav" RecordThread
+ "aftee_Date_ThreadId_M_reason.wav" MixerThread (Normal)
+ "aftee_Date_ThreadId_F_reason.wav" MixerThread (Fast)
+ "aftee_Date_ThreadId_TrackId_R_reason.wav" RecordTrack
+ "aftee_Date_ThreadId_TrackId_TrackName_T_reason.wav" PlaybackTrack
+
+ where Date = YYYYmmdd_HHMMSS_MSEC
+
+ where reason = [ DTOR | DUMP | REMOVE ]
+
+ Examples:
+  aftee_20180424_153811_038_13_57_2_T_REMOVE.wav
+  aftee_20180424_153811_218_13_57_2_T_REMOVE.wav
+  aftee_20180424_153811_378_13_57_2_T_REMOVE.wav
+  aftee_20180424_153825_147_62_C_DUMP.wav
+  aftee_20180424_153825_148_62_59_R_DUMP.wav
+  aftee_20180424_153825_149_13_F_DUMP.wav
+  aftee_20180424_153842_125_62_59_R_REMOVE.wav
+  aftee_20180424_153842_168_62_C_DTOR.wav
+*/
+
+static constexpr char DEFAULT_PREFIX[] = "aftee_";
+static constexpr char DEFAULT_DIRECTORY[] = "/data/misc/audioserver";
+static constexpr size_t DEFAULT_THREADPOOL_SIZE = 8;
+
+/** AudioFileHandler manages temporary audio wav files with a least-recently-created
+    retention policy: once the number of files exceeds the keep limit, the oldest
+    files are removed first.
+
+    The temporary filenames are systematically generated. A common filename prefix,
+    storage directory, and concurrency pool size are passed in when creating the object.
+
+    Temporary files are created by "create", which returns a filename generated as
+
+    prefix + date/time string (YYYYmmdd_HHMMSS_MSEC) + suffix + ".wav"
+
+    TODO Move to audio_utils.
+    TODO Avoid pointing two AudioFileHandlers to the same directory and prefix,
+    as we don't have a prefix-specific lock file. */
+
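+// A minimal usage sketch (illustrative only; "source" stands for any object with an
+// NBAIO_Source-style read(buffer, frames) method returning the number of frames read):
+//
+//     AudioFileHandler handler(DEFAULT_PREFIX, DEFAULT_DIRECTORY, DEFAULT_THREADPOOL_SIZE);
+//     const std::string filename = handler.create(
+//             [source](void *buffer, size_t frames) { return source->read(buffer, frames); },
+//             48000 /* sampleRate */, 2 /* channelCount */, AUDIO_FORMAT_PCM_16_BIT,
+//             "_example");
+//     // filename is empty on failure, otherwise "aftee_<date>_example.wav".
+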
+class AudioFileHandler {
+public:
+
+    AudioFileHandler(const std::string &prefix, const std::string &directory, size_t pool)
+        : mThreadPool(pool)
+        , mPrefix(prefix)
+    {
+        (void)setDirectory(directory);
+    }
+
+    /** returns filename of created audio file, else empty string on failure. */
+    std::string create(
+            std::function<ssize_t /* frames_read */
+                        (void * /* buffer */, size_t /* size_in_frames */)> reader,
+            uint32_t sampleRate,
+            uint32_t channelCount,
+            audio_format_t format,
+            const std::string &suffix);
+
+private:
+    /** Sets the current directory. This is currently private to avoid confusion
+        when it is changed while operations are pending (that is okay, but only
+        weakly synchronized). */
+    status_t setDirectory(const std::string &directory);
+
+    /** Cleans the current directory; if dir is non-null, returns the name of the
+        directory that was cleaned in *dir. */
+    status_t clean(std::string *dir = nullptr);
+
+    /** creates an audio file from a reader functor passed in. */
+    status_t createInternal(
+            std::function<ssize_t /* frames_read */
+                        (void * /* buffer */, size_t /* size_in_frames */)> reader,
+            uint32_t sampleRate,
+            uint32_t channelCount,
+            audio_format_t format,
+            const std::string &filename);
+
+    static bool isDirectoryValid(const std::string &directory) {
+        return directory.size() > 0 && directory[0] == '/';
+    }
+
+    std::string generateFilename(const std::string &suffix) const {
+        char fileTime[sizeof("YYYYmmdd_HHMMSS_\0")];
+        struct timeval tv;
+        gettimeofday(&tv, NULL);
+        struct tm tm;
+        localtime_r(&tv.tv_sec, &tm);
+        LOG_ALWAYS_FATAL_IF(strftime(fileTime, sizeof(fileTime), "%Y%m%d_%H%M%S_", &tm) == 0,
+            "incorrect fileTime buffer");
+        char msec[4];
+        (void)snprintf(msec, sizeof(msec), "%03d", (int)(tv.tv_usec / 1000));
+        return mPrefix + fileTime + msec + suffix + ".wav";
+    }
+
+    bool isManagedFilename(const char *name) {
+        constexpr size_t FILENAME_LEN_DATE = 4 + 2 + 2 // %Y%m%d
+            + 1 + 2 + 2 + 2 // _%H%M%S
+            + 1 + 3; // _MSEC
+        const size_t prefixLen = mPrefix.size();
+        const size_t nameLen = strlen(name);
+
+        // reject on size, prefix, and .wav
+        if (nameLen < prefixLen + FILENAME_LEN_DATE + 4 /* .wav */
+             || strncmp(name, mPrefix.c_str(), prefixLen) != 0
+             || strcmp(name + nameLen - 4, ".wav") != 0) {
+            return false;
+        }
+
+        // validate date portion
+        const char *date = name + prefixLen;
+        return std::all_of(date, date + 8, isdigit)
+            && date[8] == '_'
+            && std::all_of(date + 9, date + 15, isdigit)
+            && date[15] == '_'
+            && std::all_of(date + 16, date + 19, isdigit);
+    }
+
+    // yet another ThreadPool implementation.
+    class ThreadPool {
+    public:
+        ThreadPool(size_t size)
+            : mThreadPoolSize(size)
+        { }
+
+        /** Launches task "name" with associated function "func".
+            If the thread pool is exhausted, the function runs synchronously
+            on the calling thread. */
+        status_t launch(const std::string &name, std::function<status_t()> func);
+
+    private:
+        std::mutex mLock;
+        std::list<std::pair<
+                std::string, std::future<status_t>>> mFutures; // GUARDED_BY(mLock)
+
+        const size_t mThreadPoolSize;
+    } mThreadPool;
+
+    const std::string mPrefix;
+    std::mutex mLock;
+    std::string mDirectory;         // GUARDED_BY(mLock)
+    std::deque<std::string> mFiles; // GUARDED_BY(mLock)  sorted list of files by creation time
+
+    static constexpr size_t FRAMES_PER_READ = 1024;
+    static constexpr size_t MAX_FILES_READ = 1024;
+    static constexpr size_t MAX_FILES_KEEP = 32;
+};
+
+/* static */
+void NBAIO_Tee::NBAIO_TeeImpl::dumpTee(
+        int fd, const NBAIO_SinkSource &sinkSource, const std::string &suffix)
+{
+    // Singleton. Constructed thread-safe on first call, never destroyed.
+    static AudioFileHandler audioFileHandler(
+            DEFAULT_PREFIX, DEFAULT_DIRECTORY, DEFAULT_THREADPOOL_SIZE);
+
+    auto &source = sinkSource.second;
+    if (source.get() == nullptr) {
+        return;
+    }
+
+    const NBAIO_Format format = source->format();
+    bool firstRead = true;
+    std::string filename = audioFileHandler.create(
+            // this functor must not hold references to the stack, as it may run asynchronously
+            [firstRead, sinkSource] (void *buffer, size_t frames) mutable {
+                    auto &source = sinkSource.second;
+                    ssize_t actualRead = source->read(buffer, frames);
+                    if (actualRead == (ssize_t)OVERRUN && firstRead) {
+                        // recheck once
+                        actualRead = source->read(buffer, frames);
+                    }
+                    firstRead = false;
+                    return actualRead;
+                },
+            Format_sampleRate(format),
+            Format_channelCount(format),
+            format.mFormat,
+            suffix);
+
+    if (fd >= 0 && filename.size() > 0) {
+        dprintf(fd, "tee wrote to %s\n", filename.c_str());
+    }
+}
+
+/* static */
+NBAIO_Tee::NBAIO_TeeImpl::NBAIO_SinkSource NBAIO_Tee::NBAIO_TeeImpl::makeSinkSource(
+        const NBAIO_Format &format, size_t frames, bool *enabled)
+{
+    if (Format_isValid(format) && audio_is_linear_pcm(format.mFormat)) {
+        Pipe *pipe = new Pipe(frames, format);
+        size_t numCounterOffers = 0;
+        const NBAIO_Format offers[1] = {format};
+        ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
+        if (index != 0) {
+            ALOGW("pipe failure to negotiate: %zd", index);
+            goto exit;
+        }
+        PipeReader *pipeReader = new PipeReader(*pipe);
+        numCounterOffers = 0;
+        index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
+        if (index != 0) {
+            ALOGW("pipeReader failure to negotiate: %zd", index);
+            goto exit;
+        }
+        if (enabled != nullptr) *enabled = true;
+        return {pipe, pipeReader};
+    }
+exit:
+    if (enabled != nullptr) *enabled = false;
+    return {nullptr, nullptr};
+}
+
+std::string AudioFileHandler::create(
+        std::function<ssize_t /* frames_read */
+                    (void * /* buffer */, size_t /* size_in_frames */)> reader,
+        uint32_t sampleRate,
+        uint32_t channelCount,
+        audio_format_t format,
+        const std::string &suffix)
+{
+    const std::string filename = generateFilename(suffix);
+
+    if (mThreadPool.launch(std::string("create ") + filename,
+            [=]() { return createInternal(reader, sampleRate, channelCount, format, filename); })
+            == NO_ERROR) {
+        return filename;
+    }
+    return "";
+}
+
+status_t AudioFileHandler::setDirectory(const std::string &directory)
+{
+    if (!isDirectoryValid(directory)) return BAD_VALUE;
+
+    // TODO: consider using std::filesystem in C++17
+    DIR *dir = opendir(directory.c_str());
+
+    if (dir == nullptr) {
+        ALOGW("%s: cannot open directory %s", __func__, directory.c_str());
+        return BAD_VALUE;
+    }
+
+    size_t toRemove = 0;
+    decltype(mFiles) files;
+
+    while (files.size() < MAX_FILES_READ) {
+        errno = 0;
+        const struct dirent *result = readdir(dir);
+        if (result == nullptr) {
+            ALOGW_IF(errno != 0, "%s: readdir failure %s", __func__, strerror(errno));
+            break;
+        }
+        // is it a managed filename?
+        if (!isManagedFilename(result->d_name)) {
+            continue;
+        }
+        files.emplace_back(result->d_name);
+    }
+    (void)closedir(dir);
+
+    // OPTIMIZATION: we don't need to stat each file; the filenames are already
+    // (roughly) ordered by creation date.  We use std::deque instead of std::set
+    // for faster insertion and sorting times.
+
+    if (files.size() > MAX_FILES_KEEP) {
+        // files to be removed only need a partition (no full sort required).
+        toRemove = files.size() - MAX_FILES_KEEP;
+        std::nth_element(files.begin(), files.begin() + toRemove - 1, files.end());
+    }
+
+    // kept files must be sorted.
+    std::sort(files.begin() + toRemove, files.end());
+
+    {
+        std::lock_guard<std::mutex> _l(mLock);
+
+        mDirectory = directory;
+        mFiles = std::move(files);
+    }
+
+    if (toRemove > 0) { // launch a clean in background.
+        (void)mThreadPool.launch(
+                std::string("cleaning ") + directory, [this]() { return clean(); });
+    }
+    return NO_ERROR;
+}
+
+status_t AudioFileHandler::clean(std::string *directory)
+{
+    std::vector<std::string> filesToRemove;
+    std::string dir;
+    {
+        std::lock_guard<std::mutex> _l(mLock);
+
+        if (!isDirectoryValid(mDirectory)) return NO_INIT;
+
+        dir = mDirectory;
+        if (mFiles.size() > MAX_FILES_KEEP) {
+            size_t toRemove = mFiles.size() - MAX_FILES_KEEP;
+
+            // use move and erase to efficiently transfer std::string
+            std::move(mFiles.begin(),
+                    mFiles.begin() + toRemove,
+                    std::back_inserter(filesToRemove));
+            mFiles.erase(mFiles.begin(), mFiles.begin() + toRemove);
+        }
+    }
+
+    std::string dirp = dir + "/";
+    // remove files outside of lock for better concurrency.
+    for (const auto &file : filesToRemove) {
+        (void)unlink((dirp + file).c_str());
+    }
+
+    // return the directory if requested.
+    if (directory != nullptr) {
+        *directory = dir;
+    }
+    return NO_ERROR;
+}
+
+status_t AudioFileHandler::ThreadPool::launch(
+        const std::string &name, std::function<status_t()> func)
+{
+    if (mThreadPoolSize > 1) {
+        std::lock_guard<std::mutex> _l(mLock);
+        if (mFutures.size() >= mThreadPoolSize) {
+            for (auto it = mFutures.begin(); it != mFutures.end();) {
+                const std::string &filename = it->first;
+                std::future<status_t> &future = it->second;
+                if (!future.valid() ||
+                        future.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
+                    ALOGV("%s: future %s ready", __func__, filename.c_str());
+                    it = mFutures.erase(it);
+                } else {
+                    ALOGV("%s: future %s not ready", __func__, filename.c_str());
+                    ++it;
+                }
+            }
+        }
+        if (mFutures.size() < mThreadPoolSize) {
+            ALOGV("%s: deferred calling %s", __func__, name.c_str());
+            mFutures.emplace_back(name, std::async(std::launch::async, func));
+            return NO_ERROR;
+        }
+    }
+    ALOGV("%s: immediate calling %s", __func__, name.c_str());
+    return func();
+}
+
+status_t AudioFileHandler::createInternal(
+        std::function<ssize_t /* frames_read */
+                    (void * /* buffer */, size_t /* size_in_frames */)> reader,
+        uint32_t sampleRate,
+        uint32_t channelCount,
+        audio_format_t format,
+        const std::string &filename)
+{
+    // Attempt to choose the best matching file format.
+    // We can choose any sf_format
+    // but writeFormat must be one of 16, 32, float
+    // due to sf_writef compatibility.
+    int sf_format;
+    audio_format_t writeFormat;
+    switch (format) {
+    case AUDIO_FORMAT_PCM_8_BIT:
+    case AUDIO_FORMAT_PCM_16_BIT:
+        sf_format = SF_FORMAT_PCM_16;
+        writeFormat = AUDIO_FORMAT_PCM_16_BIT;
+        ALOGV("%s: %s using PCM_16 for format %#x", __func__, filename.c_str(), format);
+        break;
+    case AUDIO_FORMAT_PCM_8_24_BIT:
+    case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+    case AUDIO_FORMAT_PCM_32_BIT:
+        sf_format = SF_FORMAT_PCM_32;
+        writeFormat = AUDIO_FORMAT_PCM_32_BIT;
+        ALOGV("%s: %s using PCM_32 for format %#x", __func__, filename.c_str(), format);
+        break;
+    case AUDIO_FORMAT_PCM_FLOAT:
+        sf_format = SF_FORMAT_FLOAT;
+        writeFormat = AUDIO_FORMAT_PCM_FLOAT;
+        ALOGV("%s: %s using PCM_FLOAT for format %#x", __func__, filename.c_str(), format);
+        break;
+    default:
+        // TODO:
+        // handle audio_has_proportional_frames() formats.
+        // handle compressed formats as single byte files.
+        return BAD_VALUE;
+    }
+
+    std::string directory;
+    status_t status = clean(&directory);
+    if (status != NO_ERROR) return status;
+    std::string dirPrefix = directory + "/";
+
+    const std::string path = dirPrefix + filename;
+
+    /* const */ SF_INFO info = {
+        .frames = 0,
+        .samplerate = (int)sampleRate,
+        .channels = (int)channelCount,
+        .format = SF_FORMAT_WAV | sf_format,
+    };
+    SNDFILE *sf = sf_open(path.c_str(), SFM_WRITE, &info);
+    if (sf == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    size_t total = 0;
+    void *buffer = malloc(FRAMES_PER_READ * std::max(
+            channelCount * audio_bytes_per_sample(writeFormat), // output frame size
+            channelCount * audio_bytes_per_sample(format))); // input frame size
+    if (buffer == nullptr) {
+        sf_close(sf);
+        return NO_MEMORY;
+    }
+
+    for (;;) {
+        const ssize_t actualRead = reader(buffer, FRAMES_PER_READ);
+        if (actualRead <= 0) {
+            break;
+        }
+
+        // Convert input format to writeFormat as needed.
+        if (format != writeFormat) {
+            memcpy_by_audio_format(
+                    buffer, writeFormat, buffer, format, actualRead * info.channels);
+        }
+
+        ssize_t reallyWritten;
+        switch (writeFormat) {
+        case AUDIO_FORMAT_PCM_16_BIT:
+            reallyWritten = sf_writef_short(sf, (const int16_t *)buffer, actualRead);
+            break;
+        case AUDIO_FORMAT_PCM_32_BIT:
+            reallyWritten = sf_writef_int(sf, (const int32_t *)buffer, actualRead);
+            break;
+        case AUDIO_FORMAT_PCM_FLOAT:
+            reallyWritten = sf_writef_float(sf, (const float *)buffer, actualRead);
+            break;
+        default:
+            LOG_ALWAYS_FATAL("%s: %s writeFormat: %#x", __func__, filename.c_str(), writeFormat);
+            break;
+        }
+
+        if (reallyWritten < 0) {
+            ALOGW("%s: %s write error: %zd", __func__, filename.c_str(), reallyWritten);
+            break;
+        }
+        total += reallyWritten;
+        if (reallyWritten < actualRead) {
+            ALOGW("%s: %s write short count: %zd < %zd",
+                     __func__, filename.c_str(), reallyWritten, actualRead);
+            break;
+        }
+    }
+    sf_close(sf);
+    free(buffer);
+    if (total == 0) {
+        (void)unlink(path.c_str());
+        return NOT_ENOUGH_DATA;
+    }
+
+    // Success: add our name to managed files.
+    {
+        std::lock_guard<std::mutex> _l(mLock);
+        // weak synchronization - only update mFiles if the directory hasn't changed.
+        if (mDirectory == directory) {
+            mFiles.emplace_back(filename);  // add to the end to preserve sort.
+        }
+    }
+    return NO_ERROR;
+}
+
+} // namespace android
+
+#endif // TEE_SINK
diff --git a/services/audioflinger/NBAIO_Tee.h b/services/audioflinger/NBAIO_Tee.h
new file mode 100644
index 0000000..fed8cc8
--- /dev/null
+++ b/services/audioflinger/NBAIO_Tee.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Enabled with TEE_SINK in Configuration.h
+#ifndef ANDROID_NBAIO_TEE_H
+#define ANDROID_NBAIO_TEE_H
+
+#ifdef TEE_SINK
+
+#include <atomic>
+#include <mutex>
+#include <set>
+
+#include <cutils/properties.h>
+#include <media/nbaio/NBAIO.h>
+
+namespace android {
+
+/**
+ * The NBAIO_Tee uses the NBAIO Pipe and PipeReader for nonblocking
+ * data collection, for eventual dump to log files.
+ * See https://source.android.com/devices/audio/debugging for how to
+ * enable by ro.debuggable and af.tee properties.
+ *
+ * The write() into the NBAIO_Tee is therefore nonblocking,
+ * but changing NBAIO_Tee formats with set() cannot be done during a write();
+ * usually the caller already implements this mutual exclusion.
+ *
+ * All other calls except set() vs write() may occur at any time.
+ *
+ * dump() disruption is minimized to the caller since system calls are executed
+ * in an asynchronous thread (when possible).
+ *
+ * Currently the NBAIO_Tee is "hardwired" for AudioFlinger support.
+ *
+ * Some AudioFlinger specific notes:
+ *
+ * 1) Tees capture only linear PCM data.
+ * 2) Tees without any data written are considered empty and do not generate
+ *    any output files.
+ * 3) Once a Tee dumps data, it is considered "emptied" and new data
+ *    needs to be written before another Tee file is generated.
+ * 4) Tee file format is
+ *    WAV integer PCM 16 bit for AUDIO_FORMAT_PCM_8_BIT, AUDIO_FORMAT_PCM_16_BIT.
+ *    WAV integer PCM 32 bit for AUDIO_FORMAT_PCM_8_24_BIT, AUDIO_FORMAT_PCM_24_BIT_PACKED,
+ *                               AUDIO_FORMAT_PCM_32_BIT.
+ *    WAV float PCM 32 bit for AUDIO_FORMAT_PCM_FLOAT.
+ *
+ * Input_Thread:
+ * 1) Capture buffer is teed when read from the HAL, before resampling for the AudioRecord
+ *    client.
+ *
+ * Output_Thread:
+ * 1) MixerThreads will tee at the FastMixer output (if it has one) or at the
+ *    NormalMixer output (if no FastMixer).
+ * 2) DuplicatingThreads do not tee any mixed data. Apply a tee on the downstream OutputTrack
+ *    or on the upstream playback Tracks.
+ * 3) DirectThreads and OffloadThreads do not tee any data. The upstream track
+ *    (if linear PCM format) may be teed to discover data.
+ * 4) MmapThreads are not supported.
+ *
+ * Tracks:
+ * 1) RecordTracks and playback Tracks tee as data is being written to or
+ *    read from the shared client-server track buffer by the associated Threads.
+ * 2) The tee hooks into AudioBufferProvider release(), so playback of a large static Track
+ *    may not show any Tee data, depending on when the buffer is released.
+ * 3) When a track becomes inactive, the Thread will trigger a dump.
+ */
+
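+/*
+ * A minimal usage sketch (illustrative only; "ioHandle", "mixBuffer", "frameCount" and "fd"
+ * are hypothetical caller-side variables). Note that set() returns PERMISSION_DENIED unless
+ * the af.tee property enables the requested Tee type:
+ *
+ *     NBAIO_Tee tee;
+ *     (void)tee.set(48000, 2, AUDIO_FORMAT_PCM_16_BIT, NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
+ *     tee.setId(std::string("_") + std::to_string(ioHandle) + "_M");
+ *     tee.write(mixBuffer, frameCount);  // nonblocking, may be called every mix cycle
+ *     tee.dump(fd, "_DUMP");             // writes aftee_<date>_..._M_DUMP.wav if data was written
+ */
+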
+class NBAIO_Tee {
+public:
+    /* TEE_FLAG is used in set() and must match the flags for the af.tee property
+       given in https://source.android.com/devices/audio/debugging
+    */
+    enum TEE_FLAG {
+        TEE_FLAG_NONE = 0,
+        TEE_FLAG_INPUT_THREAD = (1 << 0),  // treat as a Tee for input (Capture) Threads
+        TEE_FLAG_OUTPUT_THREAD = (1 << 1), // treat as a Tee for output (Playback) Threads
+        TEE_FLAG_TRACK = (1 << 2),         // treat as a Tee for tracks (Record and Playback)
+    };
+
+    NBAIO_Tee()
+        : mTee(std::make_shared<NBAIO_TeeImpl>())
+    {
+        getRunningTees().add(mTee);
+    }
+
+    ~NBAIO_Tee() {
+        getRunningTees().remove(mTee);
+        dump(-1, "_DTOR"); // log any data remaining in Tee.
+    }
+
+    /**
+     * \brief set is used for deferred configuration of Tee.
+     *
+     *  May be called anytime except concurrently with write().
+     *
+     * \param format NBAIO_Format used to open NBAIO pipes
+     * \param flags (https://source.android.com/devices/audio/debugging)
+     *              - TEE_FLAG_NONE to bypass af.tee property checks (default);
+     *              - TEE_FLAG_INPUT_THREAD to check af.tee if input thread logging set;
+     *              - TEE_FLAG_OUTPUT_THREAD to check af.tee if output thread logging set;
+     *              - TEE_FLAG_TRACK to check af.tee if track logging set.
+     * \param frames number of frames to open the NBAIO pipe (set to 0 to use default).
+     *
+     * \return
+     *         - NO_ERROR on success (or format unchanged)
+     *         - BAD_VALUE if format or flags invalid.
+     *         - PERMISSION_DENIED if flags not allowed by af.tee
+     */
+
+    status_t set(const NBAIO_Format &format,
+            TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
+        return mTee->set(format, flags, frames);
+    }
+
+    status_t set(uint32_t sampleRate, uint32_t channelCount, audio_format_t format,
+            TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
+        return mTee->set(Format_from_SR_C(sampleRate, channelCount, format), flags, frames);
+    }
+
+    /**
+     * \brief write data to the tee.
+     *
+     * This call is lock free (as shared pointer and NBAIO is lock free);
+     * may be called simultaneous to all methods except set().
+     *
+     * \param buffer to write to pipe.
+     * \param frameCount in frames as specified by the format passed to set()
+     */
+
+    void write(const void *buffer, size_t frameCount) const {
+        mTee->write(buffer, frameCount);
+    }
+
+    /** sets Tee id string which identifies the generated file (should be unique). */
+    void setId(const std::string &id) const {
+        mTee->setId(id);
+    }
+
+    /**
+     * \brief dump the audio content written to the Tee.
+     *
+     * \param fd file descriptor to write dumped filename for logging, use -1 to ignore.
+     * \param reason string suffix to append to the generated file.
+     */
+    void dump(int fd, const std::string &reason = "") const {
+        mTee->dump(fd, reason);
+    }
+
+    /**
+     * \brief dump all Tees currently alive.
+     *
+     * \param fd file descriptor to write dumped filename for logging, use -1 to ignore.
+     * \param reason string suffix to append to the generated file.
+     */
+    static void dumpAll(int fd, const std::string &reason = "") {
+        getRunningTees().dump(fd, reason);
+    }
+
+private:
+
+    /** The underlying implementation of the Tee - its lifetime is managed through
+        a shared pointer, so destruction of the NBAIO_Tee container may proceed
+        even while dumping is in progress. */
+    class NBAIO_TeeImpl {
+    public:
+        status_t set(const NBAIO_Format &format, TEE_FLAG flags, size_t frames) {
+            static const int teeConfig = property_get_bool("ro.debuggable", false)
+                   ? property_get_int32("af.tee", 0) : 0;
+
+            // check the type of Tee
+            const TEE_FLAG type = TEE_FLAG(
+                    flags & (TEE_FLAG_INPUT_THREAD | TEE_FLAG_OUTPUT_THREAD | TEE_FLAG_TRACK));
+
+            // parameter flags can't select multiple types.
+            if (__builtin_popcount(type) > 1) {
+                return BAD_VALUE;
+            }
+
+            // if type is set, we check to see if it is permitted by configuration.
+            if (type != 0 && (type & teeConfig) == 0) {
+                return PERMISSION_DENIED;
+            }
+
+            // determine number of frames for Tee
+            if (frames == 0) {
+                // TODO: consider varying frame count based on type.
+                frames = DEFAULT_TEE_FRAMES;
+            }
+
+            // TODO: should we check minimum number of frames?
+
+            // don't do anything if format and frames are the same.
+            if (Format_isEqual(format, mFormat) && frames == mFrames) {
+                return NO_ERROR;
+            }
+
+            bool enabled = false;
+            auto sinksource = makeSinkSource(format, frames, &enabled);
+
+            // enabled is set if makeSinkSource is successful.
+            // Note: as mentioned in NBAIO_Tee::set(), don't call set() while write() is
+            // ongoing.
+            if (enabled) {
+                std::lock_guard<std::mutex> _l(mLock);
+                mFlags = flags;
+                mFormat = format; // could get this from the Sink.
+                mFrames = frames;
+                mSinkSource = std::move(sinksource);
+                mEnabled.store(true);
+                return NO_ERROR;
+            }
+            return BAD_VALUE;
+        }
+
+        void setId(const std::string &id) {
+            std::lock_guard<std::mutex> _l(mLock);
+            mId = id;
+        }
+
+        void dump(int fd, const std::string &reason) {
+            if (!mDataReady.exchange(false)) return;
+            std::string suffix;
+            NBAIO_SinkSource sinkSource;
+            {
+                std::lock_guard<std::mutex> _l(mLock);
+                suffix = mId + reason;
+                sinkSource = mSinkSource;
+            }
+            dumpTee(fd, sinkSource, suffix);
+        }
+
+        void write(const void *buffer, size_t frameCount) {
+            if (!mEnabled.load() || frameCount == 0) return;
+            (void)mSinkSource.first->write(buffer, frameCount);
+            mDataReady.store(true);
+        }
+
+    private:
+        // TRICKY: We need to keep the NBAIO_Sink and NBAIO_Source both alive at the same time
+        // because PipeReader holds a naked reference (not a strong or weak pointer) to Pipe.
+        using NBAIO_SinkSource = std::pair<sp<NBAIO_Sink>, sp<NBAIO_Source>>;
+
+        static void dumpTee(int fd, const NBAIO_SinkSource& sinkSource, const std::string& suffix);
+
+        static NBAIO_SinkSource makeSinkSource(
+                const NBAIO_Format &format, size_t frames, bool *enabled);
+
+        // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes
+        static constexpr size_t DEFAULT_TEE_FRAMES = 0x200000;
+
+        // atomic status checking
+        std::atomic<bool> mEnabled{false};
+        std::atomic<bool> mDataReady{false};
+
+        // locked dump information
+        mutable std::mutex mLock;
+        std::string mId;                                         // GUARDED_BY(mLock)
+        TEE_FLAG mFlags = TEE_FLAG_NONE;                         // GUARDED_BY(mLock)
+        NBAIO_Format mFormat = Format_Invalid;                   // GUARDED_BY(mLock)
+        size_t mFrames = 0;                                      // GUARDED_BY(mLock)
+        NBAIO_SinkSource mSinkSource;                            // GUARDED_BY(mLock)
+    };
+
+    /** RunningTees tracks the currently running tees for dump purposes.
+        It keeps locked regions minimal so as to be transparent to the caller. */
+    class RunningTees {
+    public:
+        void add(const std::shared_ptr<NBAIO_TeeImpl> &tee) {
+            std::lock_guard<std::mutex> _l(mLock);
+            ALOGW_IF(!mTees.emplace(tee).second,
+                    "%s: %p already exists in mTees", __func__, tee.get());
+        }
+
+        void remove(const std::shared_ptr<NBAIO_TeeImpl> &tee) {
+            std::lock_guard<std::mutex> _l(mLock);
+            ALOGW_IF(mTees.erase(tee) != 1,
+                    "%s: %p doesn't exist in mTees", __func__, tee.get());
+        }
+
+        void dump(int fd, const std::string &reason) {
+            std::vector<std::shared_ptr<NBAIO_TeeImpl>> tees; // safe snapshot of tees
+            {
+                std::lock_guard<std::mutex> _l(mLock);
+                tees.insert(tees.end(), mTees.begin(), mTees.end());
+            }
+            for (const auto &tee : tees) {
+                tee->dump(fd, reason);
+            }
+        }
+
+    private:
+        std::mutex mLock;
+        std::set<std::shared_ptr<NBAIO_TeeImpl>> mTees; // GUARDED_BY(mLock)
+    };
+
+    // singleton
+    static RunningTees &getRunningTees() {
+        static RunningTees runningTees;
+        return runningTees;
+    }
+
+    // The NBAIO_TeeImpl may outlive the NBAIO_Tee if RunningTees::dump()
+    // is called concurrently with ~NBAIO_Tee().
+    // This is allowed for maximum concurrency.
+    const std::shared_ptr<NBAIO_TeeImpl> mTee;
+}; // NBAIO_Tee
+
+} // namespace android
+
+#endif // TEE_SINK
+#endif // !ANDROID_NBAIO_TEE_H
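
A minimal usage sketch of the NBAIO_Tee wrapper declared above may clarify the intended call pattern. It is illustrative only: it assumes NBAIO_Tee is default-constructible and that TEE_FLAG is an enum nested in NBAIO_Tee, as the defaulted set() parameters suggest; the id and reason strings are arbitrary.

#ifdef TEE_SINK
// Sketch only, not part of this change.
void exampleTeeUsage(android::NBAIO_Tee &tee,   // e.g. a thread's or track's mTee member
                     const void *buffer, size_t frameCount, int dumpFd) {
    using android::NBAIO_Tee;
    // Configure once the stream parameters are known. PERMISSION_DENIED merely
    // means the af.tee property does not enable this tee type on this build.
    (void)tee.set(48000 /*sampleRate*/, 2 /*channelCount*/, AUDIO_FORMAT_PCM_16_BIT,
            NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
    tee.setId("_example_T");                    // names the generated dump file

    // Data path: lock free, and a no-op until set() has succeeded.
    tee.write(buffer, frameCount);

    // Dump path (e.g. from dumpsys): the dumped filename is logged to dumpFd.
    tee.dump(dumpFd, "_example");
    NBAIO_Tee::dumpAll(dumpFd, "_all");
}
#endif // TEE_SINK
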
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index e5cb8a2..0caa0af 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -24,8 +24,9 @@
 #include <audio_utils/primitives.h>
 
 #include "AudioFlinger.h"
-#include "ServiceUtilities.h"
 #include <media/AudioParameter.h>
+#include <media/PatchBuilder.h>
+#include <mediautils/ServiceUtilities.h>
 
 // ----------------------------------------------------------------------------
 
@@ -49,111 +50,67 @@
                                 struct audio_port *ports)
 {
     Mutex::Autolock _l(mLock);
-    if (mPatchPanel != 0) {
-        return mPatchPanel->listAudioPorts(num_ports, ports);
-    }
-    return NO_INIT;
+    return mPatchPanel.listAudioPorts(num_ports, ports);
 }
 
 /* Get supported attributes for a given audio port */
 status_t AudioFlinger::getAudioPort(struct audio_port *port)
 {
     Mutex::Autolock _l(mLock);
-    if (mPatchPanel != 0) {
-        return mPatchPanel->getAudioPort(port);
-    }
-    return NO_INIT;
+    return mPatchPanel.getAudioPort(port);
 }
 
-
 /* Connect a patch between several source and sink ports */
 status_t AudioFlinger::createAudioPatch(const struct audio_patch *patch,
                                    audio_patch_handle_t *handle)
 {
     Mutex::Autolock _l(mLock);
-    if (mPatchPanel != 0) {
-        return mPatchPanel->createAudioPatch(patch, handle);
-    }
-    return NO_INIT;
+    return mPatchPanel.createAudioPatch(patch, handle);
 }
 
 /* Disconnect a patch */
 status_t AudioFlinger::releaseAudioPatch(audio_patch_handle_t handle)
 {
     Mutex::Autolock _l(mLock);
-    if (mPatchPanel != 0) {
-        return mPatchPanel->releaseAudioPatch(handle);
-    }
-    return NO_INIT;
+    return mPatchPanel.releaseAudioPatch(handle);
 }
 
-
 /* List connected audio ports and their attributes */
 status_t AudioFlinger::listAudioPatches(unsigned int *num_patches,
                                   struct audio_patch *patches)
 {
     Mutex::Autolock _l(mLock);
-    if (mPatchPanel != 0) {
-        return mPatchPanel->listAudioPatches(num_patches, patches);
-    }
-    return NO_INIT;
-}
-
-/* Set audio port configuration */
-status_t AudioFlinger::setAudioPortConfig(const struct audio_port_config *config)
-{
-    Mutex::Autolock _l(mLock);
-    if (mPatchPanel != 0) {
-        return mPatchPanel->setAudioPortConfig(config);
-    }
-    return NO_INIT;
-}
-
-
-AudioFlinger::PatchPanel::PatchPanel(const sp<AudioFlinger>& audioFlinger)
-                                   : mAudioFlinger(audioFlinger)
-{
-}
-
-AudioFlinger::PatchPanel::~PatchPanel()
-{
+    return mPatchPanel.listAudioPatches(num_patches, patches);
 }
 
 /* List connected audio ports and their attributes */
 status_t AudioFlinger::PatchPanel::listAudioPorts(unsigned int *num_ports __unused,
                                 struct audio_port *ports __unused)
 {
-    ALOGV("listAudioPorts");
+    ALOGV(__func__);
     return NO_ERROR;
 }
 
 /* Get supported attributes for a given audio port */
 status_t AudioFlinger::PatchPanel::getAudioPort(struct audio_port *port __unused)
 {
-    ALOGV("getAudioPort");
+    ALOGV(__func__);
     return NO_ERROR;
 }
 
-
 /* Connect a patch between several source and sink ports */
 status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
                                    audio_patch_handle_t *handle)
 {
-    status_t status = NO_ERROR;
-    audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
-    sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
     if (handle == NULL || patch == NULL) {
         return BAD_VALUE;
     }
-    ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
-          patch->num_sources, patch->num_sinks, *handle);
-    if (audioflinger == 0) {
-        return NO_INIT;
-    }
+    ALOGV("%s() num_sources %d num_sinks %d handle %d",
+            __func__, patch->num_sources, patch->num_sinks, *handle);
+    status_t status = NO_ERROR;
+    audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
 
-    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            (patch->num_sinks == 0 && patch->num_sources != 2) ||
-            patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+    if (!audio_patch_is_valid(patch) || (patch->num_sinks == 0 && patch->num_sources != 2)) {
         return BAD_VALUE;
     }
     // limit number of sources to 1 for now or 2 sources for special cross hw module case.
@@ -163,81 +120,73 @@
     }
 
     if (*handle != AUDIO_PATCH_HANDLE_NONE) {
-        for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
-            if (*handle == mPatches[index]->mHandle) {
-                ALOGV("createAudioPatch() removing patch handle %d", *handle);
-                halHandle = mPatches[index]->mHalHandle;
-                Patch *removedPatch = mPatches[index];
-                // free resources owned by the removed patch if applicable
-                // 1) if a software patch is present, release the playback and capture threads and
-                // tracks created. This will also release the corresponding audio HAL patches
-                if ((removedPatch->mRecordPatchHandle
-                        != AUDIO_PATCH_HANDLE_NONE) ||
-                        (removedPatch->mPlaybackPatchHandle !=
-                                AUDIO_PATCH_HANDLE_NONE)) {
-                    clearPatchConnections(removedPatch);
-                }
-                // 2) if the new patch and old patch source or sink are devices from different
-                // hw modules,  clear the audio HAL patches now because they will not be updated
-                // by call to create_audio_patch() below which will happen on a different HW module
-                if (halHandle != AUDIO_PATCH_HANDLE_NONE) {
-                    audio_module_handle_t hwModule = AUDIO_MODULE_HANDLE_NONE;
-                    if ((removedPatch->mAudioPatch.sources[0].type == AUDIO_PORT_TYPE_DEVICE) &&
-                        ((patch->sources[0].type != AUDIO_PORT_TYPE_DEVICE) ||
-                          (removedPatch->mAudioPatch.sources[0].ext.device.hw_module !=
-                           patch->sources[0].ext.device.hw_module))) {
-                        hwModule = removedPatch->mAudioPatch.sources[0].ext.device.hw_module;
-                    } else if ((patch->num_sinks == 0) ||
-                            ((removedPatch->mAudioPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
-                             ((patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) ||
-                              (removedPatch->mAudioPatch.sinks[0].ext.device.hw_module !=
-                               patch->sinks[0].ext.device.hw_module)))) {
-                        // Note on (patch->num_sinks == 0): this situation should not happen as
-                        // these special patches are only created by the policy manager but just
-                        // in case, systematically clear the HAL patch.
-                        // Note that removedPatch->mAudioPatch.num_sinks cannot be 0 here because
-                        // halHandle would be AUDIO_PATCH_HANDLE_NONE in this case.
-                        hwModule = removedPatch->mAudioPatch.sinks[0].ext.device.hw_module;
-                    }
-                    if (hwModule != AUDIO_MODULE_HANDLE_NONE) {
-                        ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(hwModule);
-                        if (index >= 0) {
-                            sp<DeviceHalInterface> hwDevice =
-                                    audioflinger->mAudioHwDevs.valueAt(index)->hwDevice();
-                            hwDevice->releaseAudioPatch(halHandle);
-                        }
-                    }
-                }
-                mPatches.removeAt(index);
-                delete removedPatch;
-                break;
+        auto iter = mPatches.find(*handle);
+        if (iter != mPatches.end()) {
+            ALOGV("%s() removing patch handle %d", __func__, *handle);
+            Patch &removedPatch = iter->second;
+            // free resources owned by the removed patch if applicable
+            // 1) if a software patch is present, release the playback and capture threads and
+            // tracks created. This will also release the corresponding audio HAL patches
+            if (removedPatch.isSoftware()) {
+                removedPatch.clearConnections(this);
             }
+            // 2) if the new patch and old patch source or sink are devices from different
+            // hw modules,  clear the audio HAL patches now because they will not be updated
+            // by call to create_audio_patch() below which will happen on a different HW module
+            if (removedPatch.mHalHandle != AUDIO_PATCH_HANDLE_NONE) {
+                audio_module_handle_t hwModule = AUDIO_MODULE_HANDLE_NONE;
+                const struct audio_patch &oldPatch = removedPatch.mAudioPatch;
+                if (oldPatch.sources[0].type == AUDIO_PORT_TYPE_DEVICE &&
+                        (patch->sources[0].type != AUDIO_PORT_TYPE_DEVICE ||
+                                oldPatch.sources[0].ext.device.hw_module !=
+                                patch->sources[0].ext.device.hw_module)) {
+                    hwModule = oldPatch.sources[0].ext.device.hw_module;
+                } else if (patch->num_sinks == 0 ||
+                        (oldPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE &&
+                                (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE ||
+                                        oldPatch.sinks[0].ext.device.hw_module !=
+                                        patch->sinks[0].ext.device.hw_module))) {
+                    // Note on (patch->num_sinks == 0): this situation should not happen as
+                    // these special patches are only created by the policy manager but just
+                    // in case, systematically clear the HAL patch.
+                    // Note that removedPatch.mAudioPatch.num_sinks cannot be 0 here because
+                    // removedPatch.mHalHandle would be AUDIO_PATCH_HANDLE_NONE in this case.
+                    hwModule = oldPatch.sinks[0].ext.device.hw_module;
+                }
+                sp<DeviceHalInterface> hwDevice = findHwDeviceByModule(hwModule);
+                if (hwDevice != 0) {
+                    hwDevice->releaseAudioPatch(removedPatch.mHalHandle);
+                }
+            }
+            mPatches.erase(iter);
         }
     }
 
-    Patch *newPatch = new Patch(patch);
+    Patch newPatch{*patch};
 
     switch (patch->sources[0].type) {
         case AUDIO_PORT_TYPE_DEVICE: {
             audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
-            ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
+            ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(srcModule);
             if (index < 0) {
-                ALOGW("createAudioPatch() bad src hw module %d", srcModule);
+                ALOGW("%s() bad src hw module %d", __func__, srcModule);
                 status = BAD_VALUE;
                 goto exit;
             }
-            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
+            AudioHwDevice *audioHwDevice = mAudioFlinger.mAudioHwDevs.valueAt(index);
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 // support only one sink if connection to a mix or across HW modules
                 if ((patch->sinks[i].type == AUDIO_PORT_TYPE_MIX ||
-                        patch->sinks[i].ext.mix.hw_module != srcModule) &&
+                                (patch->sinks[i].type == AUDIO_PORT_TYPE_DEVICE &&
+                                        patch->sinks[i].ext.device.hw_module != srcModule)) &&
                         patch->num_sinks > 1) {
+                    ALOGW("%s() multiple sinks for mix or across modules not supported", __func__);
                     status = INVALID_OPERATION;
                     goto exit;
                 }
                 // reject connection to different sink types
                 if (patch->sinks[i].type != patch->sinks[0].type) {
-                    ALOGW("createAudioPatch() different sink types in same patch not supported");
+                    ALOGW("%s() different sink types in same patch not supported", __func__);
                     status = BAD_VALUE;
                     goto exit;
                 }
@@ -256,38 +205,42 @@
                     if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
                             (patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
                                     patch->sources[1].ext.mix.hw_module)) {
-                        ALOGW("createAudioPatch() invalid source combination");
+                        ALOGW("%s() invalid source combination", __func__);
                         status = INVALID_OPERATION;
                         goto exit;
                     }
 
                     sp<ThreadBase> thread =
-                            audioflinger->checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
-                    newPatch->mPlaybackThread = (MixerThread *)thread.get();
+                            mAudioFlinger.checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
                     if (thread == 0) {
-                        ALOGW("createAudioPatch() cannot get playback thread");
+                        ALOGW("%s() cannot get playback thread", __func__);
                         status = INVALID_OPERATION;
                         goto exit;
                     }
+                    // existing playback thread is reused, so it is not closed when patch is cleared
+                    newPatch.mPlayback.setThread(
+                            reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
                 } else {
                     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
                     audio_devices_t device = patch->sinks[0].ext.device.type;
                     String8 address = String8(patch->sinks[0].ext.device.address);
                     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-                    sp<ThreadBase> thread = audioflinger->openOutput_l(
+                    audio_output_flags_t flags =
+                            patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+                            patch->sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
+                    sp<ThreadBase> thread = mAudioFlinger.openOutput_l(
                                                             patch->sinks[0].ext.device.hw_module,
                                                             &output,
                                                             &config,
                                                             device,
                                                             address,
-                                                            AUDIO_OUTPUT_FLAG_NONE);
-                    newPatch->mPlaybackThread = (PlaybackThread *)thread.get();
-                    ALOGV("audioflinger->openOutput_l() returned %p",
-                                          newPatch->mPlaybackThread.get());
-                    if (newPatch->mPlaybackThread == 0) {
+                                                            flags);
+                    ALOGV("mAudioFlinger.openOutput_l() returned %p", thread.get());
+                    if (thread == 0) {
                         status = NO_MEMORY;
                         goto exit;
                     }
+                    newPatch.mPlayback.setThread(reinterpret_cast<PlaybackThread*>(thread.get()));
                 }
                 audio_devices_t device = patch->sources[0].ext.device.type;
                 String8 address = String8(patch->sources[0].ext.device.address);
@@ -297,47 +250,50 @@
                 if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
                     config.sample_rate = patch->sources[0].sample_rate;
                 } else {
-                    config.sample_rate = newPatch->mPlaybackThread->sampleRate();
+                    config.sample_rate = newPatch.mPlayback.thread()->sampleRate();
                 }
                 if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
                     config.channel_mask = patch->sources[0].channel_mask;
                 } else {
-                    config.channel_mask =
-                        audio_channel_in_mask_from_count(newPatch->mPlaybackThread->channelCount());
+                    config.channel_mask = audio_channel_in_mask_from_count(
+                            newPatch.mPlayback.thread()->channelCount());
                 }
                 if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
                     config.format = patch->sources[0].format;
                 } else {
-                    config.format = newPatch->mPlaybackThread->format();
+                    config.format = newPatch.mPlayback.thread()->format();
                 }
+                audio_input_flags_t flags =
+                        patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+                        patch->sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
                 audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-                sp<ThreadBase> thread = audioflinger->openInput_l(srcModule,
+                sp<ThreadBase> thread = mAudioFlinger.openInput_l(srcModule,
                                                                     &input,
                                                                     &config,
                                                                     device,
                                                                     address,
                                                                     AUDIO_SOURCE_MIC,
-                                                                    AUDIO_INPUT_FLAG_NONE);
-                newPatch->mRecordThread = (RecordThread *)thread.get();
-                ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
-                      newPatch->mRecordThread.get(), config.channel_mask);
-                if (newPatch->mRecordThread == 0) {
+                                                                    flags);
+                ALOGV("mAudioFlinger.openInput_l() returned %p inChannelMask %08x",
+                      thread.get(), config.channel_mask);
+                if (thread == 0) {
                     status = NO_MEMORY;
                     goto exit;
                 }
-                status = createPatchConnections(newPatch, patch);
+                newPatch.mRecord.setThread(reinterpret_cast<RecordThread*>(thread.get()));
+                status = newPatch.createConnections(this);
                 if (status != NO_ERROR) {
                     goto exit;
                 }
             } else {
                 if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+                    sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(
                                                               patch->sinks[0].ext.mix.handle);
                     if (thread == 0) {
-                        thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+                        thread = mAudioFlinger.checkMmapThread_l(patch->sinks[0].ext.mix.handle);
                         if (thread == 0) {
-                            ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
+                            ALOGW("%s() bad capture I/O handle %d",
+                                    __func__, patch->sinks[0].ext.mix.handle);
                             status = BAD_VALUE;
                             goto exit;
                         }
@@ -356,9 +312,9 @@
         } break;
         case AUDIO_PORT_TYPE_MIX: {
             audio_module_handle_t srcModule =  patch->sources[0].ext.mix.hw_module;
-            ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
+            ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(srcModule);
             if (index < 0) {
-                ALOGW("createAudioPatch() bad src hw module %d", srcModule);
+                ALOGW("%s() bad src hw module %d", __func__, srcModule);
                 status = BAD_VALUE;
                 goto exit;
             }
@@ -366,8 +322,8 @@
             audio_devices_t type = AUDIO_DEVICE_NONE;
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
-                    ALOGW("createAudioPatch() invalid sink type %d for mix source",
-                          patch->sinks[i].type);
+                    ALOGW("%s() invalid sink type %d for mix source",
+                            __func__, patch->sinks[i].type);
                     status = BAD_VALUE;
                     goto exit;
                 }
@@ -379,21 +335,21 @@
                 type |= patch->sinks[i].ext.device.type;
             }
             sp<ThreadBase> thread =
-                            audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
+                            mAudioFlinger.checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
             if (thread == 0) {
-                thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+                thread = mAudioFlinger.checkMmapThread_l(patch->sources[0].ext.mix.handle);
                 if (thread == 0) {
-                    ALOGW("createAudioPatch() bad playback I/O handle %d",
-                              patch->sources[0].ext.mix.handle);
+                    ALOGW("%s() bad playback I/O handle %d",
+                            __func__, patch->sources[0].ext.mix.handle);
                     status = BAD_VALUE;
                     goto exit;
                 }
             }
-            if (thread == audioflinger->primaryPlaybackThread_l()) {
+            if (thread == mAudioFlinger.primaryPlaybackThread_l()) {
                 AudioParameter param = AudioParameter();
                 param.addInt(String8(AudioParameter::keyRouting), (int)type);
 
-                audioflinger->broacastParametersToRecordThreads_l(param.toString());
+                mAudioFlinger.broacastParametersToRecordThreads_l(param.toString());
             }
 
             status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
@@ -403,295 +359,284 @@
             goto exit;
     }
 exit:
-    ALOGV("createAudioPatch() status %d", status);
+    ALOGV("%s() status %d", __func__, status);
     if (status == NO_ERROR) {
-        *handle = (audio_patch_handle_t) audioflinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
-        newPatch->mHandle = *handle;
-        newPatch->mHalHandle = halHandle;
-        mPatches.add(newPatch);
-        ALOGV("createAudioPatch() added new patch handle %d halHandle %d", *handle, halHandle);
+        *handle = (audio_patch_handle_t) mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
+        newPatch.mHalHandle = halHandle;
+        mPatches.insert(std::make_pair(*handle, std::move(newPatch)));
+        ALOGV("%s() added new patch handle %d halHandle %d", __func__, *handle, halHandle);
     } else {
-        clearPatchConnections(newPatch);
-        delete newPatch;
+        newPatch.clearConnections(this);
     }
     return status;
 }
 
-status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch,
-                                                          const struct audio_patch *audioPatch)
+AudioFlinger::PatchPanel::Patch::~Patch()
+{
+    ALOGE_IF(isSoftware(), "Software patch connections leaked %d %d",
+            mRecord.handle(), mPlayback.handle());
+}
+
+status_t AudioFlinger::PatchPanel::Patch::createConnections(PatchPanel *panel)
 {
     // create patch from source device to record thread input
-    struct audio_patch subPatch;
-    subPatch.num_sources = 1;
-    subPatch.sources[0] = audioPatch->sources[0];
-    subPatch.num_sinks = 1;
-
-    patch->mRecordThread->getAudioPortConfig(&subPatch.sinks[0]);
-    subPatch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_MIC;
-
-    status_t status = createAudioPatch(&subPatch, &patch->mRecordPatchHandle);
+    status_t status = panel->createAudioPatch(
+            PatchBuilder().addSource(mAudioPatch.sources[0]).
+                addSink(mRecord.thread(), { .source = AUDIO_SOURCE_MIC }).patch(),
+            mRecord.handlePtr());
     if (status != NO_ERROR) {
-        patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        *mRecord.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
         return status;
     }
 
     // create patch from playback thread output to sink device
-    if (audioPatch->num_sinks != 0) {
-        patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]);
-        subPatch.sinks[0] = audioPatch->sinks[0];
-        status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle);
+    if (mAudioPatch.num_sinks != 0) {
+        status = panel->createAudioPatch(
+                PatchBuilder().addSource(mPlayback.thread()).addSink(mAudioPatch.sinks[0]).patch(),
+                mPlayback.handlePtr());
         if (status != NO_ERROR) {
-            patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+            *mPlayback.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
             return status;
         }
     } else {
-        patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        *mPlayback.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
     }
 
     // use a pseudo LCM between input and output framecount
-    size_t playbackFrameCount = patch->mPlaybackThread->frameCount();
+    size_t playbackFrameCount = mPlayback.thread()->frameCount();
     int playbackShift = __builtin_ctz(playbackFrameCount);
-    size_t recordFramecount = patch->mRecordThread->frameCount();
+    size_t recordFramecount = mRecord.thread()->frameCount();
     int shift = __builtin_ctz(recordFramecount);
     if (playbackShift < shift) {
         shift = playbackShift;
     }
     size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
-    ALOGV("createPatchConnections() playframeCount %zu recordFramecount %zu frameCount %zu",
-          playbackFrameCount, recordFramecount, frameCount);
+    ALOGV("%s() playframeCount %zu recordFramecount %zu frameCount %zu",
+            __func__, playbackFrameCount, recordFramecount, frameCount);
 
     // create a special record track to capture from record thread
-    uint32_t channelCount = patch->mPlaybackThread->channelCount();
+    uint32_t channelCount = mPlayback.thread()->channelCount();
     audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
-    audio_channel_mask_t outChannelMask = patch->mPlaybackThread->channelMask();
-    uint32_t sampleRate = patch->mPlaybackThread->sampleRate();
-    audio_format_t format = patch->mPlaybackThread->format();
+    audio_channel_mask_t outChannelMask = mPlayback.thread()->channelMask();
+    uint32_t sampleRate = mPlayback.thread()->sampleRate();
+    audio_format_t format = mPlayback.thread()->format();
 
-    patch->mPatchRecord = new RecordThread::PatchRecord(
-                                             patch->mRecordThread.get(),
+    audio_format_t inputFormat = mRecord.thread()->format();
+    if (!audio_is_linear_pcm(inputFormat)) {
+        // The playbackThread format will say PCM for IEC61937 packetized stream.
+        // Use recordThread format.
+        format = inputFormat;
+    }
+    audio_input_flags_t inputFlags = mAudioPatch.sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+            mAudioPatch.sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
+    sp<RecordThread::PatchRecord> tempRecordTrack = new (std::nothrow) RecordThread::PatchRecord(
+                                             mRecord.thread().get(),
                                              sampleRate,
                                              inChannelMask,
                                              format,
                                              frameCount,
                                              NULL,
                                              (size_t)0 /* bufferSize */,
-                                             AUDIO_INPUT_FLAG_NONE);
-    if (patch->mPatchRecord == 0) {
-        return NO_MEMORY;
-    }
-    status = patch->mPatchRecord->initCheck();
+                                             inputFlags);
+    status = mRecord.checkTrack(tempRecordTrack.get());
     if (status != NO_ERROR) {
         return status;
     }
-    patch->mRecordThread->addPatchRecord(patch->mPatchRecord);
+
+    audio_output_flags_t outputFlags = mAudioPatch.sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+            mAudioPatch.sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
 
     // create a special playback track to render to playback thread.
     // this track is given the same buffer as the PatchRecord buffer
-    patch->mPatchTrack = new PlaybackThread::PatchTrack(
-                                           patch->mPlaybackThread.get(),
-                                           audioPatch->sources[1].ext.mix.usecase.stream,
+    sp<PlaybackThread::PatchTrack> tempPatchTrack = new (std::nothrow) PlaybackThread::PatchTrack(
+                                           mPlayback.thread().get(),
+                                           mAudioPatch.sources[1].ext.mix.usecase.stream,
                                            sampleRate,
                                            outChannelMask,
                                            format,
                                            frameCount,
-                                           patch->mPatchRecord->buffer(),
-                                           patch->mPatchRecord->bufferSize(),
-                                           AUDIO_OUTPUT_FLAG_NONE);
-    status = patch->mPatchTrack->initCheck();
+                                           tempRecordTrack->buffer(),
+                                           tempRecordTrack->bufferSize(),
+                                           outputFlags);
+    status = mPlayback.checkTrack(tempPatchTrack.get());
     if (status != NO_ERROR) {
         return status;
     }
-    patch->mPlaybackThread->addPatchTrack(patch->mPatchTrack);
 
     // tie playback and record tracks together
-    patch->mPatchRecord->setPeerProxy(patch->mPatchTrack.get());
-    patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
+    mRecord.setTrackAndPeer(tempRecordTrack, tempPatchTrack.get());
+    mPlayback.setTrackAndPeer(tempPatchTrack, tempRecordTrack.get());
 
     // start capture and playback
-    patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
-    patch->mPatchTrack->start();
+    mRecord.track()->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
+    mPlayback.track()->start();
 
     return status;
 }
 
-void AudioFlinger::PatchPanel::clearPatchConnections(Patch *patch)
+void AudioFlinger::PatchPanel::Patch::clearConnections(PatchPanel *panel)
 {
-    sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
-    if (audioflinger == 0) {
-        return;
+    ALOGV("%s() mRecord.handle %d mPlayback.handle %d",
+            __func__, mRecord.handle(), mPlayback.handle());
+    mRecord.stopTrack();
+    mPlayback.stopTrack();
+    mRecord.closeConnections(panel);
+    mPlayback.closeConnections(panel);
+}
+
+status_t AudioFlinger::PatchPanel::Patch::getLatencyMs(double *latencyMs) const
+{
+    if (!isSoftware()) return INVALID_OPERATION;
+
+    auto recordTrack = mRecord.const_track();
+    if (recordTrack.get() == nullptr) return INVALID_OPERATION;
+
+    auto playbackTrack = mPlayback.const_track();
+    if (playbackTrack.get() == nullptr) return INVALID_OPERATION;
+
+    // Latency information for tracks may be queried without obtaining
+    // the underlying thread lock.
+    //
+    // We use record server latency + playback track latency (generally smaller than the
+    // reverse due to internal biases).
+    //
+    // TODO: is this stable enough? Consider a PatchTrack synchronized version of this.
+    double recordServerLatencyMs;
+    if (recordTrack->getServerLatencyMs(&recordServerLatencyMs) != OK) return INVALID_OPERATION;
+
+    double playbackTrackLatencyMs;
+    if (playbackTrack->getTrackLatencyMs(&playbackTrackLatencyMs) != OK) return INVALID_OPERATION;
+
+    *latencyMs = recordServerLatencyMs + playbackTrackLatencyMs;
+    return OK;
+}
+
+String8 AudioFlinger::PatchPanel::Patch::dump(audio_patch_handle_t myHandle)
+{
+    String8 result;
+
+    // TODO: Consider table dump form for patches, just like tracks.
+    result.appendFormat("Patch %d: thread %p => thread %p",
+            myHandle, mRecord.thread().get(), mPlayback.thread().get());
+
+    // add latency if it exists
+    double latencyMs;
+    if (getLatencyMs(&latencyMs) == OK) {
+        result.appendFormat("  latency: %.2lf", latencyMs);
     }
 
-    ALOGV("clearPatchConnections() patch->mRecordPatchHandle %d patch->mPlaybackPatchHandle %d",
-          patch->mRecordPatchHandle, patch->mPlaybackPatchHandle);
-
-    if (patch->mPatchRecord != 0) {
-        patch->mPatchRecord->stop();
-    }
-    if (patch->mPatchTrack != 0) {
-        patch->mPatchTrack->stop();
-    }
-    if (patch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
-        releaseAudioPatch(patch->mRecordPatchHandle);
-        patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-    }
-    if (patch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
-        releaseAudioPatch(patch->mPlaybackPatchHandle);
-        patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-    }
-    if (patch->mRecordThread != 0) {
-        if (patch->mPatchRecord != 0) {
-            patch->mRecordThread->deletePatchRecord(patch->mPatchRecord);
-        }
-        audioflinger->closeInputInternal_l(patch->mRecordThread);
-    }
-    if (patch->mPlaybackThread != 0) {
-        if (patch->mPatchTrack != 0) {
-            patch->mPlaybackThread->deletePatchTrack(patch->mPatchTrack);
-        }
-        // if num sources == 2 we are reusing an existing playback thread so we do not close it
-        if (patch->mAudioPatch.num_sources != 2) {
-            audioflinger->closeOutputInternal_l(patch->mPlaybackThread);
-        }
-    }
-    if (patch->mRecordThread != 0) {
-        if (patch->mPatchRecord != 0) {
-            patch->mPatchRecord.clear();
-        }
-        patch->mRecordThread.clear();
-    }
-    if (patch->mPlaybackThread != 0) {
-        if (patch->mPatchTrack != 0) {
-            patch->mPatchTrack.clear();
-        }
-        patch->mPlaybackThread.clear();
-    }
-
+    result.append("\n");
+    return result;
 }
 
 /* Disconnect a patch */
 status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle)
 {
-    ALOGV("releaseAudioPatch handle %d", handle);
+    ALOGV("%s handle %d", __func__, handle);
     status_t status = NO_ERROR;
-    size_t index;
 
-    sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
-    if (audioflinger == 0) {
-        return NO_INIT;
-    }
-
-    for (index = 0; index < mPatches.size(); index++) {
-        if (handle == mPatches[index]->mHandle) {
-            break;
-        }
-    }
-    if (index == mPatches.size()) {
+    auto iter = mPatches.find(handle);
+    if (iter == mPatches.end()) {
         return BAD_VALUE;
     }
-    Patch *removedPatch = mPatches[index];
-    mPatches.removeAt(index);
+    Patch &removedPatch = iter->second;
+    const struct audio_patch &patch = removedPatch.mAudioPatch;
 
-    struct audio_patch *patch = &removedPatch->mAudioPatch;
-
-    switch (patch->sources[0].type) {
+    const struct audio_port_config &src = patch.sources[0];
+    switch (src.type) {
         case AUDIO_PORT_TYPE_DEVICE: {
-            audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
-            ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
-            if (index < 0) {
-                ALOGW("releaseAudioPatch() bad src hw module %d", srcModule);
+            sp<DeviceHalInterface> hwDevice = findHwDeviceByModule(src.ext.device.hw_module);
+            if (hwDevice == 0) {
+                ALOGW("%s() bad src hw module %d", __func__, src.ext.device.hw_module);
                 status = BAD_VALUE;
                 break;
             }
 
-            if (removedPatch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE ||
-                    removedPatch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
-                clearPatchConnections(removedPatch);
+            if (removedPatch.isSoftware()) {
+                removedPatch.clearConnections(this);
                 break;
             }
 
-            if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                patch->sinks[0].ext.mix.handle);
+            if (patch.sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+                audio_io_handle_t ioHandle = patch.sinks[0].ext.mix.handle;
+                sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(ioHandle);
                 if (thread == 0) {
-                    thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+                    thread = mAudioFlinger.checkMmapThread_l(ioHandle);
                     if (thread == 0) {
-                        ALOGW("releaseAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
+                        ALOGW("%s() bad capture I/O handle %d", __func__, ioHandle);
                         status = BAD_VALUE;
                         break;
                     }
                 }
-                status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+                status = thread->sendReleaseAudioPatchConfigEvent(removedPatch.mHalHandle);
             } else {
-                AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-                sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
-                status = hwDevice->releaseAudioPatch(removedPatch->mHalHandle);
+                status = hwDevice->releaseAudioPatch(removedPatch.mHalHandle);
             }
         } break;
         case AUDIO_PORT_TYPE_MIX: {
-            audio_module_handle_t srcModule =  patch->sources[0].ext.mix.hw_module;
-            ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
-            if (index < 0) {
-                ALOGW("releaseAudioPatch() bad src hw module %d", srcModule);
+            if (findHwDeviceByModule(src.ext.mix.hw_module) == 0) {
+                ALOGW("%s() bad src hw module %d", __func__, src.ext.mix.hw_module);
                 status = BAD_VALUE;
                 break;
             }
-            sp<ThreadBase> thread =
-                            audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
+            audio_io_handle_t ioHandle = src.ext.mix.handle;
+            sp<ThreadBase> thread = mAudioFlinger.checkPlaybackThread_l(ioHandle);
             if (thread == 0) {
-                thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+                thread = mAudioFlinger.checkMmapThread_l(ioHandle);
                 if (thread == 0) {
-                    ALOGW("releaseAudioPatch() bad playback I/O handle %d",
-                                                                  patch->sources[0].ext.mix.handle);
+                    ALOGW("%s() bad playback I/O handle %d", __func__, ioHandle);
                     status = BAD_VALUE;
                     break;
                 }
             }
-            status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+            status = thread->sendReleaseAudioPatchConfigEvent(removedPatch.mHalHandle);
         } break;
         default:
             status = BAD_VALUE;
-            break;
     }
 
-    delete removedPatch;
+    mPatches.erase(iter);
     return status;
 }
 
-
 /* List connected audio ports and their attributes */
 status_t AudioFlinger::PatchPanel::listAudioPatches(unsigned int *num_patches __unused,
                                   struct audio_patch *patches __unused)
 {
-    ALOGV("listAudioPatches");
+    ALOGV(__func__);
     return NO_ERROR;
 }
 
-/* Set audio port configuration */
-status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_config *config)
+sp<DeviceHalInterface> AudioFlinger::PatchPanel::findHwDeviceByModule(audio_module_handle_t module)
 {
-    ALOGV("setAudioPortConfig");
-
-    sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
-    if (audioflinger == 0) {
-        return NO_INIT;
-    }
-
-    audio_module_handle_t module;
-    if (config->type == AUDIO_PORT_TYPE_DEVICE) {
-        module = config->ext.device.hw_module;
-    } else {
-        module = config->ext.mix.hw_module;
-    }
-
-    ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(module);
+    if (module == AUDIO_MODULE_HANDLE_NONE) return nullptr;
+    ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(module);
     if (index < 0) {
-        ALOGW("setAudioPortConfig() bad hw module %d", module);
-        return BAD_VALUE;
+        return nullptr;
     }
+    return mAudioFlinger.mAudioHwDevs.valueAt(index)->hwDevice();
+}
 
-    AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-    return audioHwDevice->hwDevice()->setAudioPortConfig(config);
+void AudioFlinger::PatchPanel::dump(int fd)
+{
+    // Only dump software patches.
+    bool headerPrinted = false;
+    for (auto& iter : mPatches) {
+        if (iter.second.isSoftware()) {
+            if (!headerPrinted) {
+                String8 header("\nSoftware patches:\n");
+                write(fd, header.string(), header.size());
+                headerPrinted = true;
+            }
+            String8 patchDump("  ");
+            patchDump.append(iter.second.dump(iter.first));
+            write(fd, patchDump.string(), patchDump.size());
+        }
+    }
+    if (headerPrinted) {
+        String8 trailing("\n");
+        write(fd, trailing.string(), trailing.size());
+    }
 }
 
 } // namespace android
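
The PatchPanel changes above replace SortedVector<Patch *> plus manual delete with a std::map keyed by the patch handle and move-only Patch values: createAudioPatch() moves the new patch into the map, and releaseAudioPatch() becomes a find()/erase() whose destructor releases resources. A standalone toy of that bookkeeping pattern (not AudioFlinger code; FakePatch is a hypothetical stand-in):

#include <cstdio>
#include <map>
#include <utility>

struct FakePatch {                         // hypothetical stand-in for PatchPanel::Patch
    explicit FakePatch(int hal) : halHandle(hal) {}
    FakePatch(const FakePatch&) = delete;  // move-only, like Patch in this change
    FakePatch(FakePatch&&) = default;
    FakePatch& operator=(FakePatch&&) = default;
    int halHandle;
};

int main() {
    std::map<int, FakePatch> patches;      // handle -> patch, as mPatches does

    // createAudioPatch(): allocate a handle and move the new patch into the map.
    const int handle = 42;
    FakePatch newPatch{7};
    patches.insert(std::make_pair(handle, std::move(newPatch)));

    // releaseAudioPatch(): O(log n) lookup by handle; erase() destroys the value.
    auto iter = patches.find(handle);
    if (iter != patches.end()) {
        std::printf("releasing patch %d (hal handle %d)\n",
                iter->first, iter->second.halHandle);
        patches.erase(iter);               // destructor runs here, no manual delete
    }
    return 0;
}
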
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index d37c0d3..5d6bf00 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -19,13 +19,10 @@
     #error This header file should only be included from AudioFlinger.h
 #endif
 
-class PatchPanel : public RefBase {
+// PatchPanel is concealed within AudioFlinger; their lifetimes are the same.
+class PatchPanel {
 public:
-
-    class Patch;
-
-    explicit PatchPanel(const sp<AudioFlinger>& audioFlinger);
-    virtual ~PatchPanel();
+    explicit PatchPanel(AudioFlinger* audioFlinger) : mAudioFlinger(*audioFlinger) {}
 
     /* List connected audio ports and their attributes */
     status_t listAudioPorts(unsigned int *num_ports,
@@ -45,46 +42,104 @@
     status_t listAudioPatches(unsigned int *num_patches,
                                       struct audio_patch *patches);
 
-    /* Set audio port configuration */
-    status_t setAudioPortConfig(const struct audio_port_config *config);
+    void dump(int fd);
 
-    status_t createPatchConnections(Patch *patch,
-                                    const struct audio_patch *audioPatch);
-    void clearPatchConnections(Patch *patch);
+private:
+    template<typename ThreadType, typename TrackType>
+    class Endpoint {
+    public:
+        Endpoint() = default;
+        Endpoint(Endpoint&& other) { *this = std::move(other); }
+        Endpoint& operator=(Endpoint&& other) {
+            ALOGE_IF(mHandle != AUDIO_PATCH_HANDLE_NONE,
+                    "A non empty Patch Endpoint leaked, handle %d", mHandle);
+            *this = other;
+            other.mHandle = AUDIO_PATCH_HANDLE_NONE;
+            return *this;
+        }
+
+        status_t checkTrack(TrackType *trackOrNull) const {
+            if (trackOrNull == nullptr) return NO_MEMORY;
+            return trackOrNull->initCheck();
+        }
+        audio_patch_handle_t handle() const { return mHandle; }
+        sp<ThreadType> thread() { return mThread; }
+        sp<TrackType> track() { return mTrack; }
+        sp<const TrackType> const_track() const { return mTrack; }
+
+        void closeConnections(PatchPanel *panel) {
+            if (mHandle != AUDIO_PATCH_HANDLE_NONE) {
+                panel->releaseAudioPatch(mHandle);
+                mHandle = AUDIO_PATCH_HANDLE_NONE;
+            }
+            if (mThread != 0) {
+                if (mTrack != 0) {
+                    mThread->deletePatchTrack(mTrack);
+                }
+                if (mCloseThread) {
+                    panel->mAudioFlinger.closeThreadInternal_l(mThread);
+                }
+            }
+        }
+        audio_patch_handle_t* handlePtr() { return &mHandle; }
+        void setThread(const sp<ThreadType>& thread, bool closeThread = true) {
+            mThread = thread;
+            mCloseThread = closeThread;
+        }
+        void setTrackAndPeer(const sp<TrackType>& track,
+                             ThreadBase::PatchProxyBufferProvider *peer) {
+            mTrack = track;
+            mThread->addPatchTrack(mTrack);
+            mTrack->setPeerProxy(peer);
+        }
+        void stopTrack() { if (mTrack) mTrack->stop(); }
+
+    private:
+        Endpoint(const Endpoint&) = default;
+        Endpoint& operator=(const Endpoint&) = default;
+
+        sp<ThreadType> mThread;
+        bool mCloseThread = true;
+        audio_patch_handle_t mHandle = AUDIO_PATCH_HANDLE_NONE;
+        sp<TrackType> mTrack;
+    };
 
     class Patch {
     public:
-        explicit Patch(const struct audio_patch *patch) :
-            mAudioPatch(*patch), mHandle(AUDIO_PATCH_HANDLE_NONE),
-            mHalHandle(AUDIO_PATCH_HANDLE_NONE), mRecordPatchHandle(AUDIO_PATCH_HANDLE_NONE),
-            mPlaybackPatchHandle(AUDIO_PATCH_HANDLE_NONE) {}
-        ~Patch() {}
+        explicit Patch(const struct audio_patch &patch) : mAudioPatch(patch) {}
+        ~Patch();
+        Patch(const Patch&) = delete;
+        Patch(Patch&&) = default;
+        Patch& operator=(const Patch&) = delete;
+        Patch& operator=(Patch&&) = default;
 
+        status_t createConnections(PatchPanel *panel);
+        void clearConnections(PatchPanel *panel);
+        bool isSoftware() const {
+            return mRecord.handle() != AUDIO_PATCH_HANDLE_NONE ||
+                    mPlayback.handle() != AUDIO_PATCH_HANDLE_NONE; }
+
+        // returns the latency of the patch (from record to playback).
+        status_t getLatencyMs(double *latencyMs) const;
+
+        String8 dump(audio_patch_handle_t myHandle);
+
+        // Note that audio_patch::id is only unique within a HAL module
         struct audio_patch              mAudioPatch;
-        audio_patch_handle_t            mHandle;
         // handle for audio HAL patch handle present only when the audio HAL version is >= 3.0
-        audio_patch_handle_t            mHalHandle;
+        audio_patch_handle_t            mHalHandle = AUDIO_PATCH_HANDLE_NONE;
         // below members are used by a software audio patch connecting a source device from a
         // given audio HW module to a sink device on another audio HW module.
-        // playback thread created by createAudioPatch() and released by clearPatchConnections() if
-        // no existing playback thread can be used by the software patch
-        sp<PlaybackThread>              mPlaybackThread;
-        // audio track created by createPatchConnections() and released by clearPatchConnections()
-        sp<PlaybackThread::PatchTrack>  mPatchTrack;
-        // record thread created by createAudioPatch() and released by clearPatchConnections()
-        sp<RecordThread>                mRecordThread;
-        // audio record created by createPatchConnections() and released by clearPatchConnections()
-        sp<RecordThread::PatchRecord>   mPatchRecord;
-        // handle for audio patch connecting source device to record thread input.
-        // created by createPatchConnections() and released by clearPatchConnections()
-        audio_patch_handle_t            mRecordPatchHandle;
-        // handle for audio patch connecting playback thread output to sink device
-        // created by createPatchConnections() and released by clearPatchConnections()
-        audio_patch_handle_t            mPlaybackPatchHandle;
-
+        // the objects are created by createConnections() and released by clearConnections()
+        // playback thread is created if no existing playback thread can be used
+        // connects playback thread output to sink device
+        Endpoint<PlaybackThread, PlaybackThread::PatchTrack> mPlayback;
+        // connects source device to record thread input
+        Endpoint<RecordThread, RecordThread::PatchRecord> mRecord;
     };
 
-private:
-    const wp<AudioFlinger>      mAudioFlinger;
-    SortedVector <Patch *>      mPatches;
+    sp<DeviceHalInterface> findHwDeviceByModule(audio_module_handle_t module);
+
+    AudioFlinger &mAudioFlinger;
+    std::map<audio_patch_handle_t, Patch> mPatches;
 };
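
The Endpoint template above centralizes the thread, track, and patch-handle ownership for each half of a software patch; its move assignment hands the handle over and empties the source, so only one Endpoint ever owns a live handle, and overwriting a non-empty one is reported as a leak. A standalone toy of that move-only idiom (not the real Endpoint):

#include <cstdio>
#include <utility>

class ToyEndpoint {                        // hypothetical, mirrors only the move idiom
public:
    ToyEndpoint() = default;
    ToyEndpoint(ToyEndpoint&& other) { *this = std::move(other); }
    ToyEndpoint& operator=(ToyEndpoint&& other) {
        if (mHandle != kNone) {            // destination already owned a handle: a leak
            std::fprintf(stderr, "non-empty endpoint overwritten, handle %d\n", mHandle);
        }
        mHandle = other.mHandle;           // take over ownership of the patch handle
        other.mHandle = kNone;             // the moved-from endpoint is left empty
        return *this;
    }
    void setHandle(int h) { mHandle = h; }
    int handle() const { return mHandle; }

private:
    static constexpr int kNone = -1;       // plays the role of AUDIO_PATCH_HANDLE_NONE
    int mHandle = kNone;
};

int main() {
    ToyEndpoint a;
    a.setHandle(3);
    ToyEndpoint b = std::move(a);          // b now owns handle 3, a is reset to kNone
    std::printf("a=%d b=%d\n", a.handle(), b.handle());
    return 0;
}
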
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ea01a25..4d5f6b0 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -41,7 +41,7 @@
     virtual             ~Track();
     virtual status_t    initCheck() const;
 
-    static  void        appendDumpHeader(String8& result);
+            void        appendDumpHeader(String8& result);
             void        appendDump(String8& result, bool active);
     virtual status_t    start(AudioSystem::sync_event_t event =
                                     AudioSystem::SYNC_EVENT_NONE,
@@ -56,6 +56,12 @@
                 LOG_ALWAYS_FATAL_IF(mName >= 0 && name >= 0,
                         "%s both old name %d and new name %d are valid", __func__, mName, name);
                 mName = name;
+#ifdef TEE_SINK
+                mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+                        + "_" + std::to_string(mId)
+                        + "_" + std::to_string(mName)
+                        + "_T");
+#endif
             }
 
     virtual uint32_t    sampleRate() const;
@@ -65,10 +71,12 @@
             }
             bool        isOffloaded() const
                                 { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
-            bool        isDirect() const { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+            bool        isDirect() const override
+                                { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
             bool        isOffloadedOrDirect() const { return (mFlags
                             & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
                                     | AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
+            bool        isStatic() const { return  mSharedBuffer.get() != nullptr; }
 
             status_t    setParameters(const String8& keyValuePairs);
             status_t    attachAuxEffect(int EffectId);
@@ -87,12 +95,33 @@
 
     virtual bool        isFastTrack() const { return (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0; }
 
+            double      bufferLatencyMs() const override {
+                            return isStatic() ? 0. : TrackBase::bufferLatencyMs();
+                        }
+
 // implement volume handling.
     media::VolumeShaper::Status applyVolumeShaper(
                                 const sp<media::VolumeShaper::Configuration>& configuration,
                                 const sp<media::VolumeShaper::Operation>& operation);
     sp<media::VolumeShaper::State> getVolumeShaperState(int id);
     sp<media::VolumeHandler>   getVolumeHandler() { return mVolumeHandler; }
+    /** Set the computed normalized final volume of the track.
+     * !masterMute * masterVolume * streamVolume * averageLRVolume */
+    void                setFinalVolume(float volume);
+    float               getFinalVolume() const { return mFinalVolume; }
+
+    /** @return true if the track has changed (metadata or volume) since
+     *          the last time this function was called,
+     *          true if this function was never called since the track creation,
+     *          false otherwise.
+     *  Thread safe.
+     */
+    bool            readAndClearHasChanged() { return !mChangeNotified.test_and_set(); }
+
+    using SourceMetadatas = std::vector<playback_track_metadata_t>;
+    using MetadataInserter = std::back_insert_iterator<SourceMetadatas>;
+    /** Copy the track metadata in the provided iterator. Thread safe. */
+    virtual void    copyMetadataTo(MetadataInserter& backInserter) const;
 
 protected:
     // for numerous
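
readAndClearHasChanged() above, together with setMetadataHasChanged() and the std::atomic_flag mChangeNotified added later in this file, forms a small lock-free "dirty" bit: clearing the flag marks a pending change, and test_and_set() reports and consumes it, so each change is observed at most once. A minimal stand-alone sketch of the idiom (not AudioFlinger code, only <atomic> assumed):

    #include <atomic>

    class ChangeNotifier {
    public:
        // Writer side: mark that metadata/volume changed. Thread safe, lock free.
        void markChanged() { mNotified.clear(std::memory_order_release); }

        // Reader side: returns true at most once per markChanged() call (and on the
        // first call, because ATOMIC_FLAG_INIT leaves the flag cleared).
        bool readAndClear() { return !mNotified.test_and_set(std::memory_order_acquire); }

    private:
        std::atomic_flag mNotified = ATOMIC_FLAG_INIT;
    };
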
@@ -123,7 +152,7 @@
     bool isResumePending();
     void resumeAck();
     void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
-            const ExtendedTimestamp &timeStamp);
+            uint32_t halSampleRate, const ExtendedTimestamp &timeStamp);
 
     sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
 
@@ -133,6 +162,8 @@
     bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
     void signalClientFlag(int32_t flag);
 
+    /** Set that the metadata has changed and needs to be notified to the backend. Thread safe. */
+    void setMetadataHasChanged() { mChangeNotified.clear(); }
 public:
     void triggerEvents(AudioSystem::sync_event_t type);
     virtual void invalidate();
@@ -182,10 +213,13 @@
     volatile float      mCachedVolume;  // combined master volume and stream type volume;
                                         // 'volatile' means accessed without lock or
                                         // barrier, but is read/written atomically
+    float               mFinalVolume; // combined master volume, stream type volume and track volume
     sp<AudioTrackServerProxy>  mAudioTrackServerProxy;
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
     audio_output_flags_t mFlags;
+    // If the last track change was notified to the client with readAndClearHasChanged
+    std::atomic_flag     mChangeNotified = ATOMIC_FLAG_INIT;
 };  // end of Track
 
 
@@ -211,13 +245,28 @@
                                     AudioSystem::SYNC_EVENT_NONE,
                              audio_session_t triggerSession = AUDIO_SESSION_NONE);
     virtual void        stop();
-            bool        write(void* data, uint32_t frames);
+            ssize_t     write(void* data, uint32_t frames);
             bool        bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
             bool        isActive() const { return mActive; }
     const wp<ThreadBase>& thread() const { return mThread; }
 
-private:
+            void        copyMetadataTo(MetadataInserter& backInserter) const override;
+    /** Set the metadatas of the upstream tracks. Thread safe. */
+            void        setMetadatas(const SourceMetadatas& metadatas);
+    /** returns client timestamp to the upstream duplicating thread. */
+    ExtendedTimestamp   getClientProxyTimestamp() const {
+                            // server - kernel difference is not true latency when drained
+                            // i.e. mServerProxy->isDrained().
+                            ExtendedTimestamp timestamp;
+                            (void) mClientProxy->getTimestamp(&timestamp);
+                            // On success, the timestamp LOCATION_SERVER and LOCATION_KERNEL
+                            // entries will be properly filled. If getTimestamp()
+                            // is unsuccessful, then a default initialized timestamp
+                            // (with mTimeNs[] filled with -1's) is returned.
+                            return timestamp;
+                        }
 
+private:
     status_t            obtainBuffer(AudioBufferProvider::Buffer* buffer,
                                      uint32_t waitTimeMs);
     void                clearBufferQueue();
@@ -232,6 +281,21 @@
     bool                        mActive;
     DuplicatingThread* const    mSourceThread; // for waitTimeMs() in write()
     sp<AudioTrackClientProxy>   mClientProxy;
+
+    /** Attributes of the source tracks.
+     *
+     * This member must be accessed with mTrackMetadatasMutex taken.
+     * There is one writer (duplicating thread) and one reader (downstream mixer).
+     *
+     * That means that the duplicating thread can block the downstream mixer
+     * thread and vice versa for the time of the copy.
+     * If this becomes an issue, the metadata could be stored in an atomic raw pointer,
+     * and an exchange with nullptr and delete can be used.
+     * Alternatively a read-copy-update might be implemented.
+     */
+    SourceMetadatas mTrackMetadatas;
+    /** Protects mTrackMetadatas against concurrent access. */
+    mutable std::mutex mTrackMetadatasMutex;
 };  // end of OutputTrack
 
 // playback track, used by PatchPanel
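
The comment on mTrackMetadatas above describes a plain single-writer/single-reader mailbox; the Tracks.cpp side of this change is not shown in this hunk. A minimal sketch of the pattern it describes (copy in under the mutex, copy out under the mutex), using stand-in types:

    #include <mutex>
    #include <vector>

    struct TrackMetadata { int usage; int contentType; float gain; };  // stand-in type

    class MetadataMailbox {
    public:
        void set(const std::vector<TrackMetadata>& metadatas) {        // duplicating thread
            std::lock_guard<std::mutex> lock(mMutex);
            mMetadatas = metadatas;
        }
        void copyTo(std::vector<TrackMetadata>* out) const {           // downstream mixer
            std::lock_guard<std::mutex> lock(mMutex);
            out->insert(out->end(), mMetadatas.begin(), mMetadatas.end());
        }
    private:
        std::vector<TrackMetadata> mMetadatas;
        mutable std::mutex mMutex;
    };
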
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 2b993ee..b0c9fda 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -51,7 +51,7 @@
             bool        setOverflow() { bool tmp = mOverflow; mOverflow = true;
                                                 return tmp; }
 
-    static  void        appendDumpHeader(String8& result);
+            void        appendDumpHeader(String8& result);
             void        appendDump(String8& result, bool active);
 
             void        handleSyncStartEvent(const sp<SyncEvent>& event);
@@ -63,8 +63,10 @@
                                              const ExtendedTimestamp &timestamp);
 
     virtual bool        isFastTrack() const { return (mFlags & AUDIO_INPUT_FLAG_FAST) != 0; }
+            bool        isDirect() const override
+                                { return (mFlags & AUDIO_INPUT_FLAG_DIRECT) != 0; }
 
-            void        setSilenced(bool silenced) { mSilenced = silenced; }
+            void        setSilenced(bool silenced) { if (!isPatchTrack()) mSilenced = silenced; }
             bool        isSilenced() const { return mSilenced; }
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
deleted file mode 100644
index f45ada1..0000000
--- a/services/audioflinger/ServiceUtilities.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <unistd.h>
-
-#include <binder/PermissionController.h>
-
-namespace android {
-
-extern pid_t getpid_cached;
-bool isTrustedCallingUid(uid_t uid);
-bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
-void finishRecording(const String16& opPackageName, uid_t uid);
-bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
-bool captureHotwordAllowed(pid_t pid, uid_t uid);
-bool settingsAllowed();
-bool modifyAudioRoutingAllowed();
-bool dumpAllowed();
-bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
-}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index adeef31..70af5c6 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -62,8 +62,8 @@
 #include "AudioFlinger.h"
 #include "FastMixer.h"
 #include "FastCapture.h"
-#include "ServiceUtilities.h"
-#include "mediautils/SchedulingPolicyService.h"
+#include <mediautils/SchedulingPolicyService.h>
+#include <mediautils/ServiceUtilities.h>
 
 #ifdef ADD_BATTERY_DATA
 #include <media/IMediaPlayerService.h>
@@ -200,7 +200,7 @@
 // Initially this heap is used to allocate client buffers for "fast" AudioRecord.
 // Eventually it will be the single buffer that FastCapture writes into via HAL read(),
 // and that all "fast" AudioRecord clients read from.  In either case, the size can be small.
-static const size_t kRecordThreadReadOnlyHeapSize = 0x4000;
+static const size_t kRecordThreadReadOnlyHeapSize = 0xD000;
 
 // ----------------------------------------------------------------------------
 
@@ -769,6 +769,8 @@
             if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
             if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
             if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
+            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, " );
+            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, " );
             if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown,  ");
         } else {
             if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
@@ -783,6 +785,12 @@
             if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
             if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
             if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+            if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
+            if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
+            if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
+            if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low freq, ");
+            if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, " );
+            if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, " );
             if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
             if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
             if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown,  ");
@@ -845,6 +853,14 @@
     dprintf(fd, "  Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
     dprintf(fd, "  Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
 
+    // Dump timestamp statistics for the Thread types that support it.
+    if (mType == RECORD
+            || mType == MIXER
+            || mType == DUPLICATING
+            || (mType == DIRECT && audio_is_linear_pcm(mHALFormat))) {
+        dprintf(fd, "  Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
+    }
+
     if (locked) {
         mLock.unlock();
     }
@@ -1519,7 +1535,7 @@
     }
 }
 
-void AudioFlinger::ThreadBase::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::ThreadBase::toAudioPortConfig(struct audio_port_config *config)
 {
     config->type = AUDIO_PORT_TYPE_MIX;
     config->ext.mix.handle = mId;
@@ -1571,6 +1587,9 @@
     --mBatteryCounter[track->uid()].second;
     // mLatestActiveTrack is not cleared even if is the same as track.
     mHasChanged = true;
+#ifdef TEE_SINK
+    track->dumpTee(-1 /* fd */, "_REMOVE");
+#endif
     return index;
 }
 
@@ -1773,7 +1792,7 @@
     if (numtracks) {
         dprintf(fd, " of which %zu are active\n", numactive);
         result.append(prefix);
-        Track::appendDumpHeader(result);
+        mTracks[0]->appendDumpHeader(result);
         for (size_t i = 0; i < numtracks; ++i) {
             sp<Track> track = mTracks[i];
             if (track != 0) {
@@ -1793,7 +1812,7 @@
         result.append("  The following tracks are in the active list but"
                 " not in the track list\n");
         result.append(prefix);
-        Track::appendDumpHeader(result);
+        mActiveTracks[0]->appendDumpHeader(result);
         for (size_t i = 0; i < numactive; ++i) {
             sp<Track> track = mActiveTracks[i];
             if (mTracks.indexOf(track) < 0) {
@@ -1880,11 +1899,17 @@
     status_t lStatus;
     audio_output_flags_t outputFlags = mOutput->flags;
     audio_output_flags_t requestedFlags = *flags;
+    uint32_t sampleRate;
+
+    if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
 
     if (*pSampleRate == 0) {
         *pSampleRate = mSampleRate;
     }
-    uint32_t sampleRate = *pSampleRate;
+    sampleRate = *pSampleRate;
 
     // special case for FAST flag considered OK if fast mixer is present
     if (hasFastMixer()) {
@@ -2623,27 +2648,33 @@
 
 void AudioFlinger::PlaybackThread::updateMetadata_l()
 {
-    // TODO: add volume support
-    if (mOutput == nullptr || mOutput->stream == nullptr ||
-            !mActiveTracks.readAndClearHasChanged()) {
-        return;
+    if (mOutput == nullptr || mOutput->stream == nullptr ) {
+        return; // That should not happen
+    }
+    bool hasChanged = mActiveTracks.readAndClearHasChanged();
+    for (const sp<Track> &track : mActiveTracks) {
+        // Do not short-circuit as all hasChanged states must be reset
+        // as all the metadata are going to be sent
+        hasChanged |= track->readAndClearHasChanged();
+    }
+    if (!hasChanged) {
+        return; // nothing to do
     }
     StreamOutHalInterface::SourceMetadata metadata;
+    auto backInserter = std::back_inserter(metadata.tracks);
     for (const sp<Track> &track : mActiveTracks) {
         // No track is invalid as this is called after prepareTrack_l in the same critical section
-        if (track->isOutputTrack()) {
-            // TODO: OutputTrack (used for duplication) are currently not supported
-            continue;
-        }
-        metadata.tracks.push_back({
-                .usage = track->attributes().usage,
-                .content_type = track->attributes().content_type,
-                .gain = 1,
-        });
+        track->copyMetadataTo(backInserter);
     }
-    mOutput->stream->updateSourceMetadata(metadata);
+    sendMetadataToBackend_l(metadata);
 }
 
+void AudioFlinger::PlaybackThread::sendMetadataToBackend_l(
+        const StreamOutHalInterface::SourceMetadata& metadata)
+{
+    mOutput->stream->updateSourceMetadata(metadata);
+}
+
 status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
 {
     if (halFrames == NULL || dspFrames == NULL) {
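
copyMetadataTo() in the updateMetadata_l() hunk above takes a std::back_insert_iterator so that each track can append however many metadata entries it owns (an OutputTrack forwards the metadata of all of its upstream tracks). A small stand-alone illustration of that iterator pattern, with a stand-in Metadata type:

    #include <iterator>
    #include <vector>

    struct Metadata { int usage; int contentType; float gain; };  // stand-in type

    // Mirrors the copyMetadataTo(MetadataInserter&) shape: the caller never needs
    // to know how many entries each track contributes.
    static void appendOne(std::back_insert_iterator<std::vector<Metadata>>& backInserter,
                          const Metadata& m) {
        *backInserter++ = m;
    }

    int main() {
        std::vector<Metadata> tracks;
        auto backInserter = std::back_inserter(tracks);
        appendOne(backInserter, {1 /*usage*/, 2 /*contentType*/, 1.f});
        appendOne(backInserter, {3, 0, 0.5f});
        return tracks.size() == 2 ? 0 : 1;
    }
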
@@ -2839,6 +2870,9 @@
         ATRACE_END();
         if (framesWritten > 0) {
             bytesWritten = framesWritten * mFrameSize;
+#ifdef TEE_SINK
+            mTee.write((char *)mSinkBuffer + offset, framesWritten);
+#endif
         } else {
             bytesWritten = framesWritten;
         }
@@ -3179,16 +3213,23 @@
                 logString = NULL;
             }
 
+            // Collect timestamp statistics for the Playback Thread types that support it.
+            if (mType == MIXER
+                    || mType == DUPLICATING
+                    || (mType == DIRECT && audio_is_linear_pcm(mHALFormat))) { // no indentation
             // Gather the framesReleased counters for all active tracks,
             // and associate with the sink frames written out.  We need
             // this to convert the sink timestamp to the track timestamp.
             bool kernelLocationUpdate = false;
-            if (mNormalSink != 0) {
-                // Note: The DuplicatingThread may not have a mNormalSink.
+            ExtendedTimestamp timestamp; // use private copy to fetch
+            if (mStandby) {
+                mTimestampVerifier.discontinuity();
+            } else if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
+                mTimestampVerifier.add(timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+                        mSampleRate);
                 // We always fetch the timestamp here because often the downstream
                 // sink will block while writing.
-                ExtendedTimestamp timestamp; // use private copy to fetch
-                (void) mNormalSink->getTimestamp(timestamp);
 
                 // We keep track of the last valid kernel position in case we are in underrun
                 // and the normal mixer period is the same as the fast mixer period, or there
@@ -3217,7 +3258,10 @@
                         + mSuspendedFrames; // add frames discarded when suspended
                 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
                         timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+            } else {
+                mTimestampVerifier.error();
             }
+
             // mFramesWritten for non-offloaded tracks are contiguous
             // even after standby() is called. This is useful for the track frame
             // to sink frame mapping.
@@ -3245,10 +3289,12 @@
                         t->updateTrackFrameInfo(
                                 t->mAudioTrackServerProxy->framesReleased(),
                                 mFramesWritten,
+                                mSampleRate,
                                 mTimestamp);
                     }
                 }
             }
+            } // if (mType ... ) { // no indentation
 #if 0
             // logFormat example
             if (z % 100 == 0) {
@@ -3766,12 +3812,16 @@
     destroyTrack_l(track);
 }
 
-void AudioFlinger::PlaybackThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::PlaybackThread::toAudioPortConfig(struct audio_port_config *config)
 {
-    ThreadBase::getAudioPortConfig(config);
+    ThreadBase::toAudioPortConfig(config);
     config->role = AUDIO_PORT_ROLE_SOURCE;
     config->ext.mix.hw_module = mOutput->audioHwDev->handle();
     config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+    if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+        config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+        config->flags.output = mOutput->flags;
+    }
 }
 
 // ----------------------------------------------------------------------------
@@ -3852,9 +3902,7 @@
 
         // create a MonoPipe to connect our submix to FastMixer
         NBAIO_Format format = mOutputSink->format();
-#ifdef TEE_SINK
-        NBAIO_Format origformat = format;
-#endif
+
         // adjust format to match that of the Fast Mixer
         ALOGV("format changed from %#x to %#x", format.mFormat, fastMixerFormat);
         format.mFormat = fastMixerFormat;
@@ -3866,7 +3914,7 @@
         MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
         const NBAIO_Format offers[1] = {format};
         size_t numCounterOffers = 0;
-#if !LOG_NDEBUG || defined(TEE_SINK)
+#if !LOG_NDEBUG
         ssize_t index =
 #else
         (void)
@@ -3877,25 +3925,8 @@
                 (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
         mPipeSink = monoPipe;
 
-#ifdef TEE_SINK
-        if (mTeeSinkOutputEnabled) {
-            // create a Pipe to archive a copy of FastMixer's output for dumpsys
-            Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, origformat);
-            const NBAIO_Format offers2[1] = {origformat};
-            numCounterOffers = 0;
-            index = teeSink->negotiate(offers2, 1, NULL, numCounterOffers);
-            ALOG_ASSERT(index == 0);
-            mTeeSink = teeSink;
-            PipeReader *teeSource = new PipeReader(*teeSink);
-            numCounterOffers = 0;
-            index = teeSource->negotiate(offers2, 1, NULL, numCounterOffers);
-            ALOG_ASSERT(index == 0);
-            mTeeSource = teeSource;
-        }
-#endif
-
         // create fast mixer and configure it initially with just one fast track for our submix
-        mFastMixer = new FastMixer();
+        mFastMixer = new FastMixer(mId);
         FastMixerStateQueue *sq = mFastMixer->sq();
 #ifdef STATE_QUEUE_DUMP
         sq->setObserverDump(&mStateQueueObserverDump);
@@ -3921,9 +3952,6 @@
         state->mColdFutexAddr = &mFastMixerFutex;
         state->mColdGen++;
         state->mDumpState = &mFastMixerDumpState;
-#ifdef TEE_SINK
-        state->mTeeSink = mTeeSink.get();
-#endif
         mFastMixerNBLogWriter = audioFlinger->newWriter_l(kFastMixerLogSize, "FastMixer");
         state->mNBLogWriter = mFastMixerNBLogWriter.get();
         sq->end();
@@ -3932,7 +3960,7 @@
         // start the fast mixer
         mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
         pid_t tid = mFastMixer->getTid();
-        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false /*forApp*/);
+        sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
         stream()->setHalThreadPriority(kPriorityFastMixer);
 
 #ifdef AUDIO_WATCHDOG
@@ -3941,9 +3969,14 @@
         mAudioWatchdog->setDump(&mAudioWatchdogDump);
         mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
         tid = mAudioWatchdog->getTid();
-        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false /*forApp*/);
+        sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
 #endif
-
+    } else {
+#ifdef TEE_SINK
+        // Only use the MixerThread tee if there is no FastMixer.
+        mTee.set(mOutputSink->format(), NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
+        mTee.setId(std::string("_") + std::to_string(mId) + "_M");
+#endif
     }
 
     switch (kUseFastMixer) {
@@ -4163,16 +4196,31 @@
     // buffer size, then write 0s to the output
     if (mSleepTimeUs == 0) {
         if (mMixerStatus == MIXER_TRACKS_ENABLED) {
-            mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
-            if (mSleepTimeUs < kMinThreadSleepTimeUs) {
-                mSleepTimeUs = kMinThreadSleepTimeUs;
-            }
-            // reduce sleep time in case of consecutive application underruns to avoid
-            // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
-            // duration we would end up writing less data than needed by the audio HAL if
-            // the condition persists.
-            if (sleepTimeShift < kMaxThreadSleepTimeShift) {
-                sleepTimeShift++;
+            if (mPipeSink.get() != nullptr && mPipeSink == mNormalSink) {
+                // Using the Monopipe availableToWrite, we estimate the
+                // sleep time to retry for more data (before we underrun).
+                MonoPipe *monoPipe = static_cast<MonoPipe *>(mPipeSink.get());
+                const ssize_t availableToWrite = mPipeSink->availableToWrite();
+                const size_t pipeFrames = monoPipe->maxFrames();
+                const size_t framesLeft = pipeFrames - max(availableToWrite, 0);
+                // HAL_framecount <= framesDelay ~ framesLeft / 2 <= Normal_Mixer_framecount
+                const size_t framesDelay = std::min(
+                        mNormalFrameCount, max(framesLeft / 2, mFrameCount));
+                ALOGV("pipeFrames:%zu framesLeft:%zu framesDelay:%zu",
+                        pipeFrames, framesLeft, framesDelay);
+                mSleepTimeUs = framesDelay * MICROS_PER_SECOND / mSampleRate;
+            } else {
+                mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
+                if (mSleepTimeUs < kMinThreadSleepTimeUs) {
+                    mSleepTimeUs = kMinThreadSleepTimeUs;
+                }
+                // reduce sleep time in case of consecutive application underruns to avoid
+                // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
+                // duration we would end up writing less data than needed by the audio HAL if
+                // the condition persists.
+                if (sleepTimeShift < kMaxThreadSleepTimeShift) {
+                    sleepTimeShift++;
+                }
             }
         } else {
             mSleepTimeUs = mIdleSleepTimeUs;
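
With hypothetical sizes, the MonoPipe-based sleep estimate above works out as follows (48 kHz mixer, fast-mixer period of 192 frames, normal-mixer period of 960 frames, a 3840-frame pipe with 2880 frames still writable):

    #include <algorithm>
    #include <cstdio>

    int main() {
        const long long sampleRate = 48000;
        const long long frameCount = 192;        // HAL / fast mixer period (assumed)
        const long long normalFrameCount = 960;  // normal mixer period (assumed)
        const long long pipeFrames = 3840;       // MonoPipe capacity (assumed)
        const long long availableToWrite = 2880; // free space reported by the pipe

        const long long framesLeft = pipeFrames - availableToWrite;               // 960
        const long long framesDelay =
                std::min(normalFrameCount, std::max(framesLeft / 2, frameCount)); // 480
        const long long sleepUs = framesDelay * 1000000 / sampleRate;             // 10000
        printf("retry the mix in about %lld us\n", sleepUs);
        return 0;
    }
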
@@ -4244,6 +4292,37 @@
     mMixerBufferValid = false;  // mMixerBuffer has no valid data until appropriate tracks found.
     mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
 
+    // DeferredOperations handles statistics after setting mixerStatus.
+    class DeferredOperations {
+    public:
+        DeferredOperations(mixer_state *mixerStatus)
+            : mMixerStatus(mixerStatus) { }
+
+        // when leaving scope, tally frames properly.
+        ~DeferredOperations() {
+            // Tally underrun frames only if we are actually mixing (MIXER_TRACKS_READY)
+            // because that is when the underrun occurs.
+            // We do not distinguish between FastTracks and NormalTracks here.
+            if (*mMixerStatus == MIXER_TRACKS_READY) {
+                for (const auto &underrun : mUnderrunFrames) {
+                    underrun.first->mAudioTrackServerProxy->tallyUnderrunFrames(
+                            underrun.second);
+                }
+            }
+        }
+
+        // tallyUnderrunFrames() is called to update the track counters
+        // with the number of underrun frames for a particular mixer period.
+        // We defer tallying until we know the final mixer status.
+        void tallyUnderrunFrames(sp<Track> track, size_t underrunFrames) {
+            mUnderrunFrames.emplace_back(track, underrunFrames);
+        }
+
+    private:
+        const mixer_state * const mMixerStatus;
+        std::vector<std::pair<sp<Track>, size_t>> mUnderrunFrames;
+    } deferredOperations(&mixerStatus); // implicit nested scope for variable capture
+
     for (size_t i=0 ; i<count ; i++) {
         const sp<Track> t = mActiveTracks[i];
 
@@ -4278,13 +4357,14 @@
             track->mObservedUnderruns = underruns;
             // don't count underruns that occur while stopping or pausing
             // or stopped which can occur when flush() is called while active
+            size_t underrunFrames = 0;
             if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
                     recentUnderruns > 0) {
                 // FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
-                track->mAudioTrackServerProxy->tallyUnderrunFrames(recentUnderruns * mFrameCount);
-            } else {
-                track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
+                underrunFrames = recentUnderruns * mFrameCount;
             }
+            // Immediately account for FastTrack underruns.
+            track->mAudioTrackServerProxy->tallyUnderrunFrames(underrunFrames);
 
             // This is similar to the state machine for normal tracks,
             // with a few modifications for fast tracks.
@@ -4381,13 +4461,19 @@
                     didModify = true;
                     // no acknowledgement required for newly active tracks
                 }
+                sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
                 // cache the combined master volume and stream type volume for fast mixer; this
                 // lacks any synchronization or barrier so VolumeProvider may read a stale value
                 const float vh = track->getVolumeHandler()->getVolume(
-                        track->mAudioTrackServerProxy->framesReleased()).first;
-                track->mCachedVolume = masterVolume
+                        proxy->framesReleased()).first;
+                float volume = masterVolume
                         * mStreamTypes[track->streamType()].volume
                         * vh;
+                track->mCachedVolume = volume;
+                gain_minifloat_packed_t vlr = proxy->getVolumeLR();
+                float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
+                float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+                track->setFinalVolume((vlf + vrf) / 2.f);
                 ++fastTracks;
             } else {
                 // was it previously active?
@@ -4564,6 +4650,8 @@
                 vaf = v * sendLevel * (1. / MAX_GAIN_INT);
             }
 
+            track->setFinalVolume((vrf + vlf) / 2.f);
+
             // Delegate volume control to effect in track effect chain if needed
             if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
                 // Do not ramp volume if volume is controlled by effect
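
Both setFinalVolume() call sites above (FastTrack and normal track) pass the average of the left/right gains scaled by the combined master/stream volume, matching the "!masterMute * masterVolume * streamVolume * averageLRVolume" definition in PlaybackTracks.h; the VolumeShaper factor (vh above) multiplies into the same product. A numeric sketch with made-up values:

    // Made-up values; mirrors the finalVolume definition quoted in PlaybackTracks.h.
    static float computeFinalVolume(bool masterMute, float masterVolume,
                                    float streamVolume, float leftGain, float rightGain) {
        const float volume = (masterMute ? 0.f : 1.f) * masterVolume * streamVolume;
        return volume * (leftGain + rightGain) / 2.f;
    }
    // e.g. computeFinalVolume(false, 1.0f, 0.5f, 0.8f, 0.6f) == 0.35f
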
@@ -4691,13 +4779,13 @@
                 mixerStatus = MIXER_TRACKS_READY;
             }
         } else {
+            size_t underrunFrames = 0;
             if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
                 ALOGV("track(%p) underrun,  framesReady(%zu) < framesDesired(%zd)",
                         track, framesReady, desiredFrames);
-                track->mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
-            } else {
-                track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
+                underrunFrames = desiredFrames;
             }
+            deferredOperations.tallyUnderrunFrames(track, underrunFrames);
 
             // clear effect chain input buffer if an active track underruns to avoid sending
             // previous audio buffer again to effects
@@ -4996,6 +5084,12 @@
     dprintf(fd, "  Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
     dprintf(fd, "  AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
     dprintf(fd, "  Master mono: %s\n", mMasterMono ? "on" : "off");
+    const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
+    if (latencyMs != 0.) {
+        dprintf(fd, "  NormalMixer latency ms: %.2lf\n", latencyMs);
+    } else {
+        dprintf(fd, "  NormalMixer latency ms: unavail\n");
+    }
 
     if (hasFastMixer()) {
         dprintf(fd, "  FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
@@ -5027,12 +5121,6 @@
     } else {
         dprintf(fd, "  No FastMixer\n");
     }
-
-#ifdef TEE_SINK
-    // Write the tee output to a .wav file
-    dumpTee(fd, mTeeSource, mId, 'M');
-#endif
-
 }
 
 uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
@@ -5108,6 +5196,7 @@
     }
 
     if (lastTrack) {
+        track->setFinalVolume((left + right) / 2.f);
         if (left != mLeftVolFloat || right != mRightVolFloat) {
             mLeftVolFloat = left;
             mRightVolFloat = right;
@@ -6032,7 +6121,22 @@
 ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
 {
     for (size_t i = 0; i < outputTracks.size(); i++) {
-        outputTracks[i]->write(mSinkBuffer, writeFrames);
+        const ssize_t actualWritten = outputTracks[i]->write(mSinkBuffer, writeFrames);
+
+        // Consider the first OutputTrack for timestamp and frame counting.
+
+        // The threadLoop() generally assumes writing a full sink buffer size at a time.
+        // Here, we correct for writeFrames of 0 (a stop) or underruns because
+        // we always claim success.
+        if (i == 0) {
+            const ssize_t correction = mSinkBufferSize / mFrameSize - actualWritten;
+            ALOGD_IF(correction != 0 && writeFrames != 0,
+                    "%s: writeFrames:%u  actualWritten:%zd  correction:%zd  mFramesWritten:%lld",
+                    __func__, writeFrames, actualWritten, correction, (long long)mFramesWritten);
+            mFramesWritten -= correction;
+        }
+
+        // TODO: Report correction for the other output tracks and show in the dump.
     }
     mStandby = false;
     return (ssize_t)mSinkBufferSize;
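
The correction above keeps mFramesWritten consistent with what the first OutputTrack actually accepted, since the generic accounting always assumes a full sink buffer per period (threadLoop_write() claims success by returning mSinkBufferSize). With hypothetical numbers:

    #include <cstdio>

    int main() {
        const long long sinkBufferSize = 3840;  // bytes per mixer period (assumed)
        const long long frameSize = 4;          // e.g. stereo 16-bit PCM (assumed)
        const long long claimedFrames = sinkBufferSize / frameSize;  // 960, always claimed
        const long long actualWritten = 720;    // the OutputTrack accepted fewer frames
        long long framesWritten = 480000;       // running total before this period

        const long long correction = claimedFrames - actualWritten;  // 240
        framesWritten += claimedFrames;  // what the generic threadLoop() accounting adds
        framesWritten -= correction;     // what the hunk above subtracts back
        printf("framesWritten now %lld (net +%lld)\n", framesWritten, claimedFrames - correction);
        return 0;
    }
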
@@ -6165,14 +6269,12 @@
     return true;
 }
 
-void AudioFlinger::DuplicatingThread::updateMetadata_l()
+void AudioFlinger::DuplicatingThread::sendMetadataToBackend_l(
+        const StreamOutHalInterface::SourceMetadata& metadata)
 {
-    // TODO: The duplicated track metadata needs to be pushed to downstream
-    // but this information can be read at any time by the downstream threads.
-    // Taking the lock of any downstream threads is no possible due to cross deadlock risks
-    // (eg: during effect move).
-    // A lock-free structure needs to be used to shared the metadata, probably an atomic
-    // pointer to a metadata vector in each output tracks.
+    for (auto& outputTrack : outputTracks) { // not mOutputTracks
+        outputTrack->setMetadatas(metadata.tracks);
+    }
 }
 
 uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
@@ -6199,9 +6301,6 @@
                                          audio_devices_t outDevice,
                                          audio_devices_t inDevice,
                                          bool systemReady
-#ifdef TEE_SINK
-                                         , const sp<NBAIO_Sink>& teeSink
-#endif
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
     mInput(input),
@@ -6209,9 +6308,6 @@
     mRsmpInBuffer(NULL),
     // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
     mRsmpInRear(0)
-#ifdef TEE_SINK
-    , mTeeSink(teeSink)
-#endif
     , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
             "RecordThreadRO", MemoryHeapBase::READ_ONLY))
     // mFastCapture below
@@ -6326,7 +6422,7 @@
         // start the fast capture
         mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
         pid_t tid = mFastCapture->getTid();
-        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture, false /*forApp*/);
+        sendPrioConfigEvent(getpid(), tid, kPriorityFastCapture, false /*forApp*/);
         stream()->setHalThreadPriority(kPriorityFastCapture);
 #ifdef AUDIO_WATCHDOG
         // FIXME
@@ -6334,6 +6430,10 @@
 
         mFastTrackAvail = true;
     }
+#ifdef TEE_SINK
+    mTee.set(mInputSource->format(), NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
+    mTee.setId(std::string("_") + std::to_string(mId) + "_C");
+#endif
 failed: ;
 
     // FIXME mNormalSource
@@ -6590,13 +6690,27 @@
         if (mPipeSource != 0) {
             size_t framesToRead = mBufferSize / mFrameSize;
             framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
-            framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
-                    framesToRead);
-            // since pipe is non-blocking, simulate blocking input by waiting for 1/2 of
-            // buffer size or at least for 20ms.
-            size_t sleepFrames = max(
-                    min(mPipeFramesP2, mRsmpInFramesP2) / 2, FMS_20 * mSampleRate / 1000);
-            if (framesRead <= (ssize_t) sleepFrames) {
+
+            // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
+            // to the full buffer point (clearing the overflow condition).  Upon OVERRUN error,
+            // we immediately retry the read() to get data and prevent another overflow.
+            for (int retries = 0; retries <= 2; ++retries) {
+                ALOGW_IF(retries > 0, "overrun on read from pipe, retry #%d", retries);
+                framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
+                        framesToRead);
+                if (framesRead != OVERRUN) break;
+            }
+
+            const ssize_t availableToRead = mPipeSource->availableToRead();
+            if (availableToRead >= 0) {
+                // PipeSource is the master clock.  It is up to the AudioRecord client to keep up.
+                LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
+                        "more frames to read than fifo size, %zd > %zu",
+                        availableToRead, mPipeFramesP2);
+                const size_t pipeFramesFree = mPipeFramesP2 - availableToRead;
+                const size_t sleepFrames = min(pipeFramesFree, mRsmpInFramesP2) / 2;
+                ALOGVV("mPipeFramesP2:%zu mRsmpInFramesP2:%zu sleepFrames:%zu availableToRead:%zd",
+                        mPipeFramesP2, mRsmpInFramesP2, sleepFrames, availableToRead);
                 sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
             }
             if (framesRead < 0) {
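
For the FastCapture pipe path above, the retry loop clears an overrun immediately, and the sleep is then sized from how much pipe space is actually free rather than from the previous fixed floor of half a buffer or 20 ms. With hypothetical sizes (48 kHz, 2048-frame pipe holding 512 frames, 4096-frame resampler input buffer):

    #include <algorithm>
    #include <cstdio>

    int main() {
        const long long sampleRate = 48000;
        const long long pipeFramesP2 = 2048;    // power-of-2 pipe size (assumed)
        const long long rsmpInFramesP2 = 4096;  // power-of-2 resampler input buffer (assumed)
        const long long availableToRead = 512;  // frames currently queued in the pipe

        const long long pipeFramesFree = pipeFramesP2 - availableToRead;             // 1536
        const long long sleepFrames = std::min(pipeFramesFree, rsmpInFramesP2) / 2;  // 768
        const long long sleepUs = sleepFrames * 1000000 / sampleRate;                // 16000
        printf("sleep about %lld us before the next pipe read\n", sleepUs);
        return 0;
    }
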
@@ -6637,8 +6751,9 @@
         // Update server timestamp with kernel stats
         if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
             int64_t position, time;
-            int ret = mInput->stream->getCapturePosition(&position, &time);
-            if (ret == NO_ERROR) {
+            if (mStandby) {
+                mTimestampVerifier.discontinuity();
+            } else if (mInput->stream->getCapturePosition(&position, &time) == NO_ERROR) {
                 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
                 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
                 // Note: In general record buffers should tend to be empty in
@@ -6646,6 +6761,12 @@
                 //
                 // Also, it is not advantageous to call get_presentation_position during the read
                 // as the read obtains a lock, preventing the timestamp call from executing.
+
+                mTimestampVerifier.add(mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+                        mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+                        mSampleRate);
+            } else {
+                mTimestampVerifier.error();
             }
         }
         // Use this to track timestamp information
@@ -6662,9 +6783,9 @@
         }
         ALOG_ASSERT(framesRead > 0);
 
-        if (mTeeSink != 0) {
-            (void) mTeeSink->write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
-        }
+#ifdef TEE_SINK
+        (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
+#endif
         // If destination is non-contiguous, we now correct for reading past end of buffer.
         {
             size_t part1 = mRsmpInFramesP2 - rear;
@@ -6722,9 +6843,33 @@
                 framesOut = min(framesOut,
                         destinationFramesPossible(
                                 framesIn, mSampleRate, activeTrack->mSampleRate));
-                // process frames from the RecordThread buffer provider to the RecordTrack buffer
-                framesOut = activeTrack->mRecordBufferConverter->convert(
-                        activeTrack->mSink.raw, activeTrack->mResamplerBufferProvider, framesOut);
+
+                if (activeTrack->isDirect()) {
+                    // No RecordBufferConverter used for compressed formats. Pass
+                    // straight from RecordThread buffer to RecordTrack buffer.
+                    AudioBufferProvider::Buffer buffer;
+                    buffer.frameCount = framesOut;
+                    status_t status = activeTrack->mResamplerBufferProvider->getNextBuffer(&buffer);
+                    if (status == OK && buffer.frameCount != 0) {
+                        ALOGV_IF(buffer.frameCount != framesOut,
+                                "%s() read less than expected (%zu vs %zu)",
+                                __func__, buffer.frameCount, framesOut);
+                        framesOut = buffer.frameCount;
+                        memcpy(activeTrack->mSink.raw, buffer.raw, buffer.frameCount * mFrameSize);
+                        activeTrack->mResamplerBufferProvider->releaseBuffer(&buffer);
+                    } else {
+                        framesOut = 0;
+                        ALOGE("%s() cannot fill request, status: %d, frameCount: %zu",
+                            __func__, status, buffer.frameCount);
+                    }
+                } else {
+                    // process frames from the RecordThread buffer provider to the RecordTrack
+                    // buffer
+                    framesOut = activeTrack->mRecordBufferConverter->convert(
+                            activeTrack->mSink.raw,
+                            activeTrack->mResamplerBufferProvider,
+                            framesOut);
+                }
 
                 if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
                     overrun = OVERRUN_FALSE;
@@ -7096,8 +7241,10 @@
         // see previously buffered data before it called start(), but with greater risk of overrun.
 
         recordTrack->mResamplerBufferProvider->reset();
-        // clear any converter state as new data will be discontinuous
-        recordTrack->mRecordBufferConverter->reset();
+        if (!recordTrack->isDirect()) {
+            // clear any converter state as new data will be discontinuous
+            recordTrack->mRecordBufferConverter->reset();
+        }
         recordTrack->mState = TrackBase::STARTING_2;
         // signal thread to start
         mWaitWorkCV.broadcast();
@@ -7262,6 +7409,13 @@
         (void)input->stream->dump(fd);
     }
 
+    const double latencyMs = - mTimestamp.getOutputServerLatencyMs(mSampleRate);
+    if (latencyMs != 0.) {
+        dprintf(fd, "  NormalRecord latency ms: %.2lf\n", latencyMs);
+    } else {
+        dprintf(fd, "  NormalRecord latency ms: unavail\n");
+    }
+
     dprintf(fd, "  Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, "  Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
 
@@ -7285,7 +7439,7 @@
     if (numtracks) {
         dprintf(fd, " of which %zu are active\n", numactive);
         result.append(prefix);
-        RecordTrack::appendDumpHeader(result);
+        mTracks[0]->appendDumpHeader(result);
         for (size_t i = 0; i < numtracks ; ++i) {
             sp<RecordTrack> track = mTracks[i];
             if (track != 0) {
@@ -7305,7 +7459,7 @@
         result.append("  The following tracks are in the active list but"
                 " not in the track list\n");
         result.append(prefix);
-        RecordTrack::appendDumpHeader(result);
+        mActiveTracks[0]->appendDumpHeader(result);
         for (size_t i = 0; i < numactive; ++i) {
             sp<RecordTrack> track = mActiveTracks[i];
             if (mTracks.indexOf(track) < 0) {
@@ -7813,24 +7967,28 @@
     return status;
 }
 
-void AudioFlinger::RecordThread::addPatchRecord(const sp<PatchRecord>& record)
+void AudioFlinger::RecordThread::addPatchTrack(const sp<PatchRecord>& record)
 {
     Mutex::Autolock _l(mLock);
     mTracks.add(record);
 }
 
-void AudioFlinger::RecordThread::deletePatchRecord(const sp<PatchRecord>& record)
+void AudioFlinger::RecordThread::deletePatchTrack(const sp<PatchRecord>& record)
 {
     Mutex::Autolock _l(mLock);
     destroyTrack_l(record);
 }
 
-void AudioFlinger::RecordThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::RecordThread::toAudioPortConfig(struct audio_port_config *config)
 {
-    ThreadBase::getAudioPortConfig(config);
+    ThreadBase::toAudioPortConfig(config);
     config->role = AUDIO_PORT_ROLE_SINK;
     config->ext.mix.hw_module = mInput->audioHwDev->handle();
     config->ext.mix.usecase.source = mAudioSource;
+    if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+        config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+        config->flags.input = mInput->flags;
+    }
 }
 
 // ----------------------------------------------------------------------------
@@ -7885,7 +8043,9 @@
       mSessionId(AUDIO_SESSION_NONE),
       mDeviceId(AUDIO_PORT_HANDLE_NONE), mPortId(AUDIO_PORT_HANDLE_NONE),
       mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev),
-      mActiveTracks(&this->mLocalLog)
+      mActiveTracks(&this->mLocalLog),
+      mHalVolFloat(-1.0f), // Initialize to illegal value so it always gets set properly later.
+      mNoCallbackWarningCount(0)
 {
     mStandby = true;
     readHalParameters_l();
@@ -7903,7 +8063,14 @@
 
 void AudioFlinger::MmapThread::disconnect()
 {
-    for (const sp<MmapTrack> &t : mActiveTracks) {
+    ActiveTracks<MmapTrack> activeTracks;
+    {
+        Mutex::Autolock _l(mLock);
+        for (const sp<MmapTrack> &t : mActiveTracks) {
+            activeTracks.add(t);
+        }
+    }
+    for (const sp<MmapTrack> &t : activeTracks) {
         stop(t->portId());
     }
     // This will decrement references and may cause the destruction of this thread.
@@ -7948,6 +8115,17 @@
     return mHalStream->getMmapPosition(position);
 }
 
+status_t AudioFlinger::MmapThread::exitStandby()
+{
+    status_t ret = mHalStream->start();
+    if (ret != NO_ERROR) {
+        ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
+        return ret;
+    }
+    mStandby = false;
+    return NO_ERROR;
+}
+
 status_t AudioFlinger::MmapThread::start(const AudioClient& client,
                                          audio_port_handle_t *handle)
 {
@@ -7961,13 +8139,7 @@
 
     if (*handle == mPortId) {
         // for the first track, reuse portId and session allocated when the stream was opened
-        ret = mHalStream->start();
-        if (ret != NO_ERROR) {
-            ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
-            return ret;
-        }
-        mStandby = false;
-        return NO_ERROR;
+        return exitStandby();
     }
 
     audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
@@ -8015,33 +8187,46 @@
         return BAD_VALUE;
     }
 
+    bool silenced = false;
     if (isOutput()) {
         ret = AudioSystem::startOutput(mId, streamType(), mSessionId);
     } else {
-        // TODO: Block recording for idle UIDs (b/72134552)
-        bool silenced;
         ret = AudioSystem::startInput(portId, &silenced);
     }
 
+    Mutex::Autolock _l(mLock);
     // abort if start is rejected by audio policy manager
     if (ret != NO_ERROR) {
         ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
         if (mActiveTracks.size() != 0) {
+            mLock.unlock();
             if (isOutput()) {
                 AudioSystem::releaseOutput(mId, streamType(), mSessionId);
             } else {
                 AudioSystem::releaseInput(portId);
             }
+            mLock.lock();
         } else {
             mHalStream->stop();
         }
         return PERMISSION_DENIED;
     }
 
+    if (isOutput()) {
+        // force volume update when a new track is added
+        mHalVolFloat = -1.0f;
+    } else if (!silenced) {
+        for (const sp<MmapTrack> &track : mActiveTracks) {
+            if (track->isSilenced_l() && track->uid() != client.clientUid)
+                track->invalidate();
+        }
+    }
+
     // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
     sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
-                                        client.clientUid, client.clientPid, portId);
+                                        isOutput(), client.clientUid, client.clientPid, portId);
 
+    track->setSilenced_l(silenced);
     mActiveTracks.add(track);
     sp<EffectChain> chain = getEffectChain_l(mSessionId);
     if (chain != 0) {
@@ -8071,6 +8256,8 @@
         return NO_ERROR;
     }
 
+    Mutex::Autolock _l(mLock);
+
     sp<MmapTrack> track;
     for (const sp<MmapTrack> &t : mActiveTracks) {
         if (handle == t->portId()) {
@@ -8084,6 +8271,7 @@
 
     mActiveTracks.remove(track);
 
+    mLock.unlock();
     if (isOutput()) {
         AudioSystem::stopOutput(mId, streamType(), track->sessionId());
         AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
@@ -8091,6 +8279,7 @@
         AudioSystem::stopInput(track->portId());
         AudioSystem::releaseInput(track->portId());
     }
+    mLock.lock();
 
     sp<EffectChain> chain = getEffectChain_l(track->sessionId());
     if (chain != 0) {
@@ -8342,7 +8531,9 @@
         sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
         sp<MmapStreamCallback> callback = mCallback.promote();
         if (mDeviceId != deviceId && callback != 0) {
+            mLock.unlock();
             callback->onRoutingChanged(deviceId);
+            mLock.lock();
         }
         mDeviceId = deviceId;
     }
@@ -8351,7 +8542,9 @@
         sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
         sp<MmapStreamCallback> callback = mCallback.promote();
         if (mDeviceId != deviceId && callback != 0) {
+            mLock.unlock();
             callback->onRoutingChanged(deviceId);
+            mLock.lock();
         }
         mDeviceId = deviceId;
     }
@@ -8377,9 +8570,9 @@
     return status;
 }
 
-void AudioFlinger::MmapThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::MmapThread::toAudioPortConfig(struct audio_port_config *config)
 {
-    ThreadBase::getAudioPortConfig(config);
+    ThreadBase::toAudioPortConfig(config);
     if (isOutput()) {
         config->role = AUDIO_PORT_ROLE_SOURCE;
         config->ext.mix.hw_module = mAudioHwDev->handle();
@@ -8517,9 +8710,13 @@
         if (track->isInvalid()) {
             sp<MmapStreamCallback> callback = mCallback.promote();
             if (callback != 0) {
-                callback->onTearDown();
+                mLock.unlock();
+                callback->onTearDown(track->portId());
+                mLock.lock();
+            } else if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+                ALOGW("Could not notify MMAP stream tear down: no onTearDown callback!");
+                mNoCallbackWarningCount++;
             }
-            break;
         }
     }
 }
@@ -8553,7 +8750,7 @@
     const char *prefix = "    ";
     if (numtracks) {
         result.append(prefix);
-        MmapTrack::appendDumpHeader(result);
+        mActiveTracks[0]->appendDumpHeader(result);
         for (size_t i = 0; i < numtracks ; ++i) {
             sp<MmapTrack> track = mActiveTracks[i];
             result.append(prefix);
@@ -8573,8 +8770,6 @@
       mStreamType(AUDIO_STREAM_MUSIC),
       mStreamVolume(1.0),
       mStreamMute(false),
-      mHalVolFloat(-1.0f), // Initialize to illegal value so it always gets set properly later.
-      mNoCallbackWarningCount(0),
       mOutput(output)
 {
     snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
@@ -8711,9 +8906,11 @@
                 for (int i = 0; i < channelCount; i++) {
                     values.add(volume);
                 }
-                callback->onVolumeChanged(mChannelMask, values);
                 mHalVolFloat = volume; // SW volume control worked, so update value.
                 mNoCallbackWarningCount = 0;
+                mLock.unlock();
+                callback->onVolumeChanged(mChannelMask, values);
+                mLock.lock();
             } else {
                 if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
                     ALOGW("Could not set MMAP stream volume: no volume callback!");
@@ -8759,6 +8956,15 @@
     }
 }
 
+void AudioFlinger::MmapPlaybackThread::toAudioPortConfig(struct audio_port_config *config)
+{
+    MmapThread::toAudioPortConfig(config);
+    if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+        config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+        config->flags.output = mOutput->flags;
+    }
+}
+
 void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
 {
     MmapThread::dumpInternals(fd, args);
@@ -8779,6 +8985,12 @@
     mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
 }
 
+status_t AudioFlinger::MmapCaptureThread::exitStandby()
+{
+    mInput->stream->setGain(1.0f);
+    return MmapThread::exitStandby();
+}
+
 AudioFlinger::AudioStreamIn* AudioFlinger::MmapCaptureThread::clearInput()
 {
     Mutex::Autolock _l(mLock);
@@ -8787,6 +8999,34 @@
     return input;
 }
 
+
+void AudioFlinger::MmapCaptureThread::processVolume_l()
+{
+    bool changed = false;
+    bool silenced = false;
+
+    sp<MmapStreamCallback> callback = mCallback.promote();
+    if (callback == 0) {
+        if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+            ALOGW("Could not set MMAP stream silenced: no onStreamSilenced callback!");
+            mNoCallbackWarningCount++;
+        }
+    }
+
+    // After a change occurred in track silenced state, mute capture in audio DSP if at least one
+    // track is silenced and unmute otherwise
+    for (size_t i = 0; i < mActiveTracks.size() && !silenced; i++) {
+        if (!mActiveTracks[i]->getAndSetSilencedNotified_l()) {
+            changed = true;
+            silenced = mActiveTracks[i]->isSilenced_l();
+        }
+    }
+
+    if (changed) {
+        mInput->stream->setGain(silenced ? 0.0f: 1.0f);
+    }
+}
+
 void AudioFlinger::MmapCaptureThread::updateMetadata_l()
 {
     if (mInput == nullptr || mInput->stream == nullptr ||
@@ -8804,4 +9044,24 @@
     mInput->stream->updateSinkMetadata(metadata);
 }
 
+void AudioFlinger::MmapCaptureThread::setRecordSilenced(uid_t uid, bool silenced)
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mActiveTracks.size() ; i++) {
+        if (mActiveTracks[i]->uid() == uid) {
+            mActiveTracks[i]->setSilenced_l(silenced);
+            broadcast_l();
+        }
+    }
+}
+
+void AudioFlinger::MmapCaptureThread::toAudioPortConfig(struct audio_port_config *config)
+{
+    MmapThread::toAudioPortConfig(config);
+    if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+        config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+        config->flags.input = mInput->flags;
+    }
+}
+
 } // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index bb81224..064e291 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -281,7 +281,7 @@
     virtual     status_t    createAudioPatch_l(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle) = 0;
     virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle) = 0;
-    virtual     void        getAudioPortConfig(struct audio_port_config *config) = 0;
+    virtual     void        toAudioPortConfig(struct audio_port_config *config) = 0;
 
 
                 // see note at declaration of mStandby, mOutDevice and mInDevice
@@ -434,6 +434,12 @@
     virtual     void        setMasterMono_l(bool mono __unused) { }
     virtual     bool        requireMonoBlend() { return false; }
 
+                            // called within the threadLoop to obtain timestamp from the HAL.
+    virtual     status_t    threadloop_getHalTimestamp_l(
+                                    ExtendedTimestamp *timestamp __unused) const {
+                                return INVALID_OPERATION;
+                            }
+
     friend class AudioFlinger;      // for mEffectChains
 
                 const type_t            mType;
@@ -493,10 +499,16 @@
                 sp<NBLog::Writer>       mNBLogWriter;
                 bool                    mSystemReady;
                 ExtendedTimestamp       mTimestamp;
+                TimestampVerifier< // For timestamp statistics.
+                        int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
+
                 // A condition that must be evaluated by the thread loop has changed and
                 // we must not wait for async write callback in the thread loop before evaluating it
                 bool                    mSignalPending;
 
+#ifdef TEE_SINK
+                NBAIO_Tee               mTee;
+#endif
                 // ActiveTracks is a sorted vector of track type T representing the
                 // active tracks of threadLoop() to be considered by the locked prepare portion.
                 // ActiveTracks should be accessed with the ThreadBase lock held.
@@ -566,8 +578,8 @@
                     // periodically called in the threadLoop() to update power state uids.
                     void            updatePowerState(sp<ThreadBase> thread, bool force = false);
 
-                    /** @return true if the active tracks have changed since the last time
-                     *          this function was called or the vector was created. */
+                    /** @return true if one or more active tracks were added or removed since the
+                     *          last time this function was called or the vector was created. */
                     bool            readAndClearHasChanged();
 
                 private:
@@ -588,7 +600,7 @@
                     int                 mLastActiveTracksGeneration;
                     wp<T>               mLatestActiveTrack; // latest track added to ActiveTracks
                     SimpleLog * const   mLocalLog;
-                    // If the active tracks have changed since last call to readAndClearHasChanged
+                    // If the vector has changed since last call to readAndClearHasChanged
                     bool                mHasChanged = false;
                 };
 
@@ -782,7 +794,7 @@
                 void        addPatchTrack(const sp<PatchTrack>& track);
                 void        deletePatchTrack(const sp<PatchTrack>& track);
 
-    virtual     void        getAudioPortConfig(struct audio_port_config *config);
+    virtual     void        toAudioPortConfig(struct audio_port_config *config);
 
                 // Return the asynchronous signal wait time.
     virtual     int64_t     computeWaitTimeNs_l() const { return INT64_MAX; }
@@ -927,7 +939,8 @@
     void        removeTrack_l(const sp<Track>& track);
 
     void        readOutputParameters_l();
-    void        updateMetadata_l() override;
+    void        updateMetadata_l() final;
+    virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
 
     virtual void dumpInternals(int fd, const Vector<String16>& args);
     void        dumpTracks(int fd, const Vector<String16>& args);
@@ -1053,11 +1066,6 @@
     sp<NBAIO_Sink>          mPipeSink;
     // The current sink for the normal mixer to write its (sub)mix, mOutputSink or mPipeSink
     sp<NBAIO_Sink>          mNormalSink;
-#ifdef TEE_SINK
-    // For dumpsys
-    sp<NBAIO_Sink>          mTeeSink;
-    sp<NBAIO_Source>        mTeeSource;
-#endif
     uint32_t                mScreenState;   // cached copy of gScreenState
     // TODO: add comment and adjust size as needed
     static const size_t     kFastMixerLogSize = 8 * 1024;
@@ -1151,6 +1159,14 @@
                               return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
                             }
 
+                status_t    threadloop_getHalTimestamp_l(
+                                    ExtendedTimestamp *timestamp) const override {
+                                if (mNormalSink.get() != nullptr) {
+                                    return mNormalSink->getTimestamp(*timestamp);
+                                }
+                                return INVALID_OPERATION;
+                            }
+
 protected:
     virtual     void       setMasterMono_l(bool mono) {
                                mMasterMono.store(mono);
@@ -1287,7 +1303,8 @@
                 void        removeOutputTrack(MixerThread* thread);
                 uint32_t    waitTimeMs() const { return mWaitTimeMs; }
 
-                void        updateMetadata_l() override;
+                void        sendMetadataToBackend_l(
+                        const StreamOutHalInterface::SourceMetadata& metadata) override;
 protected:
     virtual     uint32_t    activeSleepTimeUs() const;
 
@@ -1314,6 +1331,22 @@
     SortedVector < sp<OutputTrack> >  mOutputTracks;
 public:
     virtual     bool        hasFastMixer() const { return false; }
+                status_t    threadloop_getHalTimestamp_l(
+                                    ExtendedTimestamp *timestamp) const override {
+        if (mOutputTracks.size() > 0) {
+            // forward the first OutputTrack's kernel information for timestamp.
+            const ExtendedTimestamp trackTimestamp =
+                    mOutputTracks[0]->getClientProxyTimestamp();
+            if (trackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0) {
+                timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+                        trackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+                timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+                        trackTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+                return OK;  // discard server timestamp - that's ignored.
+            }
+        }
+        return INVALID_OPERATION;
+    }
 };
 
 // record thread
@@ -1372,9 +1405,6 @@
                     audio_devices_t outDevice,
                     audio_devices_t inDevice,
                     bool systemReady
-#ifdef TEE_SINK
-                    , const sp<NBAIO_Sink>& teeSink
-#endif
                     );
             virtual     ~RecordThread();
 
@@ -1435,8 +1465,8 @@
                                            audio_patch_handle_t *handle);
     virtual status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
 
-            void        addPatchRecord(const sp<PatchRecord>& record);
-            void        deletePatchRecord(const sp<PatchRecord>& record);
+            void        addPatchTrack(const sp<PatchRecord>& record);
+            void        deletePatchTrack(const sp<PatchRecord>& record);
 
             void        readInputParameters_l();
     virtual uint32_t    getInputFramesLost();
@@ -1457,7 +1487,7 @@
 
     virtual size_t      frameCount() const { return mFrameCount; }
             bool        hasFastCapture() const { return mFastCapture != 0; }
-    virtual void        getAudioPortConfig(struct audio_port_config *config);
+    virtual void        toAudioPortConfig(struct audio_port_config *config);
 
     virtual status_t    checkEffectCompatibility_l(const effect_descriptor_t *desc,
                                                    audio_session_t sessionId);
@@ -1504,8 +1534,6 @@
             int32_t                             mRsmpInRear;    // last filled frame + 1
 
             // For dumpsys
-            const sp<NBAIO_Sink>                mTeeSink;
-
             const sp<MemoryDealer>              mReadOnlyHeap;
 
             // one-time initialization, no locks required
@@ -1587,6 +1615,7 @@
     virtual     void        threadLoop_exit();
     virtual     void        threadLoop_standby();
     virtual     bool        shouldStandby_l() { return false; }
+    virtual     status_t    exitStandby();
 
     virtual     status_t    initCheck() const { return (mHalStream == 0) ? NO_INIT : NO_ERROR; }
     virtual     size_t      frameCount() const { return mFrameCount; }
@@ -1599,7 +1628,7 @@
     virtual     status_t    createAudioPatch_l(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle);
     virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
-    virtual     void        getAudioPortConfig(struct audio_port_config *config);
+    virtual     void        toAudioPortConfig(struct audio_port_config *config);
 
     virtual     sp<StreamHalInterface> stream() const { return mHalStream; }
     virtual     status_t    addEffectChain_l(const sp<EffectChain>& chain);
@@ -1619,6 +1648,9 @@
 
     virtual     void        invalidateTracks(audio_stream_type_t streamType __unused) {}
 
+                // Sets whether record (capture) for the given UID is silenced
+    virtual     void        setRecordSilenced(uid_t uid __unused, bool silenced __unused) {}
+
                 void        dump(int fd, const Vector<String16>& args);
     virtual     void        dumpInternals(int fd, const Vector<String16>& args);
                 void        dumpTracks(int fd, const Vector<String16>& args);
@@ -1635,6 +1667,10 @@
                 sp<DeviceHalInterface>  mHalDevice;
                 AudioHwDevice* const    mAudioHwDev;
                 ActiveTracks<MmapTrack> mActiveTracks;
+                float                   mHalVolFloat;
+
+                int32_t                 mNoCallbackWarningCount;
+     static     constexpr int32_t       kMaxNoCallbackWarnings = 5;
 };
 
 class MmapPlaybackThread : public MmapThread, public VolumeInterface
@@ -1668,7 +1704,7 @@
 
     virtual     audio_stream_type_t streamType() { return mStreamType; }
     virtual     void        checkSilentMode_l();
-    virtual     void        processVolume_l();
+                void        processVolume_l() override;
 
     virtual     void        dumpInternals(int fd, const Vector<String16>& args);
 
@@ -1676,6 +1712,8 @@
 
                 void        updateMetadata_l() override;
 
+    virtual     void        toAudioPortConfig(struct audio_port_config *config);
+
 protected:
 
                 audio_stream_type_t         mStreamType;
@@ -1683,9 +1721,6 @@
                 float                       mStreamVolume;
                 bool                        mMasterMute;
                 bool                        mStreamMute;
-                float                       mHalVolFloat;
-                int32_t                     mNoCallbackWarningCount;
-     static     constexpr int32_t           kMaxNoCallbackWarnings = 5;
                 AudioStreamOut*             mOutput;
 };
 
@@ -1700,9 +1735,14 @@
 
                 AudioStreamIn* clearInput();
 
+                status_t       exitStandby() override;
     virtual     bool           isOutput() const override { return false; }
 
                 void           updateMetadata_l() override;
+                void           processVolume_l() override;
+                void           setRecordSilenced(uid_t uid, bool silenced) override;
+
+    virtual     void           toAudioPortConfig(struct audio_port_config *config);
 
 protected:
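
The new threadloop_getHalTimestamp_l() hook and the mTimestampVerifier member both feed timestamp sanity checking in the thread loop. The sketch below is not the real TimestampVerifier API (which also tracks jitter and discontinuities); it only illustrates the basic monotonicity check such a verifier performs on the (frame count, time) pairs obtained from the HAL:

    // Illustrative stand-in for a timestamp verifier; names and behavior are simplified.
    #include <cstdint>
    #include <cstdio>

    class SimpleTimestampVerifier {
    public:
        // Returns false if the (frames, timeNs) pair moves backwards.
        bool add(int64_t frames, int64_t timeNs) {
            bool ok = true;
            if (mHaveLast) {
                ok = frames >= mLastFrames && timeNs >= mLastTimeNs;
            }
            if (!ok) ++mErrors;
            mHaveLast = true;
            mLastFrames = frames;
            mLastTimeNs = timeNs;
            return ok;
        }
        int errors() const { return mErrors; }
    private:
        bool mHaveLast = false;
        int64_t mLastFrames = 0;
        int64_t mLastTimeNs = 0;
        int mErrors = 0;
    };

    int main() {
        SimpleTimestampVerifier verifier;
        verifier.add(0, 0);
        verifier.add(480, 10000000);   // ok: 480 frames, 10 ms later (48 kHz)
        verifier.add(240, 20000000);   // frame count went backwards -> flagged
        std::printf("timestamp errors: %d\n", verifier.errors());
        return 0;
    }
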
 
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index ccfb69f..95da9d7 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -91,6 +91,7 @@
             void*       buffer() const { return mBuffer; }
             size_t      bufferSize() const { return mBufferSize; }
     virtual bool        isFastTrack() const = 0;
+    virtual bool        isDirect() const = 0;
             bool        isOutputTrack() const { return (mType == TYPE_OUTPUT); }
             bool        isPatchTrack() const { return (mType == TYPE_PATCH); }
             bool        isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
@@ -100,6 +101,92 @@
 
     audio_attributes_t  attributes() const { return mAttr; }
 
+#ifdef TEE_SINK
+           void         dumpTee(int fd, const std::string &reason) const {
+                                mTee.dump(fd, reason);
+                        }
+#endif
+
+            /** returns the buffer contents size converted to time in milliseconds
+             * for PCM Playback or Record streaming tracks. The return value is zero for
+             * PCM static tracks and not defined for non-PCM tracks.
+             *
+             * This may be called without the thread lock.
+             */
+    virtual double      bufferLatencyMs() const {
+                            return mServerProxy->framesReadySafe() * 1000 / sampleRate();
+                        }
+
+            /** returns whether the track supports server latency computation.
+             * This is set in the constructor and constant throughout the track lifetime.
+             */
+
+            bool        isServerLatencySupported() const { return mServerLatencySupported; }
+
+            /** computes the server latency for PCM Playback or Record track
+             * to the device sink/source.  This is the time for the next frame in the track buffer
+             * written or read from the server thread to the device source or sink.
+             *
+             * This may be called without the thread lock, but latencyMs and fromTrack
+             * may not be synchronized. For example, PatchPanel may not obtain the
+             * thread lock before calling.
+             *
+             * \param latencyMs on success is set to the latency in milliseconds of the
+             *        next frame written/read by the server thread to/from the track buffer
+             *        from the device source/sink.
+             * \param fromTrack on success is set to true if latency was computed directly
+             *        from the track timestamp; otherwise set to false if latency was
+             *        estimated from the server timestamp.
+             *        fromTrack may be nullptr or omitted if not required.
+             *
+             * \returns OK or INVALID_OPERATION on failure.
+             */
+            status_t    getServerLatencyMs(double *latencyMs, bool *fromTrack = nullptr) const {
+                            if (!isServerLatencySupported()) {
+                                return INVALID_OPERATION;
+                            }
+
+                            // if no thread lock is acquired, these atomics are not
+                            // synchronized with each other, considered a benign race.
+
+                            const double serverLatencyMs = mServerLatencyMs.load();
+                            if (serverLatencyMs == 0.) {
+                                return INVALID_OPERATION;
+                            }
+                            if (fromTrack != nullptr) {
+                                *fromTrack = mServerLatencyFromTrack.load();
+                            }
+                            *latencyMs = serverLatencyMs;
+                            return OK;
+                        }
+
+            /** computes the total client latency for PCM Playback or Record tracks
+             * for the next client app access to the device sink/source; i.e. the
+             * server latency plus the buffer latency.
+             *
+             * This may be called without the thread lock, but latencyMs and fromTrack
+             * may not be synchronized. For example, PatchPanel may not obtain the
+             * thread lock before calling.
+             *
+             * \param latencyMs on success is set to the latency in milliseconds of the
+             *        next frame written/read by the client app to/from the track buffer
+             *        from the device sink/source.
+             * \param fromTrack on success is set to true if latency was computed directly
+             *        from the track timestamp; otherwise set to false if latency was
+             *        estimated from the server timestamp.
+             *        fromTrack may be nullptr or omitted if not required.
+             *
+             * \returns OK or INVALID_OPERATION on failure.
+             */
+            status_t    getTrackLatencyMs(double *latencyMs, bool *fromTrack = nullptr) const {
+                            double serverLatencyMs;
+                            status_t status = getServerLatencyMs(&serverLatencyMs, fromTrack);
+                            if (status == OK) {
+                                *latencyMs = serverLatencyMs + bufferLatencyMs();
+                            }
+                            return status;
+                        }
+
 protected:
     DISALLOW_COPY_AND_ASSIGN(TrackBase);
 
@@ -208,13 +295,18 @@
     const bool          mIsOut;
     sp<ServerProxy>     mServerProxy;
     const int           mId;
-    sp<NBAIO_Sink>      mTeeSink;
-    sp<NBAIO_Source>    mTeeSource;
+#ifdef TEE_SINK
+    NBAIO_Tee           mTee;
+#endif
     bool                mTerminated;
     track_type          mType;      // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
     audio_io_handle_t   mThreadIoHandle; // I/O handle of the thread the track is attached to
     audio_port_handle_t mPortId; // unique ID for this track used by audio policy
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
+
+    bool                mServerLatencySupported = false;
+    std::atomic<bool>   mServerLatencyFromTrack{}; // latency from track or server timestamp.
+    std::atomic<double> mServerLatencyMs{};        // last latency pushed from server thread.
 };
 
 // PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
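
The latency helpers added to TrackBase compose two quantities: the server latency pushed by the thread loop into mServerLatencyMs, and the buffer latency derived from frames pending in the track buffer. A small sketch of that arithmetic with made-up numbers (the real code reads them from the server proxy and the atomics above):

    // Illustrative only: total track latency = server latency + buffer latency,
    // where buffer latency = framesReady * 1000 / sampleRate for PCM streaming tracks.
    #include <cstdio>

    double bufferLatencyMs(long framesReady, unsigned sampleRate) {
        return framesReady * 1000.0 / sampleRate;
    }

    int main() {
        const double serverLatencyMs = 12.5;  // hypothetical value pushed by the server thread
        const long framesReady = 960;         // hypothetical frames queued in the track buffer
        const unsigned sampleRate = 48000;

        const double trackLatencyMs = serverLatencyMs + bufferLatencyMs(framesReady, sampleRate);
        std::printf("track latency: %.2f ms\n", trackLatencyMs);  // 12.5 + 20.0 = 32.5 ms
        return 0;
    }
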
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 236412b..22e610e 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -28,11 +28,11 @@
 #include <private/media/AudioTrackShared.h>
 
 #include "AudioFlinger.h"
-#include "ServiceUtilities.h"
 
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
 #include <media/RecordBufferConverter.h>
+#include <mediautils/ServiceUtilities.h>
 #include <audio_utils/minifloat.h>
 
 // ----------------------------------------------------------------------------
@@ -102,7 +102,7 @@
         mIsInvalid(false)
 {
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
-    if (!isTrustedCallingUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
+    if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
         ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
                 "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
         clientUid = callingUid;
@@ -210,22 +210,7 @@
         mBufferSize = bufferSize;
 
 #ifdef TEE_SINK
-        if (mTeeSinkTrackEnabled) {
-            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
-            if (Format_isValid(pipeFormat)) {
-                Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
-                size_t numCounterOffers = 0;
-                const NBAIO_Format offers[1] = {pipeFormat};
-                ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
-                ALOG_ASSERT(index == 0);
-                PipeReader *pipeReader = new PipeReader(*pipe);
-                numCounterOffers = 0;
-                index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
-                ALOG_ASSERT(index == 0);
-                mTeeSink = pipe;
-                mTeeSource = pipeReader;
-            }
-        }
+        mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
 #endif
 
     }
@@ -244,9 +229,6 @@
 
 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
 {
-#ifdef TEE_SINK
-    dumpTee(-1, mTeeSource, mId, 'T');
-#endif
     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
     mServerProxy.clear();
     if (mCblk != NULL) {
@@ -274,9 +256,7 @@
 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
 {
 #ifdef TEE_SINK
-    if (mTeeSink != 0) {
-        (void) mTeeSink->write(buffer->raw, buffer->frameCount);
-    }
+    mTee.write(buffer->raw, buffer->frameCount);
 #endif
 
     ServerProxy::Buffer buf;
@@ -407,6 +387,9 @@
     // mSinkTimestamp
     mFastIndex(-1),
     mCachedVolume(1.0),
+    /* The track might not play immediately after becoming active, as if its volume were 0.
+     * When the track starts playing, its volume will be computed. */
+    mFinalVolume(0.f),
     mResumeToStopping(false),
     mFlushHwPending(false),
     mFlags(flags)
@@ -451,6 +434,14 @@
         thread->mFastTrackAvailMask &= ~(1 << i);
     }
     mName = TRACK_NAME_PENDING;
+
+    mServerLatencySupported = thread->type() == ThreadBase::MIXER
+            || thread->type() == ThreadBase::DUPLICATING;
+#ifdef TEE_SINK
+    mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+            + "_" + std::to_string(mId) +
+            + "_PEND_T");
+#endif
 }
 
 AudioFlinger::PlaybackThread::Track::~Track()
@@ -500,13 +491,15 @@
     }
 }
 
-/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
+void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
 {
-    result.append("T Name Active Client Session S  Flags "
-                  "  Format Chn mask  SRate "
-                  "ST  L dB  R dB  VS dB "
-                  "  Server FrmCnt  FrmRdy F Underruns  Flushed "
-                  "Main Buf  Aux Buf\n");
+    result.appendFormat("T Name Active Client Session S  Flags "
+                        "  Format Chn mask  SRate "
+                        "ST Usg CT "
+                        " G db  L dB  R dB  VS dB "
+                        "  Server FrmCnt  FrmRdy F Underruns  Flushed"
+                        "%s\n",
+                        isServerLatencySupported() ? "   Latency" : "");
 }
 
 void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
@@ -515,7 +508,7 @@
     switch (mType) {
     case TYPE_DEFAULT:
     case TYPE_OUTPUT:
-        if (mSharedBuffer.get() != nullptr) {
+        if (isStatic()) {
             trackType = 'S'; // static
         } else {
             trackType = ' '; // normal
@@ -591,21 +584,25 @@
                     ? 'e' /* error */ : ' ' /* identical */;
 
     result.appendFormat("%7s %6u %7u %2s 0x%03X "
-                           "%08X %08X %6u "
-                           "%2u %5.2g %5.2g %5.2g%c "
-                           "%08X %6zu%c %6zu %c %9u%c %7u "
-                           "%08zX %08zX\n",
+                        "%08X %08X %6u "
+                        "%2u %3x %2x "
+                        "%5.2g %5.2g %5.2g %5.2g%c "
+                        "%08X %6zu%c %6zu %c %9u%c %7u",
             active ? "yes" : "no",
-            (mClient == 0) ? getpid_cached : mClient->pid(),
+            (mClient == 0) ? getpid() : mClient->pid(),
             mSessionId,
             getTrackStateString(),
             mCblk->mFlags,
 
             mFormat,
             mChannelMask,
-            mAudioTrackServerProxy->getSampleRate(),
+            sampleRate(),
 
             mStreamType,
+            mAttr.usage,
+            mAttr.content_type,
+
+            20.0 * log10(mFinalVolume),
             20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
             20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
             20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
@@ -618,11 +615,21 @@
             fillingStatus,
             mAudioTrackServerProxy->getUnderrunFrames(),
             nowInUnderrun,
-            (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
-
-            (size_t)mMainBuffer, // use %zX as %p appends 0x
-            (size_t)mAuxBuffer   // use %zX as %p appends 0x
+            (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000
             );
+
+    if (isServerLatencySupported()) {
+        double latencyMs;
+        bool fromTrack;
+        if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
+            // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
+            // or 'k' if estimated from kernel because track frames haven't been presented yet.
+            result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
+        } else {
+            result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
+        }
+    }
+    result.append("\n");
 }
 
 uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
@@ -679,6 +686,13 @@
     mAudioTrackServerProxy->setTimestamp(timestamp);
 
     // We do not set drained here, as FastTrack timestamp may not go to very last frame.
+
+    // Compute latency.
+    // TODO: Consider whether the server latency may be passed in by FastMixer
+    // as a constant for all active FastTracks.
+    const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
+    mServerLatencyFromTrack.store(true);
+    mServerLatencyMs.store(latencyMs);
 }
 
 // Don't call for fast tracks; the framesReady() could result in priority inversion
@@ -982,7 +996,7 @@
         // Signal thread to fetch new volume.
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-             Mutex::Autolock _l(thread->mLock);
+            Mutex::Autolock _l(thread->mLock);
             thread->broadcast_l();
         }
     }
@@ -997,6 +1011,23 @@
     return mVolumeHandler->getVolumeShaperState(id);
 }
 
+void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
+{
+    if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
+        mFinalVolume = volume;
+        setMetadataHasChanged();
+    }
+}
+
+void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
+{
+    *backInserter++ = {
+            .usage = mAttr.usage,
+            .content_type = mAttr.content_type,
+            .gain = mFinalVolume,
+    };
+}
+
 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
 {
     if (!isOffloaded() && !isDirect()) {
@@ -1226,7 +1257,7 @@
 //To be called with thread lock held
 void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
         int64_t trackFramesReleased, int64_t sinkFramesWritten,
-        const ExtendedTimestamp &timeStamp) {
+        uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
     //update frame map
     mFrameMap.push(trackFramesReleased, sinkFramesWritten);
 
@@ -1235,6 +1266,7 @@
     // Our timestamps are only updated when the track is on the Thread active list.
     // We need to ensure that tracks are not removed before full drain.
     ExtendedTimestamp local = timeStamp;
+    bool drained = true; // default assume drained, if no server info found
     bool checked = false;
     for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
             i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
@@ -1243,18 +1275,25 @@
             local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
             // check drain state from the latest stage in the pipeline.
             if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
-                mAudioTrackServerProxy->setDrained(
-                        local.mPosition[i] >= mAudioTrackServerProxy->framesReleased());
+                drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
                 checked = true;
             }
         }
     }
-    if (!checked) { // no server info, assume drained.
-        mAudioTrackServerProxy->setDrained(true);
-    }
+
+    mAudioTrackServerProxy->setDrained(drained);
     // Set correction for flushed frames that are not accounted for in released.
     local.mFlushed = mAudioTrackServerProxy->framesFlushed();
     mServerProxy->setTimestamp(local);
+
+    // Compute latency info.
+    const bool useTrackTimestamp = !drained;
+    const double latencyMs = useTrackTimestamp
+            ? local.getOutputServerLatencyMs(sampleRate())
+            : timeStamp.getOutputServerLatencyMs(halSampleRate);
+
+    mServerLatencyFromTrack.store(useTrackTimestamp);
+    mServerLatencyMs.store(latencyMs);
 }
 
 // ----------------------------------------------------------------------------
@@ -1322,7 +1361,7 @@
     mActive = false;
 }
 
-bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
+ssize_t AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
 {
     Buffer *pInBuffer;
     Buffer inBuffer;
@@ -1411,9 +1450,12 @@
                 mBufferQueue.add(pInBuffer);
                 ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %zu", this,
                         mThread.unsafe_get(), mBufferQueue.size());
+                // audio data is consumed (stored locally); set frameCount to 0.
+                inBuffer.frameCount = 0;
             } else {
                 ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
                         mThread.unsafe_get(), this);
+                // TODO: return error for this.
             }
         }
     }
@@ -1424,7 +1466,22 @@
         stop();
     }
 
-    return outputBufferFull;
+    return frames - inBuffer.frameCount;  // number of frames consumed.
+}
+
+void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
+{
+    std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+    backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
+}
+
+void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
+    {
+        std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+        mTrackMetadatas = metadatas;
+    }
+    // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
+    setMetadataHasChanged();
 }
 
 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
@@ -1474,7 +1531,7 @@
               audio_attributes_t{} /* currently unused for patch track */,
               sampleRate, format, channelMask, frameCount,
               buffer, bufferSize, nullptr /* sharedBuffer */,
-              AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
+              AUDIO_SESSION_NONE, AID_AUDIOSERVER, flags, TYPE_PATCH),
               mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
 {
     uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
@@ -1493,7 +1550,7 @@
 }
 
 status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
-                                                          audio_session_t triggerSession)
+                                                         audio_session_t triggerSession)
 {
     status_t status = Track::start(event, triggerSession);
     if (status != NO_ERROR) {
@@ -1536,9 +1593,11 @@
     status_t status = NO_ERROR;
     static const int32_t kMaxTries = 5;
     int32_t tryCounter = kMaxTries;
+    const size_t originalFrameCount = buffer->mFrameCount;
     do {
         if (status == NOT_ENOUGH_DATA) {
             restartIfDisabled();
+            buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
         }
         status = mProxy->obtainBuffer(buffer, timeOut);
     } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
@@ -1628,24 +1687,27 @@
         mFramesToDrop(0),
         mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
         mRecordBufferConverter(NULL),
-        mFlags(flags)
+        mFlags(flags),
+        mSilenced(false)
 {
     if (mCblk == NULL) {
         return;
     }
 
-    mRecordBufferConverter = new RecordBufferConverter(
-            thread->mChannelMask, thread->mFormat, thread->mSampleRate,
-            channelMask, format, sampleRate);
-    // Check if the RecordBufferConverter construction was successful.
-    // If not, don't continue with construction.
-    //
-    // NOTE: It would be extremely rare that the record track cannot be created
-    // for the current device, but a pending or future device change would make
-    // the record track configuration valid.
-    if (mRecordBufferConverter->initCheck() != NO_ERROR) {
-        ALOGE("RecordTrack unable to create record buffer converter");
-        return;
+    if (!isDirect()) {
+        mRecordBufferConverter = new RecordBufferConverter(
+                thread->mChannelMask, thread->mFormat, thread->mSampleRate,
+                channelMask, format, sampleRate);
+        // Check if the RecordBufferConverter construction was successful.
+        // If not, don't continue with construction.
+        //
+        // NOTE: It would be extremely rare that the record track cannot be created
+        // for the current device, but a pending or future device change would make
+        // the record track configuration valid.
+        if (mRecordBufferConverter->initCheck() != NO_ERROR) {
+            ALOGE("RecordTrack unable to create record buffer converter");
+            return;
+        }
     }
 
     mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
@@ -1656,7 +1718,15 @@
     if (flags & AUDIO_INPUT_FLAG_FAST) {
         ALOG_ASSERT(thread->mFastTrackAvail);
         thread->mFastTrackAvail = false;
+    } else {
+        // TODO: only Normal Record has timestamps (Fast Record does not).
+        mServerLatencySupported = true;
     }
+#ifdef TEE_SINK
+    mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+            + "_" + std::to_string(mId)
+            + "_R");
+#endif
 }
 
 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
@@ -1745,19 +1815,22 @@
 }
 
 
-/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
+void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.append("Active Client Session S  Flags   Format Chn mask  SRate   Server FrmCnt\n");
+    result.appendFormat("Active Client Session S  Flags  "
+                        " Format Chn mask  SRate Source "
+                        " Server FrmCnt FrmRdy Sil%s\n",
+                        isServerLatencySupported() ? "   Latency" : "");
 }
 
 void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
 {
     result.appendFormat("%c%5s %6u %7u %2s 0x%03X "
-            "%08X %08X %6u "
-            "%08X %6zu\n",
+            "%08X %08X %6u %6X "
+            "%08X %6zu %6zu %3c",
             isFastTrack() ? 'F' : ' ',
             active ? "yes" : "no",
-            (mClient == 0) ? getpid_cached : mClient->pid(),
+            (mClient == 0) ? getpid() : mClient->pid(),
             mSessionId,
             getTrackStateString(),
             mCblk->mFlags,
@@ -1765,10 +1838,25 @@
             mFormat,
             mChannelMask,
             mSampleRate,
+            mAttr.source,
 
             mCblk->mServer,
-            mFrameCount
+            mFrameCount,
+            mServerProxy->framesReadySafe(),
+            isSilenced() ? 's' : 'n'
             );
+    if (isServerLatencySupported()) {
+        double latencyMs;
+        bool fromTrack;
+        if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
+            // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
+            // or 'k' if estimated from kernel (usually for debugging).
+            result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
+        } else {
+            result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
+        }
+    }
+    result.append("\n");
 }
 
 void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
@@ -1811,6 +1899,15 @@
         }
     }
     mServerProxy->setTimestamp(local);
+
+    // Compute latency info.
+    const bool useTrackTimestamp = true; // use track unless debugging.
+    const double latencyMs = - (useTrackTimestamp
+            ? local.getOutputServerLatencyMs(sampleRate())
+            : timestamp.getOutputServerLatencyMs(halSampleRate));
+
+    mServerLatencyFromTrack.store(useTrackTimestamp);
+    mServerLatencyMs.store(latencyMs);
 }
 
 status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
@@ -1836,7 +1933,8 @@
     :   RecordTrack(recordThread, NULL,
                 audio_attributes_t{} /* currently unused for patch track */,
                 sampleRate, format, channelMask, frameCount,
-                buffer, bufferSize, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
+                buffer, bufferSize, AUDIO_SESSION_NONE, AID_AUDIOSERVER,
+                flags, TYPE_PATCH),
                 mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
 {
     uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
@@ -1901,16 +1999,17 @@
         audio_format_t format,
         audio_channel_mask_t channelMask,
         audio_session_t sessionId,
+        bool isOut,
         uid_t uid,
         pid_t pid,
         audio_port_handle_t portId)
     :   TrackBase(thread, NULL, attr, sampleRate, format,
                   channelMask, (size_t)0 /* frameCount */,
                   nullptr /* buffer */, (size_t)0 /* bufferSize */,
-                  sessionId, uid, false /* isOut */,
+                  sessionId, uid, isOut,
                   ALLOC_NONE,
                   TYPE_DEFAULT, portId),
-        mPid(pid)
+        mPid(pid), mSilenced(false), mSilencedNotified(false)
 {
 }
 
@@ -1924,7 +2023,7 @@
 }
 
 status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
-                                                        audio_session_t triggerSession __unused)
+                                                    audio_session_t triggerSession __unused)
 {
     return NO_ERROR;
 }
@@ -1955,19 +2054,27 @@
 {
 }
 
-/*static*/ void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
+void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
 {
-    result.append("Client Session   Format Chn mask  SRate\n");
+    result.appendFormat("Client Session   Format Chn mask  SRate Flags %s\n",
+                        isOut() ? "Usg CT": "Source");
 }
 
 void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
 {
-    result.appendFormat("%6u %7u %08X %08X %6u\n",
+    result.appendFormat("%6u %7u %08X %08X %6u 0x%03X ",
             mPid,
             mSessionId,
             mFormat,
             mChannelMask,
-            mSampleRate);
+            mSampleRate,
+            mAttr.flags);
+    if (isOut()) {
+        result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
+    } else {
+        result.appendFormat("%6x", mAttr.source);
+    }
+    result.append("\n");
 }
 
 } // namespace android
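
OutputTrack::write() now returns the number of frames actually consumed (as ssize_t) instead of a bool. A hypothetical caller, not the real DuplicatingThread code, would handle partial consumption roughly like this:

    // Illustrative caller; OutputTrackLike is a stand-in for the real OutputTrack.
    #include <sys/types.h>
    #include <cstdint>
    #include <cstdio>

    struct OutputTrackLike {
        // Returns frames consumed, possibly fewer than requested when the local
        // overflow buffers are exhausted (mirrors the new write() contract).
        ssize_t write(const void* /*data*/, uint32_t frames) {
            const uint32_t capacity = 256;  // pretend only 256 frames fit right now
            return frames < capacity ? frames : capacity;
        }
    };

    int main() {
        OutputTrackLike track;
        int16_t buffer[512] = {};
        const uint32_t frames = 512;

        const ssize_t written = track.write(buffer, frames);
        if (written < 0) {
            std::printf("write failed\n");
        } else if (static_cast<uint32_t>(written) < frames) {
            std::printf("consumed %zd of %u frames; retry the remainder later\n",
                        written, frames);
        }
        return 0;
    }
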
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 65571f9..b75e957 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -13,7 +13,6 @@
     $(call include-path-for, audio-utils) \
     frameworks/av/services/audiopolicy/common/include \
     frameworks/av/services/audiopolicy/engine/interface \
-    frameworks/av/services/audiopolicy/utilities
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -22,9 +21,10 @@
     libbinder \
     libaudioclient \
     libhardware_legacy \
-    libserviceutility \
     libaudiopolicymanager \
     libmedia_helper \
+    libmediametrics \
+    libmediautils \
     libeffectsconfig
 
 LOCAL_STATIC_LIBRARIES := \
@@ -60,6 +60,7 @@
     audio_policy_criteria.conf \
 
 LOCAL_C_INCLUDES += frameworks/av/services/audiopolicy/engineconfigurable/include
+LOCAL_C_INCLUDES += frameworks/av/include
 
 LOCAL_SHARED_LIBRARIES += libaudiopolicyengineconfigurable
 
@@ -72,12 +73,12 @@
 LOCAL_C_INCLUDES += \
     frameworks/av/services/audiopolicy/common/include \
     frameworks/av/services/audiopolicy/engine/interface \
-    frameworks/av/services/audiopolicy/utilities
 
 LOCAL_STATIC_LIBRARIES := \
     libaudiopolicycomponents
 
 LOCAL_SHARED_LIBRARIES += libmedia_helper
+LOCAL_SHARED_LIBRARIES += libmediametrics
 
 ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 LOCAL_SHARED_LIBRARIES += libicuuc libxml2
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 7f09e9b..fe49483 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -69,8 +69,12 @@
         API_INPUT_CONCURRENCY_NONE = 0,
         API_INPUT_CONCURRENCY_CALL = (1 << 0),      // Concurrency with a call
         API_INPUT_CONCURRENCY_CAPTURE = (1 << 1),   // Concurrency with another capture
+        API_INPUT_CONCURRENCY_HOTWORD = (1 << 2),   // Concurrency with a hotword
+        API_INPUT_CONCURRENCY_PREEMPT = (1 << 3),   // pre-empted someone
+                // NB: PREEMPT is set on a successful return; the other flags are set on failing calls
+        API_INPUT_CONCURRENCY_LAST = (1 << 4),
 
-        API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE),
+        API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_LAST - 1),
     };
 
     typedef uint32_t concurrency_type__mask_t;
@@ -241,6 +245,12 @@
     virtual float    getStreamVolumeDB(
                 audio_stream_type_t stream, int index, audio_devices_t device) = 0;
 
+    virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+                                        audio_format_t *surroundFormats,
+                                        bool *surroundFormatsEnabled,
+                                        bool reported) = 0;
+    virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
+
     virtual void     setRecordSilenced(uid_t uid, bool silenced);
 };
 
@@ -314,11 +324,6 @@
     // function enabling the audio policy manager to receive proprietary information directly from the audio hardware interface.
     virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
 
-    // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
-    // over a telephony device during a phone call.
-    virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0;
-    virtual status_t stopTone() = 0;
-
     // set down link audio volume.
     virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
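
API_INPUT_CONCURRENCY_ALL is now derived from the _LAST sentinel, so any new bit defined below the sentinel is covered automatically: with _LAST = (1 << 4), ALL = _LAST - 1 = 0b1111. A stand-alone check of that bit arithmetic (values copied from the enum above, names shortened):

    // Bit-mask arithmetic check for the sentinel-derived ALL mask.
    #include <cassert>

    enum {
        CONCURRENCY_CALL    = (1 << 0),
        CONCURRENCY_CAPTURE = (1 << 1),
        CONCURRENCY_HOTWORD = (1 << 2),
        CONCURRENCY_PREEMPT = (1 << 3),
        CONCURRENCY_LAST    = (1 << 4),
        CONCURRENCY_ALL     = CONCURRENCY_LAST - 1,
    };

    int main() {
        // ALL is the union of every bit below the sentinel.
        assert(CONCURRENCY_ALL ==
               (CONCURRENCY_CALL | CONCURRENCY_CAPTURE | CONCURRENCY_HOTWORD | CONCURRENCY_PREEMPT));
        // Testing an individual condition in a returned mask:
        const unsigned concurrency = CONCURRENCY_CALL | CONCURRENCY_PREEMPT;
        assert((concurrency & CONCURRENCY_HOTWORD) == 0);
        return 0;
    }
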
 
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index e263c0c..b3611c4 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -35,7 +35,7 @@
     $(LOCAL_PATH)/include \
     frameworks/av/services/audiopolicy/common/include \
     frameworks/av/services/audiopolicy \
-    frameworks/av/services/audiopolicy/utilities \
+    $(call include-path-for, audio-utils) \
 
 ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
similarity index 67%
rename from services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
rename to services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
index e0037fc..9f3fc0c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
@@ -19,26 +19,27 @@
 namespace android {
 
 /**
- * Interface for input descriptors to implement so dependent audio sessions can query information
- * about their context
+ * Interface for I/O descriptors to implement so information about their context
+ * can be queried and updated.
  */
-class AudioSessionInfoProvider
+class AudioIODescriptorInterface
 {
 public:
-    virtual ~AudioSessionInfoProvider() {};
+    virtual ~AudioIODescriptorInterface() {};
 
     virtual audio_config_base_t getConfig() const = 0;
 
     virtual audio_patch_handle_t getPatchHandle() const = 0;
 
+    virtual void setPatchHandle(audio_patch_handle_t handle) = 0;
 };
 
-class AudioSessionInfoUpdateListener
+class AudioIODescriptorUpdateListener
 {
 public:
-    virtual ~AudioSessionInfoUpdateListener() {};
+    virtual ~AudioIODescriptorUpdateListener() {};
 
-    virtual void onSessionInfoUpdate() const = 0;;
+    virtual void onIODescriptorUpdate() const = 0;
 };
 
 } // namespace android
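
The rename generalizes the old session-info provider into an I/O-descriptor interface plus an update listener. The toy code below shows the same shape with simplified types (plain int in place of audio_config_base_t and audio_patch_handle_t); it is not the AOSP implementation:

    // Toy shapes only, to illustrate the descriptor/listener relationship.
    #include <cstdio>

    struct ToyIODescriptorInterface {
        virtual ~ToyIODescriptorInterface() {}
        virtual int getPatchHandle() const = 0;
        virtual void setPatchHandle(int handle) = 0;
    };

    struct ToyUpdateListener {
        virtual ~ToyUpdateListener() {}
        virtual void onIODescriptorUpdate() const = 0;
    };

    struct ToyInputDescriptor : ToyIODescriptorInterface {
        int patchHandle = 0;
        const ToyUpdateListener* listener = nullptr;

        int getPatchHandle() const override { return patchHandle; }
        void setPatchHandle(int handle) override {
            patchHandle = handle;
            if (listener != nullptr) listener->onIODescriptorUpdate();  // notify dependents
        }
    };

    struct ToySession : ToyUpdateListener {
        void onIODescriptorUpdate() const override { std::printf("descriptor changed\n"); }
    };

    int main() {
        ToySession session;
        ToyInputDescriptor input;
        input.listener = &session;
        input.setPatchHandle(42);  // prints "descriptor changed"
        return 0;
    }
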
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b25d6d4..85f3b86 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -16,9 +16,9 @@
 
 #pragma once
 
+#include "AudioIODescriptorInterface.h"
 #include "AudioPort.h"
 #include "AudioSession.h"
-#include "AudioSessionInfoProvider.h"
 #include <utils/Errors.h>
 #include <system/audio.h>
 #include <utils/SortedVector.h>
@@ -31,7 +31,7 @@
 
 // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
 // and keep track of the usage of this input.
-class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
+class AudioInputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
 {
 public:
     explicit AudioInputDescriptor(const sp<IOProfile>& profile,
@@ -67,11 +67,10 @@
     size_t getAudioSessionCount(bool activeOnly) const;
     audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
-    // implementation of AudioSessionInfoProvider
-    virtual audio_config_base_t getConfig() const;
-    virtual audio_patch_handle_t getPatchHandle() const;
-
-    void setPatchHandle(audio_patch_handle_t handle);
+    // implementation of AudioIODescriptorInterface
+    audio_config_base_t getConfig() const override;
+    audio_patch_handle_t getPatchHandle() const override;
+    void setPatchHandle(audio_patch_handle_t handle) override;
 
     status_t open(const audio_config_t *config,
                   audio_devices_t device,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 5e5d38b..57d1cfa 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -24,6 +24,7 @@
 #include <utils/Timers.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
+#include "AudioIODescriptorInterface.h"
 #include "AudioSourceDescriptor.h"
 
 namespace android {
@@ -35,7 +36,7 @@
 
 // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
 // and keep track of the usage of this output by each audio stream type.
-class AudioOutputDescriptor: public AudioPortConfig
+class AudioOutputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
 {
 public:
     AudioOutputDescriptor(const sp<AudioPort>& port,
@@ -73,8 +74,10 @@
 
     audio_module_handle_t getModuleHandle() const;
 
-    audio_patch_handle_t getPatchHandle() const { return mPatchHandle; };
-    void setPatchHandle(audio_patch_handle_t handle) { mPatchHandle = handle; };
+    // implementation of AudioIODescriptorInterface
+    audio_config_base_t getConfig() const override;
+    audio_patch_handle_t getPatchHandle() const override;
+    void setPatchHandle(audio_patch_handle_t handle) override;
 
     sp<AudioPort>       mPort;
     audio_devices_t mDevice;                   // current device this output is routed to
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index 43f6ed6..f861b95 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -38,16 +38,24 @@
     AudioPolicyConfig(HwModuleCollection &hwModules,
                       DeviceVector &availableOutputDevices,
                       DeviceVector &availableInputDevices,
-                      sp<DeviceDescriptor> &defaultOutputDevices,
+                      sp<DeviceDescriptor> &defaultOutputDevice,
                       VolumeCurvesCollection *volumes = nullptr)
         : mHwModules(hwModules),
           mAvailableOutputDevices(availableOutputDevices),
           mAvailableInputDevices(availableInputDevices),
-          mDefaultOutputDevices(defaultOutputDevices),
+          mDefaultOutputDevice(defaultOutputDevice),
           mVolumeCurves(volumes),
           mIsSpeakerDrcEnabled(false)
     {}
 
+    const std::string& getSource() const {
+        return mSource;
+    }
+
+    void setSource(const std::string& file) {
+        mSource = file;
+    }
+
     void setVolumes(const VolumeCurvesCollection &volumes)
     {
         if (mVolumeCurves != nullptr) {
@@ -100,46 +108,52 @@
 
     void setDefaultOutputDevice(const sp<DeviceDescriptor> &defaultDevice)
     {
-        mDefaultOutputDevices = defaultDevice;
+        mDefaultOutputDevice = defaultDevice;
     }
 
-    const sp<DeviceDescriptor> &getDefaultOutputDevice() const { return mDefaultOutputDevices; }
+    const sp<DeviceDescriptor> &getDefaultOutputDevice() const { return mDefaultOutputDevice; }
 
     void setDefault(void)
     {
-        mDefaultOutputDevices = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
-        sp<HwModule> module;
+        mSource = "AudioPolicyConfig::setDefault";
+        mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
+        mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic());
         sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
-        mAvailableOutputDevices.add(mDefaultOutputDevices);
+        defaultInputDevice->addAudioProfile(AudioProfile::createFullDynamic());
+        sp<AudioProfile> micProfile = new AudioProfile(
+                AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000);
+        defaultInputDevice->addAudioProfile(micProfile);
+        mAvailableOutputDevices.add(mDefaultOutputDevice);
         mAvailableInputDevices.add(defaultInputDevice);
 
-        module = new HwModule(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+        sp<HwModule> module = new HwModule(AUDIO_HARDWARE_MODULE_ID_PRIMARY, 2 /*halVersionMajor*/);
+        mHwModules.add(module);
+        mDefaultOutputDevice->attach(module);
+        defaultInputDevice->attach(module);
 
         sp<OutputProfile> outProfile;
         outProfile = new OutputProfile(String8("primary"));
         outProfile->attach(module);
         outProfile->addAudioProfile(
                 new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 44100));
-        outProfile->addSupportedDevice(mDefaultOutputDevices);
+        outProfile->addSupportedDevice(mDefaultOutputDevice);
         outProfile->setFlags(AUDIO_OUTPUT_FLAG_PRIMARY);
         module->addOutputProfile(outProfile);
 
         sp<InputProfile> inProfile;
         inProfile = new InputProfile(String8("primary"));
         inProfile->attach(module);
-        inProfile->addAudioProfile(
-                new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000));
+        inProfile->addAudioProfile(micProfile);
         inProfile->addSupportedDevice(defaultInputDevice);
         module->addInputProfile(inProfile);
-
-        mHwModules.add(module);
     }
 
 private:
+    std::string mSource;
     HwModuleCollection &mHwModules; /**< Collection of Module, with Profiles, i.e. Mix Ports. */
     DeviceVector &mAvailableOutputDevices;
     DeviceVector &mAvailableInputDevices;
-    sp<DeviceDescriptor> &mDefaultOutputDevices;
+    sp<DeviceDescriptor> &mDefaultOutputDevice;
     VolumeCurvesCollection *mVolumeCurves;
     // TODO: remove when legacy conf file is removed. true on devices that use DRC on the
     // DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
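
setDefault() now records its provenance through mSource, so a dump can tell whether the policy configuration came from a parsed file or from the built-in fallback. A toy stand-in for that bookkeeping (ToyPolicyConfig and the file path below are hypothetical, not the real class or a guaranteed location):

    // Toy stand-in illustrating source tracking for a policy configuration.
    #include <cstdio>
    #include <string>

    class ToyPolicyConfig {
    public:
        const std::string& getSource() const { return mSource; }
        void setSource(const std::string& file) { mSource = file; }
        void setDefault() {
            mSource = "ToyPolicyConfig::setDefault";  // record that the fallback config is in use
            // ... build the minimal speaker/mic profiles here, as the real setDefault() does ...
        }
    private:
        std::string mSource;
    };

    int main() {
        ToyPolicyConfig config;
        const bool parsedConfigFile = false;  // pretend the configuration file failed to load
        if (parsedConfigFile) {
            config.setSource("/vendor/etc/audio_policy_configuration.xml");  // hypothetical path
        } else {
            config.setDefault();
        }
        std::printf("policy config source: %s\n", config.getSource().c_str());
        return 0;
    }
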
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index caf3c02..bd7517f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -52,7 +52,7 @@
     void setGains(const AudioGainCollection &gains) { mGains = gains; }
     const AudioGainCollection &getGains() const { return mGains; }
 
-    void setFlags(uint32_t flags)
+    virtual void setFlags(uint32_t flags)
     {
         //force direct flag if offload flag is set: offloading implies a direct output stream
         // and all common behaviors are driven by checking only the direct flag
@@ -153,9 +153,6 @@
 class AudioPortConfig : public virtual RefBase
 {
 public:
-    AudioPortConfig();
-    virtual ~AudioPortConfig() {}
-
     status_t applyAudioPortConfig(const struct audio_port_config *config,
                                   struct audio_port_config *backupConfig = NULL);
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -165,10 +162,11 @@
         return (other != 0) &&
                 (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
     }
-    uint32_t mSamplingRate;
-    audio_format_t mFormat;
-    audio_channel_mask_t mChannelMask;
-    struct audio_gain_config mGain;
+    unsigned int mSamplingRate = 0u;
+    audio_format_t mFormat = AUDIO_FORMAT_INVALID;
+    audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+    struct audio_gain_config mGain = { .index = -1 };
+    union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
 };
 
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
index 8741c66..4226ff2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
@@ -36,6 +36,8 @@
 class AudioProfile : public virtual RefBase
 {
 public:
+    static sp<AudioProfile> createFullDynamic();
+
     AudioProfile(audio_format_t format,
                  audio_channel_mask_t channelMasks,
                  uint32_t samplingRate) :
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index dd5247d..53e6ec9 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -23,13 +23,13 @@
 #include <utils/KeyedVector.h>
 #include <media/AudioPolicy.h>
 #include <media/IAudioPolicyServiceClient.h>
-#include "AudioSessionInfoProvider.h"
+#include "AudioIODescriptorInterface.h"
 
 namespace android {
 
 class AudioPolicyClientInterface;
 
-class AudioSession : public RefBase, public AudioSessionInfoUpdateListener
+class AudioSession : public RefBase, public AudioIODescriptorUpdateListener
 {
 public:
     AudioSession(audio_session_t session,
@@ -63,9 +63,9 @@
     uint32_t changeOpenCount(int delta);
     uint32_t changeActiveCount(int delta);
 
-    void setInfoProvider(AudioSessionInfoProvider *provider);
-    // implementation of AudioSessionInfoUpdateListener
-    virtual void onSessionInfoUpdate() const;
+    void setInfoProvider(AudioIODescriptorInterface *provider);
+    // implementation of AudioIODescriptorUpdateListener
+    virtual void onIODescriptorUpdate() const;
 
 private:
     record_client_info_t mRecordClientInfo;
@@ -77,17 +77,17 @@
     uint32_t  mActiveCount;
     AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
     AudioPolicyClientInterface* mClientInterface;
-    const AudioSessionInfoProvider* mInfoProvider;
+    const AudioIODescriptorInterface* mInfoProvider;
 };
 
 class AudioSessionCollection :
     public DefaultKeyedVector<audio_session_t, sp<AudioSession> >,
-    public AudioSessionInfoUpdateListener
+    public AudioIODescriptorUpdateListener
 {
 public:
     status_t addSession(audio_session_t session,
                              const sp<AudioSession>& audioSession,
-                             AudioSessionInfoProvider *provider);
+                             AudioIODescriptorInterface *provider);
 
     status_t removeSession(audio_session_t session);
 
@@ -99,8 +99,8 @@
     bool isSourceActive(audio_source_t source) const;
     audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
-    // implementation of AudioSessionInfoUpdateListener
-    virtual void onSessionInfoUpdate() const;
+    // implementation of AudioIODescriptorUpdateListener
+    virtual void onIODescriptorUpdate() const;
 
     status_t dump(int fd, int spaces) const;
 };
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 92a4c3e..2325e4f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -76,11 +76,12 @@
 
     audio_devices_t types() const { return mDeviceTypes; }
 
-    sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8& address) const;
-    DeviceVector getDevicesFromType(audio_devices_t types) const;
+    // If 'address' is empty, a device with a non-empty address may be returned
+    // if there is no device with the specified 'type' and empty address.
+    sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address) const;
+    DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
     sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
     sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
-    DeviceVector getDevicesFromTypeAddr(audio_devices_t type, const String8& address) const;
 
     audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
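A short illustration of the relaxed getDevice() contract documented above, using hypothetical devices (not from this patch): an empty requested address only falls back to an addressed device when no empty-address match of the requested type exists.

    // Assume the vector holds a speaker (empty address) and a USB device ("card=1;device=0").
    sp<DeviceDescriptor> spk = devices.getDevice(AUDIO_DEVICE_OUT_SPEAKER, String8(""));
    // -> the speaker: exact type + empty-address match.
    sp<DeviceDescriptor> usb = devices.getDevice(AUDIO_DEVICE_OUT_USB_DEVICE, String8(""));
    // -> the addressed USB device, because no empty-address device of that type exists.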
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index cb9f49e..05cfc31 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -107,7 +107,7 @@
     sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t device,
                                              const char *device_address,
                                              const char *device_name,
-                                             bool matchAdress = true) const;
+                                             bool matchAddress = true) const;
 
     status_t dump(int fd) const;
 };
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 24fe7cb..67ac9bc 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -43,6 +43,20 @@
     // For a Profile aka MixPort, tag name and name are equivalent.
     virtual const String8 getTagName() const { return getName(); }
 
+    // FIXME: this is needed because shared MMAP stream clients use the same audio session.
+    // Once capture clients are tracked individually and not per session, this can be removed.
+    // MMAP no-IRQ input streams do not have the default limitation of one active client
+    // max, as they can be used in shared mode by the same application.
+    // NOTE: this works for explicit values set in audio_policy_configuration.xml because
+    // flags are parsed before maxActiveCount by the serializer.
+    void setFlags(uint32_t flags) override
+    {
+        AudioPort::setFlags(flags);
+        if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
+            maxActiveCount = 0;
+        }
+    }
+
     // This method is used for input and direct output, and is not used for other output.
     // If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
     // For input, flags is interpreted as audio_input_flags_t.
@@ -55,7 +69,9 @@
                              audio_format_t *updatedFormat,
                              audio_channel_mask_t channelMask,
                              audio_channel_mask_t *updatedChannelMask,
-                             uint32_t flags) const;
+                             // FIXME parameter type
+                             uint32_t flags,
+                             bool exactMatchRequiredForInputFlags = false) const;
 
     void dump(int fd);
     void log();
diff --git a/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
index fc2c273..32b4440 100644
--- a/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
+++ b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
@@ -24,6 +24,7 @@
 namespace android {
 
 class DeviceDescriptor;
+class DeviceVector;
 
 class SessionRoute : public RefBase
 {
@@ -54,7 +55,7 @@
 
     void log(const char* prefix);
 
-    bool isActive() {
+    bool isActiveOrChanged() {
         return (mDeviceDescriptor != 0) && (mChanged || (mActivityCount > 0));
     }
 
@@ -96,9 +97,10 @@
 
     int incRouteActivity(audio_session_t session);
     int decRouteActivity(audio_session_t session);
-    bool hasRouteChanged(audio_session_t session); // also clears the changed flag
+    bool getAndClearRouteChanged(audio_session_t session); // also clears the changed flag
     void log(const char* caption);
-
+    audio_devices_t getActiveDeviceForStream(audio_stream_type_t streamType,
+                                             const DeviceVector& availableDevices);
     // Specify an Output(Sink) route by passing SessionRoute::SOURCE_TYPE_NA in the
     // source argument.
     // Specify an Input(Source) route by passing SessionRoute::AUDIO_STREAM_DEFAULT
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 92332fb..f0144db 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -164,7 +164,7 @@
 
 status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
                          const sp<AudioSession>& audioSession) {
-    return mSessions.addSession(session, audioSession, /*AudioSessionInfoProvider*/this);
+    return mSessions.addSession(session, audioSession, /*AudioIODescriptorInterface*/this);
 }
 
 status_t AudioInputDescriptor::removeAudioSession(audio_session_t session) {
@@ -179,7 +179,7 @@
 void AudioInputDescriptor::setPatchHandle(audio_patch_handle_t handle)
 {
     mPatchHandle = handle;
-    mSessions.onSessionInfoUpdate();
+    mSessions.onIODescriptorUpdate();
 }
 
 audio_config_base_t AudioInputDescriptor::getConfig() const
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 294a2a6..3c69de5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -55,11 +55,28 @@
     }
 }
 
+audio_config_base_t AudioOutputDescriptor::getConfig() const
+{
+    const audio_config_base_t config = { .sample_rate = mSamplingRate, .channel_mask = mChannelMask,
+            .format = mFormat };
+    return config;
+}
+
 audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
 {
     return mPort.get() != nullptr ? mPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
 }
 
+audio_patch_handle_t AudioOutputDescriptor::getPatchHandle() const
+{
+    return mPatchHandle;
+}
+
+void AudioOutputDescriptor::setPatchHandle(audio_patch_handle_t handle)
+{
+    mPatchHandle = handle;
+}
+
 audio_port_handle_t AudioOutputDescriptor::getId() const
 {
     return mId;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index a9fe48d..e78e121 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -34,51 +34,32 @@
 {
 }
 
+static String8 dumpPatchEndpoints(
+        int spaces, const char *prefix, int count, const audio_port_config *cfgs)
+{
+    String8 result;
+    for (int i = 0; i < count; ++i) {
+        const audio_port_config &cfg = cfgs[i];
+        result.appendFormat("%*s  [%s %d] ", spaces, "", prefix, i + 1);
+        if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
+            std::string device;
+            deviceToString(cfg.ext.device.type, device);
+            result.appendFormat("Device ID %d %s", cfg.id, device.c_str());
+        } else {
+            result.appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+        }
+        result.append("\n");
+    }
+    return result;
+}
+
 status_t AudioPatch::dump(int fd, int spaces, int index) const
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
     String8 result;
-
-    snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
-    result.append(buffer);
-    for (size_t i = 0; i < mPatch.num_sources; i++) {
-        if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
-            std::string device;
-            deviceToString(mPatch.sources[i].ext.device.type, device);
-            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sources[i].id,
-                     device.c_str());
-        } else {
-            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
-                     mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
-        }
-        result.append(buffer);
-    }
-    snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
-    result.append(buffer);
-    for (size_t i = 0; i < mPatch.num_sinks; i++) {
-        if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
-            std::string device;
-            deviceToString(mPatch.sinks[i].ext.device.type, device);
-            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sinks[i].id,
-                     device.c_str());
-        } else {
-            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
-                     mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
-        }
-        result.append(buffer);
-    }
-
+    result.appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
+            spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+    result.append(dumpPatchEndpoints(spaces, "src ", mPatch.num_sources, mPatch.sources));
+    result.append(dumpPatchEndpoints(spaces, "sink", mPatch.num_sinks, mPatch.sinks));
     write(fd, result.string(), result.size());
     return NO_ERROR;
 }
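For reference, the reworked dump emits one header line per patch followed by one line per endpoint; with illustrative values the output would look roughly like:

    Patch 1: owner uid 1041, handle  1, af handle  9
      [src  1] Mix ID 10 I/O handle 13
      [sink 1] Device ID 6 AUDIO_DEVICE_OUT_SPEAKER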
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index d85562e..3fe37ab 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -386,22 +386,12 @@
 
 // --- AudioPortConfig class implementation
 
-AudioPortConfig::AudioPortConfig()
-{
-    mSamplingRate = 0;
-    mChannelMask = AUDIO_CHANNEL_NONE;
-    mFormat = AUDIO_FORMAT_INVALID;
-    memset(&mGain, 0, sizeof(struct audio_gain_config));
-    mGain.index = -1;
-}
-
 status_t AudioPortConfig::applyAudioPortConfig(const struct audio_port_config *config,
                                                struct audio_port_config *backupConfig)
 {
-    struct audio_port_config localBackupConfig;
+    struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
     status_t status = NO_ERROR;
 
-    localBackupConfig.config_mask = config->config_mask;
     toAudioPortConfig(&localBackupConfig);
 
     sp<AudioPort> audioport = getAudioPort();
@@ -425,6 +415,9 @@
     if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
         mGain = config->gain;
     }
+    if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+        mFlags = config->flags;
+    }
 
 exit:
     if (status != NO_ERROR) {
@@ -436,33 +429,38 @@
     return status;
 }
 
+namespace {
+
+template<typename T>
+void updateField(
+        const T& portConfigField, T audio_port_config::*port_config_field,
+        struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
+        unsigned int configMask, T defaultValue)
+{
+    if (dstConfig->config_mask & configMask) {
+        if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
+            dstConfig->*port_config_field = srcConfig->*port_config_field;
+        } else {
+            dstConfig->*port_config_field = portConfigField;
+        }
+    } else {
+        dstConfig->*port_config_field = defaultValue;
+    }
+}
+
+} // namespace
+
 void AudioPortConfig::toAudioPortConfig(struct audio_port_config *dstConfig,
                                         const struct audio_port_config *srcConfig) const
 {
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        dstConfig->sample_rate = mSamplingRate;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE)) {
-            dstConfig->sample_rate = srcConfig->sample_rate;
-        }
-    } else {
-        dstConfig->sample_rate = 0;
-    }
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        dstConfig->channel_mask = mChannelMask;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK)) {
-            dstConfig->channel_mask = srcConfig->channel_mask;
-        }
-    } else {
-        dstConfig->channel_mask = AUDIO_CHANNEL_NONE;
-    }
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        dstConfig->format = mFormat;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT)) {
-            dstConfig->format = srcConfig->format;
-        }
-    } else {
-        dstConfig->format = AUDIO_FORMAT_INVALID;
-    }
+    updateField(mSamplingRate, &audio_port_config::sample_rate,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
+    updateField(mChannelMask, &audio_port_config::channel_mask,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
+            (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
+    updateField(mFormat, &audio_port_config::format,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
+
     sp<AudioPort> audioport = getAudioPort();
     if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
         dstConfig->gain = mGain;
@@ -478,6 +476,9 @@
     } else {
         dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
     }
+
+    updateField(mFlags, &audio_port_config::flags,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
 }
 
 } // namespace android
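The updateField() helper above relies on C++ pointers to data members, so a single template serves sample rate, channel mask, format and flags alike. A self-contained illustration of the idiom (hypothetical Config type, not code from this patch):

    #include <cstdio>

    struct Config { int rate; int format; };

    template <typename T>
    void copyField(T Config::*field, Config *dst, const Config &src) {
        dst->*field = src.*field;   // apply the same member pointer to both objects
    }

    int main() {
        Config src{48000, 1}, dst{0, 0};
        copyField(&Config::rate, &dst, src);           // copies only 'rate'
        std::printf("%d %d\n", dst.rate, dst.format);  // prints "48000 0"
        return 0;
    }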
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index fd6fc1c..26af9b4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -28,6 +28,23 @@
 
 namespace android {
 
+static AudioProfile* createFullDynamicImpl()
+{
+    AudioProfile* dynamicProfile = new AudioProfile(gDynamicFormat,
+            ChannelsVector(), SampleRateVector());
+    dynamicProfile->setDynamicFormat(true);
+    dynamicProfile->setDynamicChannels(true);
+    dynamicProfile->setDynamicRate(true);
+    return dynamicProfile;
+}
+
+// static
+sp<AudioProfile> AudioProfile::createFullDynamic()
+{
+    static sp<AudioProfile> dynamicProfile = createFullDynamicImpl();
+    return dynamicProfile;
+}
+
 status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
                                   audio_format_t format) const
 {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index 7cda46b..91dee35 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -86,7 +86,7 @@
         }
 
         // Recording configuration callback:
-        const AudioSessionInfoProvider* provider = mInfoProvider;
+        const AudioIODescriptorInterface* provider = mInfoProvider;
         const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
                 AUDIO_CONFIG_BASE_INITIALIZER;
         const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
@@ -114,16 +114,16 @@
     return false;
 }
 
-void AudioSession::setInfoProvider(AudioSessionInfoProvider *provider)
+void AudioSession::setInfoProvider(AudioIODescriptorInterface *provider)
 {
     mInfoProvider = provider;
 }
 
-void AudioSession::onSessionInfoUpdate() const
+void AudioSession::onIODescriptorUpdate() const
 {
     if (mActiveCount > 0) {
         // re-send the callback after re-querying the information from the info provider
-        const AudioSessionInfoProvider* provider = mInfoProvider;
+        const AudioIODescriptorInterface* provider = mInfoProvider;
         const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
                 AUDIO_CONFIG_BASE_INITIALIZER;
         const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
@@ -170,7 +170,7 @@
 
 status_t AudioSessionCollection::addSession(audio_session_t session,
                                          const sp<AudioSession>& audioSession,
-                                         AudioSessionInfoProvider *provider)
+                                         AudioIODescriptorInterface *provider)
 {
     ssize_t index = indexOfKey(session);
 
@@ -271,10 +271,10 @@
     return source;
 }
 
-void AudioSessionCollection::onSessionInfoUpdate() const
+void AudioSessionCollection::onIODescriptorUpdate() const
 {
     for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->onSessionInfoUpdate();
+        valueAt(i)->onIODescriptorUpdate();
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index 1e105f5..19eac26 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -412,6 +412,7 @@
     free(data);
 
     ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
+    config.setSource(path);
 
     return NO_ERROR;
 }
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 3b1e751..d3cc8b9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "APM::Devices"
 //#define LOG_NDEBUG 0
 
+#include <audio_utils/string.h>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
 #include "AudioGain.h"
@@ -144,8 +145,8 @@
             }
         }
     }
-    ALOGV("DeviceVector::getDevice() for type %08x address %s found %p",
-          type, address.string(), device.get());
+    ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p",
+            __func__, type, address.string(), device.get());
     return device;
 }
 
@@ -159,7 +160,7 @@
     return nullptr;
 }
 
-DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
+DeviceVector DeviceVector::getDevicesFromTypeMask(audio_devices_t type) const
 {
     DeviceVector devices;
     bool isOutput = audio_is_output_devices(type);
@@ -170,20 +171,8 @@
         if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
             devices.add(itemAt(i));
             type &= ~curType;
-            ALOGV("DeviceVector::getDevicesFromType() for type %x found %p",
-                  itemAt(i)->type(), itemAt(i).get());
-        }
-    }
-    return devices;
-}
-
-DeviceVector DeviceVector::getDevicesFromTypeAddr(
-        audio_devices_t type, const String8& address) const
-{
-    DeviceVector devices;
-    for (const auto& device : *this) {
-        if (device->type() == type && device->mAddress == address) {
-            devices.add(device);
+            ALOGV("DeviceVector::%s() for type %08x found %p",
+                    __func__, itemAt(i)->type(), itemAt(i).get());
         }
     }
     return devices;
@@ -247,18 +236,18 @@
     // ALOG_ASSERT(mModule != NULL);
     dstConfig->ext.device.hw_module =
             mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
-    strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
+    (void)audio_utils_strlcpy_zerofill(dstConfig->ext.device.address, mAddress.string());
 }
 
 void DeviceDescriptor::toAudioPort(struct audio_port *port) const
 {
-    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %x", mId, mDeviceType);
+    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %08x", mId, mDeviceType);
     AudioPort::toAudioPort(port);
     port->id = mId;
     toAudioPortConfig(&port->active_config);
     port->ext.device.type = mDeviceType;
     port->ext.device.hw_module = mModule->getHandle();
-    strncpy(port->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
+    (void)audio_utils_strlcpy_zerofill(port->ext.device.address, mAddress.string());
 }
 
 void DeviceDescriptor::importAudioPort(const sp<AudioPort>& port, bool force) {
@@ -304,7 +293,7 @@
 {
     std::string device;
     deviceToString(mDeviceType, device);
-    ALOGI("Device id:%d type:0x%X:%s, addr:%s", mId,  mDeviceType, device.c_str(),
+    ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId,  mDeviceType, device.c_str(),
           mAddress.string());
 
     AudioPort::log("  ");
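The switch from strncpy() to audio_utils_strlcpy_zerofill() above guarantees that the fixed-size address field is NUL-terminated and that its unused tail holds no stale or uninitialized bytes. An illustrative equivalent of that guarantee (a sketch, not the actual audio_utils implementation):

    #include <string.h>

    // Copy 'src' into a fixed-size buffer: always NUL-terminated, remainder zero-filled.
    static void strlcpy_zerofill_sketch(char *dst, size_t dstSize, const char *src) {
        size_t len = strnlen(src, dstSize - 1);
        memcpy(dst, src, len);
        memset(dst + len, 0, dstSize - len);
    }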
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index aef7dbe..dcc0ec8 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -278,9 +278,10 @@
 sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
                                                              const char *device_address,
                                                              const char *device_name,
-                                                             bool matchAdress) const
+                                                             bool matchAddress) const
 {
-    String8 address = (device_address == nullptr) ? String8("") : String8(device_address);
+    String8 address = (device_address == nullptr || !matchAddress) ?
+            String8("") : String8(device_address);
     // handle legacy remote submix case where the address was not always specified
     if (device_distinguishes_on_address(device) && (address.length() == 0)) {
         address = String8("0");
@@ -288,15 +289,9 @@
 
     for (const auto& hwModule : *this) {
         DeviceVector declaredDevices = hwModule->getDeclaredDevices();
-        DeviceVector deviceList = declaredDevices.getDevicesFromTypeAddr(device, address);
-        if (!deviceList.isEmpty()) {
-            return deviceList.itemAt(0);
-        }
-        if (!matchAdress) {
-            deviceList = declaredDevices.getDevicesFromType(device);
-            if (!deviceList.isEmpty()) {
-                return deviceList.itemAt(0);
-            }
+        sp<DeviceDescriptor> deviceDesc = declaredDevices.getDevice(device, address);
+        if (deviceDesc) {
+            return deviceDesc;
         }
     }
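With this rewrite, matchAddress == false simply blanks the requested address before the lookup, so the relaxed DeviceVector::getDevice() contract (see DeviceDescriptor.h above) provides the fallback. An illustrative pair of calls with hypothetical values (assumed call site, not from this patch):

    // Exact match: only a declared device with this type AND address is returned.
    sp<DeviceDescriptor> exact = mHwModules.getDeviceDescriptor(
            AUDIO_DEVICE_OUT_USB_DEVICE, "card=1;device=0", "USB DAC", true /*matchAddress*/);
    // Address ignored: any declared device of this type may satisfy the query.
    sp<DeviceDescriptor> anyUsb = mHwModules.getDeviceDescriptor(
            AUDIO_DEVICE_OUT_USB_DEVICE, "card=1;device=0", "USB DAC", false /*matchAddress*/);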
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 69dd06b..fbc2384 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -35,7 +35,9 @@
                                     audio_format_t *updatedFormat,
                                     audio_channel_mask_t channelMask,
                                     audio_channel_mask_t *updatedChannelMask,
-                                    uint32_t flags) const
+                                    // FIXME type punning here
+                                    uint32_t flags,
+                                    bool exactMatchRequiredForInputFlags) const
 {
     const bool isPlaybackThread =
             getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SOURCE;
@@ -90,7 +92,7 @@
     // An existing normal stream is compatible with a fast track request,
     // but the fast request will be denied by AudioFlinger and converted to normal track.
     if (isRecordThread && ((getFlags() ^ flags) &
-            ~AUDIO_INPUT_FLAG_FAST)) {
+            ~(exactMatchRequiredForInputFlags ? AUDIO_INPUT_FLAG_NONE : AUDIO_INPUT_FLAG_FAST))) {
         return false;
     }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index a253113..8008a7c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -242,12 +242,7 @@
     AudioProfileTraits::Collection profiles;
     deserializeCollection<AudioProfileTraits>(doc, child, profiles, NULL);
     if (profiles.isEmpty()) {
-        sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
-                                                            ChannelsVector(), SampleRateVector());
-        dynamicProfile->setDynamicFormat(true);
-        dynamicProfile->setDynamicChannels(true);
-        dynamicProfile->setDynamicRate(true);
-        profiles.add(dynamicProfile);
+        profiles.add(AudioProfile::createFullDynamic());
     }
     mixPort->setAudioProfiles(profiles);
 
@@ -328,12 +323,7 @@
     AudioProfileTraits::Collection profiles;
     deserializeCollection<AudioProfileTraits>(doc, root, profiles, NULL);
     if (profiles.isEmpty()) {
-        sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
-                                                            ChannelsVector(), SampleRateVector());
-        dynamicProfile->setDynamicFormat(true);
-        dynamicProfile->setDynamicChannels(true);
-        dynamicProfile->setDynamicRate(true);
-        profiles.add(dynamicProfile);
+        profiles.add(AudioProfile::createFullDynamic());
     }
     deviceDesc->setAudioProfiles(profiles);
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
index 689f4e6..38ab560 100644
--- a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -40,7 +40,7 @@
     return indexOfKey(session) >= 0 && valueFor(session)->mDeviceDescriptor != 0;
 }
 
-bool SessionRouteMap::hasRouteChanged(audio_session_t session)
+bool SessionRouteMap::getAndClearRouteChanged(audio_session_t session)
 {
     if (indexOfKey(session) >= 0) {
         if (valueFor(session)->mChanged) {
@@ -82,7 +82,7 @@
 void SessionRouteMap::log(const char* caption)
 {
     ALOGI("%s ----", caption);
-    for(size_t index = 0; index < size(); index++) {
+    for (size_t index = 0; index < size(); index++) {
         valueAt(index)->log("  ");
     }
 }
@@ -104,9 +104,7 @@
     sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
 
     if (route != 0) {
-        if (((route->mDeviceDescriptor == 0) && (descriptor != 0)) ||
-                ((route->mDeviceDescriptor != 0) &&
-                 ((descriptor == 0) || (!route->mDeviceDescriptor->equals(descriptor))))) {
+        if (descriptor != 0 || route->mDeviceDescriptor != 0) {
             route->mChanged = true;
         }
         route->mRefCount++;
@@ -114,11 +112,29 @@
     } else {
         route = new SessionRoute(session, streamType, source, descriptor, uid);
         route->mRefCount++;
-        add(session, route);
         if (descriptor != 0) {
             route->mChanged = true;
         }
+        add(session, route);
     }
 }
 
+audio_devices_t SessionRouteMap::getActiveDeviceForStream(audio_stream_type_t streamType,
+                                                          const DeviceVector& availableDevices)
+{
+    audio_devices_t device = AUDIO_DEVICE_NONE;
+
+    for (size_t index = 0; index < size(); index++) {
+        sp<SessionRoute> route = valueAt(index);
+        if (streamType == route->mStreamType && route->isActiveOrChanged()
+                && route->mDeviceDescriptor != 0) {
+            device = route->mDeviceDescriptor->type();
+            if (!availableDevices.getDevicesFromTypeMask(device).isEmpty()) {
+                break;
+            }
+        }
+    }
+    return device;
+}
+
 } // namespace android
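A sketch of how a caller such as AudioPolicyManager might use the new getActiveDeviceForStream() helper (assumed call site, not part of this patch): prefer the device of an active explicit route for the stream and fall back to the engine's strategy-based choice otherwise.

    audio_devices_t device =
            mOutputRoutes.getActiveDeviceForStream(stream, mAvailableOutputDevices);
    if (device == AUDIO_DEVICE_NONE) {
        // no active explicit route for this stream: use normal strategy selection
        device = getDeviceForStrategy(getStrategy(stream), false /*fromCache*/);
    }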
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
index ec861c1..ac3f1bc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -29,6 +29,13 @@
     size_t nbCurvePoints = mCurvePoints.size();
     // the volume index in the UI is relative to the min and max volume indices for this stream
     int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
+    if (indexInUi < volIndexMin) {
+        ALOGV("VOLUME remapping index from %d to min index %d", indexInUi, volIndexMin);
+        indexInUi = volIndexMin;
+    } else if (indexInUi > volIndexMax) {
+        ALOGV("VOLUME remapping index from %d to max index %d", indexInUi, volIndexMax);
+        indexInUi = volIndexMax;
+    }
     int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
 
     // Where would this volume index have been inserted in the curve point
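A worked example of why the clamp added above matters: with volIndexMin = 1, volIndexMax = 15, and a curve spanning indices 0..100 (so nbSteps = 1 + 100 - 0 = 101), an out-of-range UI index of 0 would previously yield volIdx = 101 * (0 - 1) / (15 - 1) = -7 after integer truncation, i.e. a position before the start of the curve; after remapping indexInUi to the minimum, volIdx = 101 * (1 - 1) / 14 = 0 as expected.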
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index a75f1cb..9381f1f 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -185,6 +185,9 @@
         <!-- Hearing aid Audio HAL -->
         <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
 
+        <!-- MSD Audio HAL (optional) -->
+        <xi:include href="msd_audio_policy_configuration.xml"/>
+
     </modules>
     <!-- End of Modules section -->
 
diff --git a/services/audiopolicy/config/msd_audio_policy_configuration.xml b/services/audiopolicy/config/msd_audio_policy_configuration.xml
new file mode 100644
index 0000000..a84117e
--- /dev/null
+++ b/services/audiopolicy/config/msd_audio_policy_configuration.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2017-2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- Multi Stream Decoder Audio Policy Configuration file -->
+<module name="msd" halVersion="2.0">
+    <attachedDevices>
+        <item>MS12 Input</item>
+        <item>MS12 Output</item>
+    </attachedDevices>
+    <mixPorts>
+        <mixPort name="ms12 input" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="ms12 compressed input" role="source"
+                flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+            <profile name="" format="AUDIO_FORMAT_AC3"
+                     samplingRates="32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1"/>
+            <profile name="" format="AUDIO_FORMAT_E_AC3"
+                     samplingRates="32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+            <profile name="" format="AUDIO_FORMAT_AC4"
+                     samplingRates="32000,44100,48000"
+                     channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+        </mixPort>
+        <mixPort name="ms12 output" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+            <profile name="" format="AUDIO_FORMAT_AC3"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+            <profile name="" format="AUDIO_FORMAT_E_AC3"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <devicePort tagName="MS12 Input" type="AUDIO_DEVICE_OUT_BUS" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="MS12 Output" type="AUDIO_DEVICE_IN_BUS" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+        </devicePort>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="MS12 Input" sources="ms12 input,ms12 compressed input"/>
+        <route type="mix" sink="ms12 output" sources="MS12 Output"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index 36e0f42..b128a38 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -10,7 +10,6 @@
     $(LOCAL_PATH)/include \
     frameworks/av/services/audiopolicy/engineconfigurable/include \
     frameworks/av/services/audiopolicy/engineconfigurable/interface \
-    frameworks/av/services/audiopolicy/utilities/convert \
 
 LOCAL_SRC_FILES:= ParameterManagerWrapper.cpp
 
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 08bcf4d..941119b 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -148,7 +148,8 @@
     case AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND:
         if (config != AUDIO_POLICY_FORCE_NONE &&
                 config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER &&
-                config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+                config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS &&
+                config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
             ALOGW("setForceUse() invalid config %d for ENCODED_SURROUND", config);
             return BAD_VALUE;
         }
@@ -407,8 +408,7 @@
 
     case STRATEGY_SONIFICATION:
 
-        // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
-        // handleIncallSonification().
+        // If incall, just select the STRATEGY_PHONE device
         if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
             device = getDeviceForStrategyInt(
                     STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
@@ -452,6 +452,12 @@
             }
             // Use both Bluetooth SCO and phone default output when ringing in normal mode
             if (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) {
+                if ((strategy == STRATEGY_SONIFICATION) &&
+                        (device & AUDIO_DEVICE_OUT_SPEAKER) &&
+                        (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
+                    device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+                    device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+                }
                 if (device2 != AUDIO_DEVICE_NONE) {
                     device |= device2;
                     break;
@@ -475,7 +481,7 @@
                 }
             }
             availableOutputDevices =
-                    availableOutputDevices.getDevicesFromType(availableOutputDevicesType);
+                    availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
             if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
                     outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
                 return getDeviceForStrategyInt(
@@ -613,6 +619,23 @@
 
     uint32_t device = AUDIO_DEVICE_NONE;
 
+    // when a call is active, force device selection to match source VOICE_COMMUNICATION
+    // for most other input sources to avoid rerouting call TX audio
+    if (isInCall()) {
+        switch (inputSource) {
+        case AUDIO_SOURCE_DEFAULT:
+        case AUDIO_SOURCE_MIC:
+        case AUDIO_SOURCE_VOICE_RECOGNITION:
+        case AUDIO_SOURCE_UNPROCESSED:
+        case AUDIO_SOURCE_HOTWORD:
+        case AUDIO_SOURCE_CAMCORDER:
+            inputSource = AUDIO_SOURCE_VOICE_COMMUNICATION;
+            break;
+        default:
+            break;
+        }
+    }
+
     switch (inputSource) {
     case AUDIO_SOURCE_VOICE_UPLINK:
       if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 256ef6a..28d2ea6 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -26,7 +26,8 @@
 
 #define AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH 128
 #define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
-#define AUDIO_POLICY_A2DP_OFFLOAD_XML_CONFIG_FILE_NAME "audio_policy_a2dp_offload_configuration.xml"
+#define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \
+        "audio_policy_configuration_a2dp_offload_disabled.xml"
 
 #include <inttypes.h>
 #include <math.h>
@@ -37,6 +38,8 @@
 #include <utils/Log.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicyHelper.h>
+#include <media/PatchBuilder.h>
+#include <private/android_filesystem_config.h>
 #include <soundtrigger/SoundTrigger.h>
 #include <system/audio.h>
 #include <audio_policy_conf.h>
@@ -59,6 +62,26 @@
 // media / notification / system volume.
 constexpr float IN_CALL_EARPIECE_HEADROOM_DB = 3.f;
 
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+// Array of all surround formats.
+static const audio_format_t SURROUND_FORMATS[] = {
+    AUDIO_FORMAT_AC3,
+    AUDIO_FORMAT_E_AC3,
+    AUDIO_FORMAT_DTS,
+    AUDIO_FORMAT_DTS_HD,
+    AUDIO_FORMAT_AAC_LC,
+    AUDIO_FORMAT_DOLBY_TRUEHD,
+    AUDIO_FORMAT_E_AC3_JOC,
+};
+// Array of all AAC formats. When AAC is enabled by users, all AAC formats should be enabled.
+static const audio_format_t AAC_FORMATS[] = {
+    AUDIO_FORMAT_AAC_LC,
+    AUDIO_FORMAT_AAC_HE_V1,
+    AUDIO_FORMAT_AAC_HE_V2,
+    AUDIO_FORMAT_AAC_ELD,
+    AUDIO_FORMAT_AAC_XHE,
+};
+
 // ----------------------------------------------------------------------------
 // AudioPolicyInterface implementation
 // ----------------------------------------------------------------------------
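The two tables above are plain C arrays meant to be walked with ARRAY_SIZE; a minimal sketch of the kind of membership check they enable (hypothetical helper, not necessarily how the patch consumes them):

    static bool isSurroundFormat(audio_format_t format)
    {
        for (size_t i = 0; i < ARRAY_SIZE(SURROUND_FORMATS); i++) {
            if (SURROUND_FORMATS[i] == format) {
                return true;
            }
        }
        return false;
    }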
@@ -347,6 +370,9 @@
                                                       const char *device_name)
 {
     status_t status;
+    String8 reply;
+    AudioParameter param;
+    int isReconfigA2dpSupported = 0;
 
     ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s",
           device, device_address, device_name);
@@ -363,6 +389,26 @@
         return NO_ERROR;
     }
 
+    // For offloaded A2DP, HW modules may have the capability to
+    // configure codecs. Check if any of the loaded HW modules
+    // supports this.
+    // If supported, send a setParameters command to configure the A2DP codecs
+    // and return. There is no need to toggle the device state.
+    if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
+        reply = mpClientInterface->getParameters(
+                    AUDIO_IO_HANDLE_NONE,
+                    String8(AudioParameter::keyReconfigA2dpSupported));
+        AudioParameter repliedParameters(reply);
+        repliedParameters.getInt(
+                String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
+        if (isReconfigA2dpSupported) {
+            const String8 key(AudioParameter::keyReconfigA2dp);
+            param.add(key, String8("true"));
+            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+            return NO_ERROR;
+        }
+    }
+
     // Toggle the device state: UNAVAILABLE -> AVAILABLE
     // This will force reading again the device configuration
     status = setDeviceConnectionState(device,
@@ -432,20 +478,15 @@
 
 sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
         bool isRx, audio_devices_t device, uint32_t delayMs) {
-    struct audio_patch patch;
-    patch.num_sources = 1;
-    patch.num_sinks = 1;
+    PatchBuilder patchBuilder;
 
     sp<DeviceDescriptor> txSourceDeviceDesc;
     if (isRx) {
-        fillAudioPortConfigForDevice(mAvailableOutputDevices, device, &patch.sinks[0]);
-        fillAudioPortConfigForDevice(
-                mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX, &patch.sources[0]);
+        patchBuilder.addSink(findDevice(mAvailableOutputDevices, device)).
+                addSource(findDevice(mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX));
     } else {
-        txSourceDeviceDesc = fillAudioPortConfigForDevice(
-                mAvailableInputDevices, device, &patch.sources[0]);
-        fillAudioPortConfigForDevice(
-                mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX, &patch.sinks[0]);
+        patchBuilder.addSource(txSourceDeviceDesc = findDevice(mAvailableInputDevices, device)).
+                addSink(findDevice(mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX));
     }
 
     audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
@@ -456,9 +497,7 @@
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
         ALOG_ASSERT(!outputDesc->isDuplicated(),
                 "%s() %#x device output %d is duplicated", __func__, outputDevice, output);
-        outputDesc->toAudioPortConfig(&patch.sources[1]);
-        patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
-        patch.num_sources = 2;
+        patchBuilder.addSource(outputDesc, { .stream = AUDIO_STREAM_PATCH });
     }
 
     if (!isRx) {
@@ -480,26 +519,25 @@
     }
 
     audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-    status_t status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
+    status_t status = mpClientInterface->createAudioPatch(
+            patchBuilder.patch(), &afPatchHandle, delayMs);
     ALOGW_IF(status != NO_ERROR,
             "%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX");
     sp<AudioPatch> audioPatch;
     if (status == NO_ERROR) {
-        audioPatch = new AudioPatch(&patch, mUidCached);
+        audioPatch = new AudioPatch(patchBuilder.patch(), mUidCached);
         audioPatch->mAfPatchHandle = afPatchHandle;
         audioPatch->mUid = mUidCached;
     }
     return audioPatch;
 }
 
-sp<DeviceDescriptor> AudioPolicyManager::fillAudioPortConfigForDevice(
-        const DeviceVector& devices, audio_devices_t device, audio_port_config *config) {
-    DeviceVector deviceList = devices.getDevicesFromType(device);
+sp<DeviceDescriptor> AudioPolicyManager::findDevice(
+        const DeviceVector& devices, audio_devices_t device) {
+    DeviceVector deviceList = devices.getDevicesFromTypeMask(device);
     ALOG_ASSERT(!deviceList.isEmpty(),
             "%s() selected device type %#x is not in devices list", __func__, device);
-    sp<DeviceDescriptor> deviceDesc = deviceList.itemAt(0);
-    deviceDesc->toAudioPortConfig(config);
-    return deviceDesc;
+    return deviceList.itemAt(0);
 }
 
 void AudioPolicyManager::setPhoneState(audio_mode_t state)
@@ -513,14 +551,8 @@
         return;
     }
     /// Opens: can these line be executed after the switch of volume curves???
-    // if leaving call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(oldState)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-            handleIncallSonification((audio_stream_type_t)stream, false, true);
-        }
-
         // force reevaluating accessibility routing when call stops
         mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
     }
@@ -589,14 +621,18 @@
             setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
         }
     }
-    // if entering in call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
+
+    // reevaluate routing on all outputs in case tracks have been started during the call
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+        audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
+        if (state != AUDIO_MODE_IN_CALL || desc != mPrimaryOutput) {
+            setOutputDevice(desc, newDevice, (newDevice != AUDIO_DEVICE_NONE), 0 /*delayMs*/);
+        }
+    }
+
     if (isStateInCall(state)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-            handleIncallSonification((audio_stream_type_t)stream, true, true);
-        }
-
         // force reevaluating accessibility routing when call starts
         mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
     }
@@ -765,39 +801,53 @@
         stream_type_to_audio_attributes(*stream, &attributes);
     }
 
-    // TODO: check for existing client for this port ID
-    if (*portId == AUDIO_PORT_HANDLE_NONE) {
-        *portId = AudioPort::getNextUniqueId();
-    }
-
-    sp<SwAudioOutputDescriptor> desc;
-    if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
-        ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
-        if (!audio_has_proportional_frames(config->format)) {
-            return BAD_VALUE;
-        }
-        *stream = streamTypefromAttributesInt(&attributes);
-        *output = desc->mIoHandle;
-        ALOGV("getOutputForAttr() returns output %d", *output);
-        return NO_ERROR;
-    }
-    if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
-        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
-        return BAD_VALUE;
-    }
-
     ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
             " session %d selectedDeviceId %d",
             attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
             session, *selectedDeviceId);
 
-    *stream = streamTypefromAttributesInt(&attributes);
+    // TODO: check for existing client for this port ID
+    if (*portId == AUDIO_PORT_HANDLE_NONE) {
+        *portId = AudioPort::getNextUniqueId();
+    }
 
-    // Explicit routing?
+    // First check for explicit routing (e.g. setPreferredDevice)
     sp<DeviceDescriptor> deviceDesc;
     if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
         deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
+    } else {
+        // If no explicit route, is there a matching dynamic policy that applies?
+        sp<SwAudioOutputDescriptor> desc;
+        if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
+            ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+            if (!audio_has_proportional_frames(config->format)) {
+                return BAD_VALUE;
+            }
+            *stream = streamTypefromAttributesInt(&attributes);
+            *output = desc->mIoHandle;
+            ALOGV("getOutputForAttr() returns output %d", *output);
+            return NO_ERROR;
+        }
+
+        // Virtual sources must always be dynamically or explicitly routed
+        if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+            ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+            return BAD_VALUE;
+        }
     }
+
+    // Virtual sources must always be dynamically or explicitly routed
+    if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+        return BAD_VALUE;
+    }
+
+    *stream = streamTypefromAttributesInt(&attributes);
+
+    // TODO: Should this happen only if an explicit route is active?
+    // The previous code structure meant that this would always happen, which
+    // would appear to result in adding a null deviceDesc when not using an
+    // explicit route. Is that the intended and necessary behavior?
     mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
 
     routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
@@ -811,13 +861,13 @@
           "flags %#x",
           device, config->sample_rate, config->format, config->channel_mask, *flags);
 
-    *output = getOutputForDevice(device, session, *stream, *output, config, flags);
+    *output = getOutputForDevice(device, session, *stream, config, flags);
     if (*output == AUDIO_IO_HANDLE_NONE) {
         mOutputRoutes.removeRoute(session);
         return INVALID_OPERATION;
     }
 
-    DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
+    DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
     *selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
             : AUDIO_PORT_HANDLE_NONE;
 
@@ -830,11 +880,10 @@
         audio_devices_t device,
         audio_session_t session,
         audio_stream_type_t stream,
-        audio_io_handle_t originalOutput,
         const audio_config_t *config,
         audio_output_flags_t *flags)
 {
-    audio_io_handle_t output = originalOutput;
+    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
     status_t status;
 
     // open a direct output if required by specified parameters
@@ -898,22 +947,20 @@
     }
 
     if (profile != 0) {
-        // exclude MMAP streams
-        if ((*flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0 || output != AUDIO_IO_HANDLE_NONE) {
-            for (size_t i = 0; i < mOutputs.size(); i++) {
-                sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-                if (!desc->isDuplicated() && (profile == desc->mProfile)) {
-                    // reuse direct output if currently open by the same client
-                    // and configured with same parameters
-                    if ((config->sample_rate == desc->mSamplingRate) &&
-                        audio_formats_match(config->format, desc->mFormat) &&
-                        (config->channel_mask == desc->mChannelMask) &&
-                        (session == desc->mDirectClientSession)) {
-                        desc->mDirectOpenCount++;
-                        ALOGI("getOutputForDevice() reusing direct output %d for session %d",
-                              mOutputs.keyAt(i), session);
-                        return mOutputs.keyAt(i);
-                    }
+        // exclusive outputs for MMAP and Offload are enforced by different session ids.
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() && (profile == desc->mProfile)) {
+                // reuse direct output if currently open by the same client
+                // and configured with same parameters
+                if ((config->sample_rate == desc->mSamplingRate) &&
+                    (config->format == desc->mFormat) &&
+                    (config->channel_mask == desc->mChannelMask) &&
+                    (session == desc->mDirectClientSession)) {
+                    desc->mDirectOpenCount++;
+                    ALOGI("getOutputForDevice() reusing direct output %d for session %d",
+                        mOutputs.keyAt(i), session);
+                    return mOutputs.keyAt(i);
                 }
             }
         }
@@ -925,7 +972,7 @@
         sp<SwAudioOutputDescriptor> outputDesc =
                 new SwAudioOutputDescriptor(profile, mpClientInterface);
 
-        DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
+        DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
         String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
                 : String8("");
 
@@ -934,8 +981,7 @@
         // only accept an output with the requested parameters
         if (status != NO_ERROR ||
             (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
-            (config->format != AUDIO_FORMAT_DEFAULT &&
-                    !audio_formats_match(config->format, outputDesc->mFormat)) ||
+            (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
             (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
             ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
                     "format %d %d, channel mask %04x %04x", output, config->sample_rate,
@@ -1024,7 +1070,7 @@
             // if a valid format is specified, skip output if not compatible
             if (format != AUDIO_FORMAT_INVALID) {
                 if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-                    if (!audio_formats_match(format, outputDesc->mFormat)) {
+                    if (format != outputDesc->mFormat) {
                         continue;
                     }
                 } else if (!audio_is_linear_pcm(format)) {
@@ -1105,9 +1151,11 @@
         } else {
             newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
         }
-    } else if (mOutputRoutes.hasRouteChanged(session)) {
+    } else if (mOutputRoutes.getAndClearRouteChanged(session)) {
         newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
-        checkStrategyRoute(getStrategy(stream), output);
+        if (newDevice != outputDesc->device()) {
+            checkStrategyRoute(getStrategy(stream), output);
+        }
     } else {
         newDevice = AUDIO_DEVICE_NONE;
     }
@@ -1233,11 +1281,6 @@
         const uint32_t muteWaitMs =
                 setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
 
-        // handle special case for sonification while in call
-        if (isInCall()) {
-            handleIncallSonification(stream, true, false);
-        }
-
         // apply volume rules for current stream and device if necessary
         checkAndSetVolume(stream,
                           mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
@@ -1332,11 +1375,6 @@
     // always handle stream stop, check which stream type is stopping
     handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
 
-    // handle special case for sonification while in call
-    if (isInCall()) {
-        handleIncallSonification(stream, false, false);
-    }
-
     if (outputDesc->mRefCount[stream] > 0) {
         // decrement usage count of this stream on the output
         outputDesc->changeRefCount(stream, -1);
@@ -1363,6 +1401,7 @@
                         (newDevice != desc->device())) {
                     audio_devices_t newDevice2 = getNewOutputDevice(desc, false /*fromCache*/);
                     bool force = desc->device() != newDevice2;
+
                     setOutputDevice(desc,
                                     newDevice2,
                                     force,
@@ -1473,21 +1512,26 @@
         }
         // For MMAP mode, the first call to getInputForAttr() is made on behalf of audioflinger.
         // The second call is for the first active client and sets the UID. Any further call
-        // corresponds to a new client and is only permitted from the same UId.
+        // corresponds to a new client and is only permitted from the same UID.
+        // If the first UID is silenced, allow a new UID connection and replace with new UID
         if (audioSession->openCount() == 1) {
             audioSession->setUid(uid);
         } else if (audioSession->uid() != uid) {
-            ALOGW("getInputForAttr() bad uid %d for session %d uid %d",
-                  uid, session, audioSession->uid());
-            status = INVALID_OPERATION;
-            goto error;
+            if (!audioSession->isSilenced()) {
+                ALOGW("getInputForAttr() bad uid %d for session %d uid %d",
+                      uid, session, audioSession->uid());
+                status = INVALID_OPERATION;
+                goto error;
+            }
+            audioSession->setUid(uid);
+            audioSession->setSilenced(false);
         }
         audioSession->changeOpenCount(1);
         *inputType = API_INPUT_LEGACY;
         if (*portId == AUDIO_PORT_HANDLE_NONE) {
             *portId = AudioPort::getNextUniqueId();
         }
-        inputDevices = mAvailableInputDevices.getDevicesFromType(inputDesc->mDevice);
+        inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(inputDesc->mDevice);
         *selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
                 : AUDIO_PORT_HANDLE_NONE;
         ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session);
@@ -1554,7 +1598,7 @@
         goto error;
     }
 
-    inputDevices = mAvailableInputDevices.getDevicesFromType(device);
+    inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
     *selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
             : AUDIO_PORT_HANDLE_NONE;
 
@@ -1602,10 +1646,11 @@
     // sampling rate and flags may be updated by getInputProfile
     uint32_t profileSamplingRate = (config->sample_rate == 0) ?
             SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
-    audio_format_t profileFormat = config->format;
+    audio_format_t profileFormat;
     audio_channel_mask_t profileChannelMask = config->channel_mask;
     audio_input_flags_t profileFlags = flags;
     for (;;) {
+        profileFormat = config->format; // reset each time through loop, in case it is updated
         profile = getInputProfile(device, address,
                                   profileSamplingRate, profileFormat, profileChannelMask,
                                   profileFlags);
@@ -1721,7 +1766,7 @@
     lConfig.format = profileFormat;
 
     if (address == "") {
-        DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
+        DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
         // the inputs vector must be of size >= 1, but we don't want to crash here
         address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
     }
@@ -1870,6 +1915,7 @@
         if (mCallTxPatch != 0 &&
             inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
             ALOGW("startInput(%d) failed: call in progress", input);
+            *concurrency |= API_INPUT_CONCURRENCY_CALL;
             return INVALID_OPERATION;
         }
 
@@ -1912,17 +1958,20 @@
                         ALOGW("startInput(%d) failed for HOTWORD: "
                                 "other input %d already started for HOTWORD",
                               input, activeDesc->mIoHandle);
+                        *concurrency |= API_INPUT_CONCURRENCY_HOTWORD;
                         return INVALID_OPERATION;
                     }
                 } else {
                     ALOGV("startInput(%d) failed for HOTWORD: other input %d already started",
                           input, activeDesc->mIoHandle);
+                    *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
                     return INVALID_OPERATION;
                 }
             } else {
                 if (activeSource != AUDIO_SOURCE_HOTWORD) {
                     ALOGW("startInput(%d) failed: other input %d already started",
                           input, activeDesc->mIoHandle);
+                    *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
                     return INVALID_OPERATION;
                 }
             }
@@ -1947,6 +1996,7 @@
                 audio_session_t activeSession = activeSessions.keyAt(0);
                 audio_io_handle_t activeHandle = activeDesc->mIoHandle;
                 SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
+                *concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
                 sessions.add(activeSession);
                 inputDesc->setPreemptedSessions(sessions);
                 stopInput(activeHandle, activeSession);
@@ -1968,7 +2018,7 @@
     // Routing?
     mInputRoutes.incRouteActivity(session);
 
-    if (audioSession->activeCount() == 1 || mInputRoutes.hasRouteChanged(session)) {
+    if (audioSession->activeCount() == 1 || mInputRoutes.getAndClearRouteChanged(session)) {
         // indicate active capture to sound trigger service if starting capture from a mic on
         // primary HW module
         audio_devices_t device = getNewInputDevice(inputDesc);
@@ -2215,11 +2265,10 @@
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
         audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
         for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
-            if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+            if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream) || isInCall())) {
                 continue;
             }
-            if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
-                    (isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
+            if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
                 continue;
             }
             routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
@@ -2563,42 +2612,24 @@
 
 status_t AudioPolicyManager::dump(int fd)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
     String8 result;
-
-    snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
-    result.append(buffer);
-
-    snprintf(buffer, SIZE, " Primary Output: %d\n",
+    result.appendFormat("\nAudioPolicyManager Dump: %p\n", this);
+    result.appendFormat(" Primary Output: %d\n",
              hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
-    result.append(buffer);
     std::string stateLiteral;
     AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
-    snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for communications %d\n",
-             mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for media %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA));
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for record %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD));
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for dock %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK));
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for system %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM));
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
-            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for encoded surround output %d\n",
-            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND));
-    result.append(buffer);
-    snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Master mono: %s\n", mMasterMono ? "on" : "off");
-    result.append(buffer);
-
+    result.appendFormat(" Phone state: %s\n", stateLiteral.c_str());
+    const char* forceUses[AUDIO_POLICY_FORCE_USE_CNT] = {
+        "communications", "media", "record", "dock", "system",
+        "HDMI system audio", "encoded surround output", "vibrate ringing" };
+    for (audio_policy_force_use_t i = AUDIO_POLICY_FORCE_FOR_COMMUNICATION;
+         i < AUDIO_POLICY_FORCE_USE_CNT; i = (audio_policy_force_use_t)((int)i + 1)) {
+        result.appendFormat(" Force use for %s: %d\n",
+                forceUses[i], mEngine->getForceUse(i));
+    }
+    result.appendFormat(" TTS output %savailable\n", mTtsOutputAvailable ? "" : "not ");
+    result.appendFormat(" Master mono: %s\n", mMasterMono ? "on" : "off");
+    result.appendFormat(" Config source: %s\n", getConfig().getSource().c_str());
     write(fd, result.string(), result.size());
 
     mAvailableOutputDevices.dump(fd, String8("Available output"));
@@ -2755,9 +2786,32 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::getAudioPort(struct audio_port *port __unused)
+status_t AudioPolicyManager::getAudioPort(struct audio_port *port)
 {
-    return NO_ERROR;
+    if (port == nullptr || port->id == AUDIO_PORT_HANDLE_NONE) {
+        return BAD_VALUE;
+    }
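+    // Look up the port id among available devices first, then among opened outputs and inputs.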
+    sp<DeviceDescriptor> dev = mAvailableOutputDevices.getDeviceFromId(port->id);
+    if (dev != 0) {
+        dev->toAudioPort(port);
+        return NO_ERROR;
+    }
+    dev = mAvailableInputDevices.getDeviceFromId(port->id);
+    if (dev != 0) {
+        dev->toAudioPort(port);
+        return NO_ERROR;
+    }
+    sp<SwAudioOutputDescriptor> out = mOutputs.getOutputFromId(port->id);
+    if (out != 0) {
+        out->toAudioPort(port);
+        return NO_ERROR;
+    }
+    sp<AudioInputDescriptor> in = mInputs.getInputFromId(port->id);
+    if (in != 0) {
+        in->toAudioPort(port);
+        return NO_ERROR;
+    }
+    return BAD_VALUE;
 }
 
 status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
@@ -2771,8 +2825,7 @@
     }
     ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
 
-    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+    if (!audio_patch_is_valid(patch)) {
         return BAD_VALUE;
     }
     // only one source per audio patch supported for now
@@ -2991,28 +3044,8 @@
             }
             // TODO: check from routing capabilities in config file and other conflicting patches
 
-            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-            if (index >= 0) {
-                afPatchHandle = patchDesc->mAfPatchHandle;
-            }
-
-            status_t status = mpClientInterface->createAudioPatch(&newPatch,
-                                                                  &afPatchHandle,
-                                                                  0);
-            ALOGV("createAudioPatch() patch panel returned %d patchHandle %d",
-                                                                  status, afPatchHandle);
-            if (status == NO_ERROR) {
-                if (index < 0) {
-                    patchDesc = new AudioPatch(&newPatch, uid);
-                    addAudioPatch(patchDesc->mHandle, patchDesc);
-                } else {
-                    patchDesc->mPatch = newPatch;
-                }
-                patchDesc->mAfPatchHandle = afPatchHandle;
-                *handle = patchDesc->mHandle;
-                nextAudioPortGeneration();
-                mpClientInterface->onAudioPatchListUpdate();
-            } else {
+            status_t status = installPatch(__func__, index, handle, &newPatch, 0, uid, &patchDesc);
+            if (status != NO_ERROR) {
                 ALOGW("createAudioPatch() patch panel could not connect device patch, error %d",
                 status);
                 return INVALID_OPERATION;
@@ -3145,10 +3178,10 @@
         return BAD_VALUE;
     }
 
-    struct audio_port_config backupConfig;
+    struct audio_port_config backupConfig = {};
     status_t status = audioPortConfig->applyAudioPortConfig(config, &backupConfig);
     if (status == NO_ERROR) {
-        struct audio_port_config newConfig;
+        struct audio_port_config newConfig = {};
         audioPortConfig->toAudioPortConfig(&newConfig, config);
         status = mpClientInterface->setAudioPortConfig(&newConfig, 0);
     }
@@ -3296,7 +3329,7 @@
     sp<AudioSourceDescriptor> sourceDesc =
             new AudioSourceDescriptor(srcDeviceDesc, attributes, uid);
 
-    struct audio_patch dummyPatch;
+    struct audio_patch dummyPatch = {};
     sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
     sourceDesc->mPatchDesc = patchDesc;
 
@@ -3324,7 +3357,6 @@
             mAvailableOutputDevices.getDevice(sinkDevice, String8(""));
 
     audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-    struct audio_patch *patch = &sourceDesc->mPatchDesc->mPatch;
 
     if (srcDeviceDesc->getAudioPort()->mModule->getHandle() ==
             sinkDeviceDesc->getAudioPort()->mModule->getHandle() &&
@@ -3356,16 +3388,14 @@
         // be connected as well as the stream type for volume control
         // - the sink is defined by whatever output device is currently selected for the output
         // though which this patch is routed.
-        patch->num_sinks = 0;
-        patch->num_sources = 2;
-        srcDeviceDesc->toAudioPortConfig(&patch->sources[0], NULL);
-        outputDesc->toAudioPortConfig(&patch->sources[1], NULL);
-        patch->sources[1].ext.mix.usecase.stream = stream;
-        status = mpClientInterface->createAudioPatch(patch,
+        PatchBuilder patchBuilder;
+        patchBuilder.addSource(srcDeviceDesc).addSource(outputDesc, { .stream = stream });
+        status = mpClientInterface->createAudioPatch(patchBuilder.patch(),
                                                               &afPatchHandle,
                                                               0);
         ALOGV("%s patch panel returned %d patchHandle %d", __FUNCTION__,
                                                               status, afPatchHandle);
+        sourceDesc->mPatchDesc->mPatch = *patchBuilder.patch();
         if (status != NO_ERROR) {
             ALOGW("%s patch panel could not connect device patch, error %d",
                   __FUNCTION__, status);
@@ -3448,6 +3478,275 @@
     return computeVolume(stream, index, device);
 }
 
+status_t AudioPolicyManager::getSupportedFormats(audio_io_handle_t ioHandle,
+                                                 FormatVector& formats) {
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) {
+        return BAD_VALUE;
+    }
+    String8 reply;
+    reply = mpClientInterface->getParameters(
+            ioHandle, String8(AudioParameter::keyStreamSupportedFormats));
+    ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
+    AudioParameter repliedParameters(reply);
+    if (repliedParameters.get(
+            String8(AudioParameter::keyStreamSupportedFormats), reply) != NO_ERROR) {
+        ALOGE("%s: failed to retrieve format, bailing out", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    for (auto format : formatsFromString(reply.string())) {
+        // Only AUDIO_FORMAT_AAC_LC is used in the Settings UI to represent all AAC formats.
+        for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+            if (format == AAC_FORMATS[i]) {
+                format = AUDIO_FORMAT_AAC_LC;
+                break;
+            }
+        }
+        bool exist = false;
+        for (size_t i = 0; i < formats.size(); i++) {
+            if (format == formats[i]) {
+                exist = true;
+                break;
+            }
+        }
+        bool isSurroundFormat = false;
+        for (size_t i = 0; i < ARRAY_SIZE(SURROUND_FORMATS); i++) {
+            if (SURROUND_FORMATS[i] == format) {
+                isSurroundFormat = true;
+                break;
+            }
+        }
+        if (!exist && isSurroundFormat) {
+            formats.add(format);
+        }
+    }
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::getSurroundFormats(unsigned int *numSurroundFormats,
+                                                audio_format_t *surroundFormats,
+                                                bool *surroundFormatsEnabled,
+                                                bool reported)
+{
+    if (numSurroundFormats == NULL || (*numSurroundFormats != 0 &&
+            (surroundFormats == NULL || surroundFormatsEnabled == NULL))) {
+        return BAD_VALUE;
+    }
+    ALOGV("getSurroundFormats() numSurroundFormats %d surroundFormats %p surroundFormatsEnabled %p",
+            *numSurroundFormats, surroundFormats, surroundFormatsEnabled);
+
+    // Only return value if there is HDMI output.
+    if ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_HDMI) == 0) {
+        return INVALID_OPERATION;
+    }
+
+    size_t formatsWritten = 0;
+    size_t formatsMax = *numSurroundFormats;
+    *numSurroundFormats = 0;
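+    // Up to formatsMax entries are written to the caller's arrays, but *numSurroundFormats is
+    // incremented for every format found so the caller can allocate a larger buffer and retry.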
+    FormatVector formats;
+    if (reported) {
+        // Only get surround formats which are reported by device.
+        // First list already open outputs that can be routed to this device
+        audio_devices_t device = AUDIO_DEVICE_OUT_HDMI;
+        SortedVector<audio_io_handle_t> outputs;
+        bool reportedFormatFound = false;
+        status_t status;
+        sp<SwAudioOutputDescriptor> desc;
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() && (desc->supportedDevices() & device)) {
+                outputs.add(mOutputs.keyAt(i));
+            }
+        }
+        // Open an output to query dynamic parameters.
+        DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(
+                AUDIO_DEVICE_OUT_HDMI);
+        for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
+            String8 address = hdmiOutputDevices[i]->mAddress;
+            for (const auto& hwModule : mHwModules) {
+                for (size_t i = 0; i < hwModule->getOutputProfiles().size(); i++) {
+                    sp<IOProfile> profile = hwModule->getOutputProfiles()[i];
+                    if (profile->supportDevice(AUDIO_DEVICE_OUT_HDMI) &&
+                            profile->supportDeviceAddress(address)) {
+                        size_t j;
+                        for (j = 0; j < outputs.size(); j++) {
+                            desc = mOutputs.valueFor(outputs.itemAt(j));
+                            if (!desc->isDuplicated() && desc->mProfile == profile) {
+                                break;
+                            }
+                        }
+                        if (j != outputs.size()) {
+                            status = getSupportedFormats(outputs.itemAt(j), formats);
+                            reportedFormatFound |= (status == NO_ERROR);
+                            continue;
+                        }
+
+                        if (!profile->canOpenNewIo()) {
+                            ALOGW("Max Output number %u already opened for this profile %s",
+                                  profile->maxOpenCount, profile->getTagName().c_str());
+                            continue;
+                        }
+
+                        ALOGV("opening output for device %08x with params %s profile %p name %s",
+                              device, address.string(), profile.get(), profile->getName().string());
+                        desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
+                        audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+                        status_t status = desc->open(nullptr, device, address,
+                                                     AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE,
+                                                     &output);
+
+                        if (status == NO_ERROR) {
+                            status = getSupportedFormats(output, formats);
+                            reportedFormatFound |= (status == NO_ERROR);
+                            desc->close();
+                            output = AUDIO_IO_HANDLE_NONE;
+                        }
+                    }
+                }
+            }
+        }
+
+        if (!reportedFormatFound) {
+            return UNKNOWN_ERROR;
+        }
+    } else {
+        for (size_t i = 0; i < ARRAY_SIZE(SURROUND_FORMATS); i++) {
+            formats.add(SURROUND_FORMATS[i]);
+        }
+    }
+    for (size_t i = 0; i < formats.size(); i++) {
+        if (formatsWritten < formatsMax) {
+            surroundFormats[formatsWritten] = formats[i];
+            bool formatEnabled = false;
+            if (formats[i] == AUDIO_FORMAT_AAC_LC) {
+                // Report AAC_LC as enabled if any of the AAC formats is enabled.
+                for (size_t j = 0; j < ARRAY_SIZE(AAC_FORMATS); j++) {
+                    formatEnabled =
+                            mSurroundFormats.find(AAC_FORMATS[j]) != mSurroundFormats.end();
+                    if (formatEnabled) break;
+                }
+            } else {
+                formatEnabled = mSurroundFormats.find(formats[i]) != mSurroundFormats.end();
+            }
+            surroundFormatsEnabled[formatsWritten++] = formatEnabled;
+        }
+        (*numSurroundFormats)++;
+    }
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+{
+    // Check if the audio format is a surround format.
+    bool isSurroundFormat = false;
+    for (size_t i = 0; i < ARRAY_SIZE(SURROUND_FORMATS); i++) {
+        if (audioFormat == SURROUND_FORMATS[i]) {
+            isSurroundFormat = true;
+            break;
+        }
+    }
+    if (!isSurroundFormat) {
+        return BAD_VALUE;
+    }
+
+    // Should only be called when the encoded surround force use is set to MANUAL.
+    audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
+                AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
+    if (forceUse != AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
+        return INVALID_OPERATION;
+    }
+
+    if ((mSurroundFormats.find(audioFormat) != mSurroundFormats.end() && enabled)
+            || (mSurroundFormats.find(audioFormat) == mSurroundFormats.end() && !enabled)) {
+        return NO_ERROR;
+    }
+
+    // The operation is valid only when there is HDMI output available.
+    if ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_HDMI) == 0) {
+        return INVALID_OPERATION;
+    }
+
+    if (enabled) {
+        if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+            for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+                mSurroundFormats.insert(AAC_FORMATS[i]);
+            }
+        } else {
+            mSurroundFormats.insert(audioFormat);
+        }
+    } else {
+        if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+            for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+                mSurroundFormats.erase(AAC_FORMATS[i]);
+            }
+        } else {
+            mSurroundFormats.erase(audioFormat);
+        }
+    }
+
+    sp<SwAudioOutputDescriptor> outputDesc;
+    bool profileUpdated = false;
+    DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(
+            AUDIO_DEVICE_OUT_HDMI);
+    for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
+        // Simulate reconnection to update enabled surround sound formats.
+        String8 address = hdmiOutputDevices[i]->mAddress;
+        String8 name = hdmiOutputDevices[i]->getName();
+        status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
+                                                      AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                                      address.c_str(),
+                                                      name.c_str());
+        if (status != NO_ERROR) {
+            continue;
+        }
+        status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
+                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                                             address.c_str(),
+                                             name.c_str());
+        profileUpdated |= (status == NO_ERROR);
+    }
+    DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromTypeMask(
+                AUDIO_DEVICE_IN_HDMI);
+    for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
+        // Simulate reconnection to update enabled surround sound formats.
+        String8 address = hdmiInputDevices[i]->mAddress;
+        String8 name = hdmiInputDevices[i]->getName();
+        status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
+                                                      AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                                      address.c_str(),
+                                                      name.c_str());
+        if (status != NO_ERROR) {
+            continue;
+        }
+        status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
+                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                                             address.c_str(),
+                                             name.c_str());
+        profileUpdated |= (status == NO_ERROR);
+    }
+
+    // Undo the surround formats change if no audio profile was updated.
+    if (!profileUpdated) {
+        if (enabled) {
+            if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+                for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+                    mSurroundFormats.erase(AAC_FORMATS[i]);
+                }
+            } else {
+                mSurroundFormats.erase(audioFormat);
+            }
+        } else {
+            if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+                for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+                    mSurroundFormats.insert(AAC_FORMATS[i]);
+                }
+            } else {
+                mSurroundFormats.insert(audioFormat);
+            }
+        }
+    }
+
+    return profileUpdated ? NO_ERROR : INVALID_OPERATION;
+}
+
 void AudioPolicyManager::setRecordSilenced(uid_t uid, bool silenced)
 {
     ALOGV("AudioPolicyManager:setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
@@ -3531,21 +3830,26 @@
 
 static status_t deserializeAudioPolicyXmlConfig(AudioPolicyConfig &config) {
     char audioPolicyXmlConfigFile[AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH];
+    std::vector<const char*> fileNames;
     status_t ret;
 
-    for (int i = 0; i < kConfigLocationListSize; i++) {
-        PolicySerializer serializer;
-        bool use_a2dp_offload_config =
-                 property_get_bool("persist.bluetooth.a2dp_offload.enable", false);
-        snprintf(audioPolicyXmlConfigFile,
-                 sizeof(audioPolicyXmlConfigFile),
-                 "%s/%s",
-                 kConfigLocationList[i],
-                 use_a2dp_offload_config ? AUDIO_POLICY_A2DP_OFFLOAD_XML_CONFIG_FILE_NAME :
-                     AUDIO_POLICY_XML_CONFIG_FILE_NAME);
-        ret = serializer.deserialize(audioPolicyXmlConfigFile, config);
-        if (ret == NO_ERROR) {
-            break;
+    if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
+        property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+        // A2DP offload supported but disabled: try to use special XML file
+        fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
+    }
+    fileNames.push_back(AUDIO_POLICY_XML_CONFIG_FILE_NAME);
+
+    for (const char* fileName : fileNames) {
+        for (int i = 0; i < kConfigLocationListSize; i++) {
+            PolicySerializer serializer;
+            snprintf(audioPolicyXmlConfigFile, sizeof(audioPolicyXmlConfigFile),
+                     "%s/%s", kConfigLocationList[i], fileName);
+            ret = serializer.deserialize(audioPolicyXmlConfigFile, config);
+            if (ret == NO_ERROR) {
+                config.setSource(audioPolicyXmlConfigFile);
+                return ret;
+            }
         }
     }
     return ret;
@@ -3555,7 +3859,7 @@
 AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface,
                                        bool /*forTesting*/)
     :
-    mUidCached(getuid()),
+    mUidCached(AID_AUDIOSERVER), // no need to call getuid(), there's only one of us running.
     mpClientInterface(clientInterface),
     mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
     mA2dpSuspended(false),
@@ -3666,7 +3970,8 @@
             sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
                                                                                  mpClientInterface);
             const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
-            const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
+            const DeviceVector &devicesForType = supportedDevices.getDevicesFromTypeMask(
+                    profileType);
             String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
                     : String8("");
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
@@ -3720,7 +4025,7 @@
             sp<AudioInputDescriptor> inputDesc =
                     new AudioInputDescriptor(inProfile, mpClientInterface);
 
-            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
+            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(profileType);
             //   the inputs vector must be of size >= 1, but we don't want to crash here
             String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
                     : String8("");
@@ -3817,6 +4122,7 @@
    mInputs.clear();
    mHwModules.clear();
    mHwModulesAll.clear();
+   mSurroundFormats.clear();
 }
 
 status_t AudioPolicyManager::initCheck()
@@ -4365,14 +4671,23 @@
     }
 
     if (!vectorsEqual(srcOutputs,dstOutputs)) {
+        // get the maximum latency of all source outputs to determine the minimum mute time
+        // guaranteeing that audio from invalidated tracks will be rendered when unmuting
+        uint32_t maxLatency = 0;
+        for (audio_io_handle_t srcOut : srcOutputs) {
+            sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
+            if (desc != 0 && maxLatency < desc->latency()) {
+                maxLatency = desc->latency();
+            }
+        }
         ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
               strategy, srcOutputs[0], dstOutputs[0]);
         // mute strategy while moving tracks from one output to another
         for (audio_io_handle_t srcOut : srcOutputs) {
-            sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOut);
-            if (isStrategyActive(desc, strategy)) {
+            sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
+            if (desc != 0 && isStrategyActive(desc, strategy)) {
                 setStrategyMute(strategy, true, desc);
-                setStrategyMute(strategy, false, desc, MUTE_TIME_MS, newDevice);
+                setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR, newDevice);
             }
             sp<AudioSourceDescriptor> source =
                     getSourceForStrategyOnOutput(srcOut, strategy);
@@ -4474,6 +4789,20 @@
         }
     }
 
+    // Check if an explicit routing request exists for an active stream on this output and
+    // use it in priority before any other rule
+    for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
+        if (outputDesc->isStreamActive((audio_stream_type_t)stream)) {
+            audio_devices_t forcedDevice =
+                    mOutputRoutes.getActiveDeviceForStream(
+                            (audio_stream_type_t)stream, mAvailableOutputDevices);
+
+            if (forcedDevice != AUDIO_DEVICE_NONE) {
+                return forcedDevice;
+            }
+        }
+    }
+
     // check the following by order of priority to request a routing change if necessary:
     // 1: the strategy enforced audible is active and enforced on the output:
     //      use device for strategy enforced audible
@@ -4493,11 +4822,15 @@
     //      use device for strategy DTMF
     // 9: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
     //      use device for strategy t-t-s
+
+    // FIXME: extend use of isStrategyActiveOnSameModule() to all strategies
+    // with a refined rule considering mutually exclusive devices (using same backend)
+    // as opposed to all streams on the same audio HAL module.
     if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE) &&
         mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
         device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
     } else if (isInCall() ||
-                    isStrategyActive(outputDesc, STRATEGY_PHONE)) {
+               isStrategyActiveOnSameModule(outputDesc, STRATEGY_PHONE)) {
         device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION)) {
         device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
@@ -4535,10 +4868,13 @@
         }
     }
 
+    // If we are not in call and no client is active on this input, this method returns
+    // AUDIO_DEVICE_NONE, causing the patch on the input stream to be released.
     audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
-    if (isInCall()) {
-        device = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
-    } else if (source != AUDIO_SOURCE_DEFAULT) {
+    if (source == AUDIO_SOURCE_DEFAULT && isInCall()) {
+        source = AUDIO_SOURCE_VOICE_COMMUNICATION;
+    }
+    if (source != AUDIO_SOURCE_DEFAULT) {
         device = getDeviceAndMixForInputSource(source);
     }
 
@@ -4678,19 +5014,16 @@
 audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
                                                          bool fromCache)
 {
-    // Routing
-    // see if we have an explicit route
-    // scan the whole RouteMap, for each entry, convert the stream type to a strategy
-    // (getStrategy(stream)).
-    // if the strategy from the stream type in the RouteMap is the same as the argument above,
-    // and activity count is non-zero and the device in the route descriptor is available
-    // then select this device.
-    for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
-        sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
-        routing_strategy routeStrategy = getStrategy(route->mStreamType);
-        if ((routeStrategy == strategy) && route->isActive() &&
-                (mAvailableOutputDevices.indexOf(route->mDeviceDescriptor) >= 0)) {
-            return route->mDeviceDescriptor->type();
+    // Check if an explicit routing request exists for a stream type corresponding to the
+    // specified strategy and use it in priority over default routing rules.
+    for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
+        if (getStrategy((audio_stream_type_t)stream) == strategy) {
+            audio_devices_t forcedDevice =
+                    mOutputRoutes.getActiveDeviceForStream(
+                            (audio_stream_type_t)stream, mAvailableOutputDevices);
+            if (forcedDevice != AUDIO_DEVICE_NONE) {
+                return forcedDevice;
+            }
         }
     }
 
@@ -4861,54 +5194,20 @@
     } else {
         DeviceVector deviceList;
         if ((address == NULL) || (strlen(address) == 0)) {
-            deviceList = mAvailableOutputDevices.getDevicesFromType(device);
+            deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device);
         } else {
-            deviceList = mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
+            sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(
+                    device, String8(address));
+            if (deviceDesc) deviceList.add(deviceDesc);
         }
 
         if (!deviceList.isEmpty()) {
-            struct audio_patch patch;
-            outputDesc->toAudioPortConfig(&patch.sources[0]);
-            patch.num_sources = 1;
-            patch.num_sinks = 0;
+            PatchBuilder patchBuilder;
+            patchBuilder.addSource(outputDesc);
             for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++) {
-                deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
-                patch.num_sinks++;
+                patchBuilder.addSink(deviceList.itemAt(i));
             }
-            ssize_t index;
-            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-                index = mAudioPatches.indexOfKey(*patchHandle);
-            } else {
-                index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
-            }
-            sp< AudioPatch> patchDesc;
-            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-            if (index >= 0) {
-                patchDesc = mAudioPatches.valueAt(index);
-                afPatchHandle = patchDesc->mAfPatchHandle;
-            }
-
-            status_t status = mpClientInterface->createAudioPatch(&patch,
-                                                                   &afPatchHandle,
-                                                                   delayMs);
-            ALOGV("setOutputDevice() createAudioPatch returned %d patchHandle %d"
-                    "num_sources %d num_sinks %d",
-                                       status, afPatchHandle, patch.num_sources, patch.num_sinks);
-            if (status == NO_ERROR) {
-                if (index < 0) {
-                    patchDesc = new AudioPatch(&patch, mUidCached);
-                    addAudioPatch(patchDesc->mHandle, patchDesc);
-                } else {
-                    patchDesc->mPatch = patch;
-                }
-                patchDesc->mAfPatchHandle = afPatchHandle;
-                if (patchHandle) {
-                    *patchHandle = patchDesc->mHandle;
-                }
-                outputDesc->setPatchHandle(patchDesc->mHandle);
-                nextAudioPortGeneration();
-                mpClientInterface->onAudioPatchListUpdate();
-            }
+            installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
         }
 
         // inform all input as well
@@ -4966,53 +5265,21 @@
     if ((device != AUDIO_DEVICE_NONE) && ((device != inputDesc->mDevice) || force)) {
         inputDesc->mDevice = device;
 
-        DeviceVector deviceList = mAvailableInputDevices.getDevicesFromType(device);
+        DeviceVector deviceList = mAvailableInputDevices.getDevicesFromTypeMask(device);
         if (!deviceList.isEmpty()) {
-            struct audio_patch patch;
-            inputDesc->toAudioPortConfig(&patch.sinks[0]);
+            PatchBuilder patchBuilder;
+            patchBuilder.addSink(inputDesc,
             // AUDIO_SOURCE_HOTWORD is for internal use only:
             // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
-            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
-                    !inputDesc->isSoundTrigger()) {
-                patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
-            }
-            patch.num_sinks = 1;
+                    [inputDesc](const PatchBuilder::mix_usecase_t& usecase) {
+                        auto result = usecase;
+                        if (result.source == AUDIO_SOURCE_HOTWORD && !inputDesc->isSoundTrigger()) {
+                            result.source = AUDIO_SOURCE_VOICE_RECOGNITION;
+                        }
+                        return result; }).
             //only one input device for now
-            deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
-            patch.num_sources = 1;
-            ssize_t index;
-            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-                index = mAudioPatches.indexOfKey(*patchHandle);
-            } else {
-                index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
-            }
-            sp< AudioPatch> patchDesc;
-            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-            if (index >= 0) {
-                patchDesc = mAudioPatches.valueAt(index);
-                afPatchHandle = patchDesc->mAfPatchHandle;
-            }
-
-            status_t status = mpClientInterface->createAudioPatch(&patch,
-                                                                  &afPatchHandle,
-                                                                  0);
-            ALOGV("setInputDevice() createAudioPatch returned %d patchHandle %d",
-                                                                          status, afPatchHandle);
-            if (status == NO_ERROR) {
-                if (index < 0) {
-                    patchDesc = new AudioPatch(&patch, mUidCached);
-                    addAudioPatch(patchDesc->mHandle, patchDesc);
-                } else {
-                    patchDesc->mPatch = patch;
-                }
-                patchDesc->mAfPatchHandle = afPatchHandle;
-                if (patchHandle) {
-                    *patchHandle = patchDesc->mHandle;
-                }
-                inputDesc->setPatchHandle(patchDesc->mHandle);
-                nextAudioPortGeneration();
-                mpClientInterface->onAudioPatchListUpdate();
-            }
+                    addSource(deviceList.itemAt(0));
+            status = installPatch(__func__, patchHandle, inputDesc.get(), patchBuilder.patch(), 0);
         }
     }
     return status;
@@ -5054,21 +5321,46 @@
     // TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
     // the best matching profile, not the first one.
 
+    sp<IOProfile> firstInexact;
+    uint32_t updatedSamplingRate = 0;
+    audio_format_t updatedFormat = AUDIO_FORMAT_INVALID;
+    audio_channel_mask_t updatedChannelMask = AUDIO_CHANNEL_INVALID;
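+    // A profile matching with exact input flags is returned immediately; the first profile
+    // matching with relaxed flags is remembered and used as a fallback after the scan.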
     for (const auto& hwModule : mHwModules) {
         for (const auto& profile : hwModule->getInputProfiles()) {
             // profile->log();
+            //updatedFormat = format;
             if (profile->isCompatibleProfile(device, address, samplingRate,
-                                             &samplingRate /*updatedSamplingRate*/,
+                                             &samplingRate  /*updatedSamplingRate*/,
                                              format,
-                                             &format /*updatedFormat*/,
+                                             &format,       /*updatedFormat*/
                                              channelMask,
-                                             &channelMask /*updatedChannelMask*/,
-                                             (audio_output_flags_t) flags)) {
-
+                                             &channelMask   /*updatedChannelMask*/,
+                                             // FIXME ugly cast
+                                             (audio_output_flags_t) flags,
+                                             true /*exactMatchRequiredForInputFlags*/)) {
                 return profile;
             }
+            if (firstInexact == nullptr && profile->isCompatibleProfile(device, address,
+                                             samplingRate,
+                                             &updatedSamplingRate,
+                                             format,
+                                             &updatedFormat,
+                                             channelMask,
+                                             &updatedChannelMask,
+                                             // FIXME ugly cast
+                                             (audio_output_flags_t) flags,
+                                             false /*exactMatchRequiredForInputFlags*/)) {
+                firstInexact = profile;
+            }
+
         }
     }
+    if (firstInexact != nullptr) {
+        samplingRate = updatedSamplingRate;
+        format = updatedFormat;
+        channelMask = updatedChannelMask;
+        return firstInexact;
+    }
     return NULL;
 }
 
@@ -5095,7 +5387,7 @@
     // then select this device.
     for (size_t routeIndex = 0; routeIndex < mInputRoutes.size(); routeIndex++) {
          sp<SessionRoute> route = mInputRoutes.valueAt(routeIndex);
-         if ((inputSource == route->mSource) && route->isActive() &&
+         if ((inputSource == route->mSource) && route->isActiveOrChanged() &&
                  (mAvailableInputDevices.indexOf(route->mDeviceDescriptor) >= 0)) {
              return route->mDeviceDescriptor->type();
          }
@@ -5121,8 +5413,8 @@
         return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
     }
 
-    // in-call: always cap earpiece volume by voice volume + some low headroom
-    if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+    // in-call: always cap volume by voice volume + some low headroom
+    if ((stream != AUDIO_STREAM_VOICE_CALL) &&
             (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
         switch (stream) {
         case AUDIO_STREAM_SYSTEM:
@@ -5134,9 +5426,9 @@
         case AUDIO_STREAM_DTMF:
         case AUDIO_STREAM_ACCESSIBILITY: {
             int voiceVolumeIndex =
-                mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+                mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
             const float maxVoiceVolDb =
-                computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+                computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
                 + IN_CALL_EARPIECE_HEADROOM_DB;
             if (volumeDB > maxVoiceVolDb) {
                 ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
@@ -5235,7 +5527,10 @@
     }
 
     float volumeDb = computeVolume(stream, index, device);
-    if (outputDesc->isFixedVolume(device)) {
+    if (outputDesc->isFixedVolume(device) ||
+            // Force VoIP volume to max for bluetooth SCO
+            ((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
+             (device & AUDIO_DEVICE_OUT_ALL_SCO) != 0)) {
         volumeDb = 0.0f;
     }
 
@@ -5330,55 +5625,6 @@
     }
 }
 
-void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
-                                                      bool starting, bool stateChange)
-{
-    if(!hasPrimaryOutput()) {
-        return;
-    }
-
-    // if the stream pertains to sonification strategy and we are in call we must
-    // mute the stream if it is low visibility. If it is high visibility, we must play a tone
-    // in the device used for phone strategy and play the tone if the selected device does not
-    // interfere with the device used for phone strategy
-    // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
-    // many times as there are active tracks on the output
-    const routing_strategy stream_strategy = getStrategy(stream);
-    if ((stream_strategy == STRATEGY_SONIFICATION) ||
-            ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
-        sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput;
-        ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
-                stream, starting, outputDesc->mDevice, stateChange);
-        if (outputDesc->mRefCount[stream]) {
-            int muteCount = 1;
-            if (stateChange) {
-                muteCount = outputDesc->mRefCount[stream];
-            }
-            if (audio_is_low_visibility(stream)) {
-                ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
-                for (int i = 0; i < muteCount; i++) {
-                    setStreamMute(stream, starting, mPrimaryOutput);
-                }
-            } else {
-                ALOGV("handleIncallSonification() high visibility");
-                if (outputDesc->device() &
-                        getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
-                    ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
-                    for (int i = 0; i < muteCount; i++) {
-                        setStreamMute(stream, starting, mPrimaryOutput);
-                    }
-                }
-                if (starting) {
-                    mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
-                                                 AUDIO_STREAM_VOICE_CALL);
-                } else {
-                    mpClientInterface->stopTone();
-                }
-            }
-        }
-    }
-}
-
 audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
 {
     // flags to stream type mapping
@@ -5477,6 +5723,20 @@
     return false;
 }
 
+bool AudioPolicyManager::isStrategyActiveOnSameModule(const sp<AudioOutputDescriptor>& outputDesc,
+                                          routing_strategy strategy, uint32_t inPastMs,
+                                          nsecs_t sysTime) const
+{
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+        if (outputDesc->sharesHwModuleWith(desc)
+            && isStrategyActive(desc, strategy, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
 {
     return mEngine->getForceUse(usage);
@@ -5536,81 +5796,110 @@
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
     ALOGD("%s: forced use = %d", __FUNCTION__, forceUse);
 
-    // Analyze original support for various formats.
-    bool supportsAC3 = false;
-    bool supportsOtherSurround = false;
-    bool supportsIEC61937 = false;
-    for (ssize_t formatIndex = 0; formatIndex < (ssize_t)formats.size(); formatIndex++) {
-        audio_format_t format = formats[formatIndex];
-        switch (format) {
-            case AUDIO_FORMAT_AC3:
-                supportsAC3 = true;
-                break;
-            case AUDIO_FORMAT_E_AC3:
-            case AUDIO_FORMAT_DTS:
-            case AUDIO_FORMAT_DTS_HD:
-                // If ALWAYS, remove all other surround formats here since we will add them later.
-                if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
-                    formats.removeAt(formatIndex);
-                    formatIndex--;
-                }
-                supportsOtherSurround = true;
-                break;
-            case AUDIO_FORMAT_IEC61937:
-                supportsIEC61937 = true;
-                break;
-            default:
-                break;
+    // If MANUAL, restrict the supported surround sound formats to the currently enabled ones.
+    if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
+        formats.clear();
+        for (auto it = mSurroundFormats.begin(); it != mSurroundFormats.end(); it++) {
+            formats.add(*it);
         }
-    }
+        // Always enable IEC61937 when in MANUAL mode.
+        formats.add(AUDIO_FORMAT_IEC61937);
+    } else { // NEVER, AUTO or ALWAYS
+        // Analyze original support for various formats.
+        bool supportsAC3 = false;
+        bool supportsOtherSurround = false;
+        bool supportsIEC61937 = false;
+        mSurroundFormats.clear();
+        for (ssize_t formatIndex = 0; formatIndex < (ssize_t)formats.size(); formatIndex++) {
+            audio_format_t format = formats[formatIndex];
+            switch (format) {
+                case AUDIO_FORMAT_AC3:
+                    supportsAC3 = true;
+                    break;
+                case AUDIO_FORMAT_E_AC3:
+                case AUDIO_FORMAT_DTS:
+                case AUDIO_FORMAT_DTS_HD:
+                    // If ALWAYS, remove all other surround formats here
+                    // since we will add them later.
+                    if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+                        formats.removeAt(formatIndex);
+                        formatIndex--;
+                    }
+                    supportsOtherSurround = true;
+                    break;
+                case AUDIO_FORMAT_IEC61937:
+                    supportsIEC61937 = true;
+                    break;
+                default:
+                    break;
+            }
+        }
 
-    // Modify formats based on surround preferences.
-    // If NEVER, remove support for surround formats.
-    if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
-        if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
-            // Remove surround sound related formats.
-            for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
+        // Modify formats based on surround preferences.
+        // If NEVER, remove support for surround formats.
+        if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
+            if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
+                // Remove surround sound related formats.
+                for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
+                    audio_format_t format = formats[formatIndex];
+                    switch(format) {
+                        case AUDIO_FORMAT_AC3:
+                        case AUDIO_FORMAT_E_AC3:
+                        case AUDIO_FORMAT_DTS:
+                        case AUDIO_FORMAT_DTS_HD:
+                        case AUDIO_FORMAT_IEC61937:
+                            formats.removeAt(formatIndex);
+                            break;
+                        default:
+                            formatIndex++; // keep it
+                            break;
+                    }
+                }
+                supportsAC3 = false;
+                supportsOtherSurround = false;
+                supportsIEC61937 = false;
+            }
+        } else { // AUTO or ALWAYS
+            // Most TVs support AC3 even if they do not report it in the EDID.
+            if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
+                    && !supportsAC3) {
+                formats.add(AUDIO_FORMAT_AC3);
+                supportsAC3 = true;
+            }
+
+            // If ALWAYS, add support for raw surround formats if all are missing.
+            // This assumes that if any of these formats are reported by the HAL
+            // then the report is valid and should not be modified.
+            if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+                formats.add(AUDIO_FORMAT_E_AC3);
+                formats.add(AUDIO_FORMAT_DTS);
+                formats.add(AUDIO_FORMAT_DTS_HD);
+                supportsOtherSurround = true;
+            }
+
+            // Add support for IEC61937 if any raw surround supported.
+            // The HAL could do this but add it here, just in case.
+            if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
+                formats.add(AUDIO_FORMAT_IEC61937);
+                supportsIEC61937 = true;
+            }
+
+            // Add reported surround sound formats to enabled surround formats.
+            for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
                 audio_format_t format = formats[formatIndex];
                 switch(format) {
                     case AUDIO_FORMAT_AC3:
                     case AUDIO_FORMAT_E_AC3:
                     case AUDIO_FORMAT_DTS:
                     case AUDIO_FORMAT_DTS_HD:
-                    case AUDIO_FORMAT_IEC61937:
-                        formats.removeAt(formatIndex);
-                        break;
+                    case AUDIO_FORMAT_AAC_LC:
+                    case AUDIO_FORMAT_DOLBY_TRUEHD:
+                    case AUDIO_FORMAT_E_AC3_JOC:
+                        mSurroundFormats.insert(format);
                     default:
-                        formatIndex++; // keep it
                         break;
                 }
             }
-            supportsAC3 = false;
-            supportsOtherSurround = false;
-            supportsIEC61937 = false;
-        }
-    } else { // AUTO or ALWAYS
-        // Most TVs support AC3 even if they do not report it in the EDID.
-        if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
-                && !supportsAC3) {
-            formats.add(AUDIO_FORMAT_AC3);
-            supportsAC3 = true;
-        }
-
-        // If ALWAYS, add support for raw surround formats if all are missing.
-        // This assumes that if any of these formats are reported by the HAL
-        // then the report is valid and should not be modified.
-        if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
-            formats.add(AUDIO_FORMAT_E_AC3);
-            formats.add(AUDIO_FORMAT_DTS);
-            formats.add(AUDIO_FORMAT_DTS_HD);
-            supportsOtherSurround = true;
-        }
-
-        // Add support for IEC61937 if any raw surround supported.
-        // The HAL could do this but add it here, just in case.
-        if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
-            formats.add(AUDIO_FORMAT_IEC61937);
-            supportsIEC61937 = true;
         }
     }
 }
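
The filtering above implements a three-way policy: NEVER strips every surround-related format, while AUTO/ALWAYS force-add AC3 (and, for ALWAYS, the raw E-AC3/DTS/DTS-HD set) and append IEC61937 whenever any raw surround format is present, before recording the result in mSurroundFormats. The standalone sketch below mirrors that decision logic with plain C++ types; the enum names and the simplifications (no remove-and-re-add of reported formats in the ALWAYS case) are illustrative only, not the framework code.

```cpp
// Simplified, standalone model of the surround-format policy above.
// Plain C++ types stand in for FormatVector/audio_format_t; illustrative only.
#include <algorithm>
#include <cstdio>
#include <vector>

enum class Fmt { PCM_16, AC3, E_AC3, DTS, DTS_HD, IEC61937 };
enum class Force { NEVER, AUTO, ALWAYS };

static bool isSurround(Fmt f) {
    return f == Fmt::AC3 || f == Fmt::E_AC3 || f == Fmt::DTS ||
           f == Fmt::DTS_HD || f == Fmt::IEC61937;
}

static void filterSurround(std::vector<Fmt>& formats, Force force, bool alwaysForceAC3) {
    auto has = [&](Fmt f) {
        return std::find(formats.begin(), formats.end(), f) != formats.end();
    };
    if (force == Force::NEVER) {
        // NEVER: drop every surround-related format.
        formats.erase(std::remove_if(formats.begin(), formats.end(), isSurround),
                      formats.end());
        return;
    }
    // AUTO or ALWAYS: assume AC3 works even when not reported.
    if ((alwaysForceAC3 || force == Force::ALWAYS) && !has(Fmt::AC3)) {
        formats.push_back(Fmt::AC3);
    }
    // ALWAYS: make sure the raw surround set is offered.
    if (force == Force::ALWAYS) {
        for (Fmt f : {Fmt::E_AC3, Fmt::DTS, Fmt::DTS_HD}) {
            if (!has(f)) formats.push_back(f);
        }
    }
    // Offer IEC61937 passthrough whenever any raw surround format is present.
    bool anyRaw = has(Fmt::AC3) || has(Fmt::E_AC3) || has(Fmt::DTS) || has(Fmt::DTS_HD);
    if (anyRaw && !has(Fmt::IEC61937)) formats.push_back(Fmt::IEC61937);
}

int main() {
    std::vector<Fmt> formats = {Fmt::PCM_16, Fmt::DTS};
    filterSurround(formats, Force::ALWAYS, /*alwaysForceAC3=*/true);
    std::printf("formats after filtering: %zu\n", formats.size()); // prints 6
    return 0;
}
```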
@@ -5632,8 +5921,9 @@
                 maskIndex++;
             }
         }
-    // If ALWAYS, then make sure we at least support 5.1
-    } else if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+    // If ALWAYS or MANUAL, then make sure we at least support 5.1
+    } else if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS
+            || forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
         bool supports5dot1 = false;
         // Are there any channel masks that can be considered "surround"?
         for (audio_channel_mask_t channelMask : channelMasks) {
@@ -5660,7 +5950,7 @@
     if (profiles.hasDynamicFormat()) {
         reply = mpClientInterface->getParameters(
                 ioHandle, String8(AudioParameter::keyStreamSupportedFormats));
-        ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
+        ALOGV("%s: supported formats %d, %s", __FUNCTION__, ioHandle, reply.string());
         AudioParameter repliedParameters(reply);
         if (repliedParameters.get(
                 String8(AudioParameter::keyStreamSupportedFormats), reply) != NO_ERROR) {
@@ -5710,4 +6000,58 @@
     }
 }
 
+status_t AudioPolicyManager::installPatch(const char *caller,
+                                          audio_patch_handle_t *patchHandle,
+                                          AudioIODescriptorInterface *ioDescriptor,
+                                          const struct audio_patch *patch,
+                                          int delayMs)
+{
+    ssize_t index = mAudioPatches.indexOfKey(
+            patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE ?
+            *patchHandle : ioDescriptor->getPatchHandle());
+    sp<AudioPatch> patchDesc;
+    status_t status = installPatch(
+            caller, index, patchHandle, patch, delayMs, mUidCached, &patchDesc);
+    if (status == NO_ERROR) {
+        ioDescriptor->setPatchHandle(patchDesc->mHandle);
+    }
+    return status;
+}
+
+status_t AudioPolicyManager::installPatch(const char *caller,
+                                          ssize_t index,
+                                          audio_patch_handle_t *patchHandle,
+                                          const struct audio_patch *patch,
+                                          int delayMs,
+                                          uid_t uid,
+                                          sp<AudioPatch> *patchDescPtr)
+{
+    sp<AudioPatch> patchDesc;
+    audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+    if (index >= 0) {
+        patchDesc = mAudioPatches.valueAt(index);
+        afPatchHandle = patchDesc->mAfPatchHandle;
+    }
+
+    status_t status = mpClientInterface->createAudioPatch(patch, &afPatchHandle, delayMs);
+    ALOGV("%s() AF::createAudioPatch returned %d patchHandle %d num_sources %d num_sinks %d",
+            caller, status, afPatchHandle, patch->num_sources, patch->num_sinks);
+    if (status == NO_ERROR) {
+        if (index < 0) {
+            patchDesc = new AudioPatch(patch, uid);
+            addAudioPatch(patchDesc->mHandle, patchDesc);
+        } else {
+            patchDesc->mPatch = *patch;
+        }
+        patchDesc->mAfPatchHandle = afPatchHandle;
+        if (patchHandle) {
+            *patchHandle = patchDesc->mHandle;
+        }
+        nextAudioPortGeneration();
+        mpClientInterface->onAudioPatchListUpdate();
+    }
+    if (patchDescPtr) *patchDescPtr = patchDesc;
+    return status;
+}
+
 } // namespace android
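
The two installPatch() overloads added above factor out a pattern previously repeated at several call sites: look up an existing patch (by an explicit handle or by the I/O descriptor's stored handle), ask AudioFlinger to create or update it, then either register a new AudioPatch descriptor or refresh the existing one and notify listeners. The sketch below models just the create-or-update bookkeeping with a std::map; the names and types are placeholders, not the framework classes.

```cpp
// Standalone model of the create-or-update bookkeeping done by installPatch().
// PatchDesc/PatchStore are placeholders for AudioPatch/mAudioPatches.
#include <cstdio>
#include <map>
#include <memory>

struct PatchDesc {
    int handle;        // policy-side handle (stands in for patchDesc->mHandle)
    int afHandle = -1; // AudioFlinger-side handle (stands in for mAfPatchHandle)
};

using PatchStore = std::map<int, std::shared_ptr<PatchDesc>>;

// Returns the policy-side handle, creating or updating the entry as needed.
static int installOrUpdate(PatchStore& patches, int requestedHandle, int newAfHandle) {
    auto it = patches.find(requestedHandle);
    if (it == patches.end()) {
        // No existing patch: create a descriptor and register it.
        auto desc = std::make_shared<PatchDesc>();
        desc->handle = requestedHandle;
        desc->afHandle = newAfHandle;
        patches[desc->handle] = desc;
        return desc->handle;
    }
    // Existing patch: keep the descriptor, refresh the AF-side handle.
    it->second->afHandle = newAfHandle;
    return it->second->handle;
}

int main() {
    PatchStore patches;
    int h = installOrUpdate(patches, /*requestedHandle=*/1, /*newAfHandle=*/100);
    h = installOrUpdate(patches, h, /*newAfHandle=*/101); // update path
    std::printf("patch %d -> af handle %d\n", h, patches[h]->afHandle); // 1 -> 101
    return 0;
}
```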
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index d05ba1f..136e522 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -18,6 +18,7 @@
 
 #include <atomic>
 #include <memory>
+#include <unordered_set>
 
 #include <stdint.h>
 #include <sys/types.h>
@@ -67,6 +68,10 @@
 // is switched
 #define MUTE_TIME_MS 2000
 
+// multiplication factor applied to output latency when calculating a safe mute delay
+// before invalidating tracks
+#define LATENCY_MUTE_FACTOR 4
+
 #define NUM_TEST_OUTPUTS 5
 
 #define NUM_VOL_CURVE_KNEES 2
@@ -233,6 +238,12 @@
         virtual float    getStreamVolumeDB(
                     audio_stream_type_t stream, int index, audio_devices_t device);
 
+        virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+                                            audio_format_t *surroundFormats,
+                                            bool *surroundFormatsEnabled,
+                                            bool reported);
+        virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
+
         // return the strategy corresponding to a given stream type
         routing_strategy getStrategy(audio_stream_type_t stream) const;
 
@@ -310,6 +321,10 @@
         bool isStrategyActive(const sp<AudioOutputDescriptor>& outputDesc, routing_strategy strategy,
                               uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
 
+        bool isStrategyActiveOnSameModule(const sp<AudioOutputDescriptor>& outputDesc,
+                                                  routing_strategy strategy, uint32_t inPastMs = 0,
+                                                  nsecs_t sysTime = 0) const;
+
         // change the route of the specified output. Returns the number of ms we have slept to
         // allow new routing to take effect in certain cases.
         virtual uint32_t setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
@@ -362,10 +377,6 @@
                            int delayMs = 0,
                            audio_devices_t device = (audio_devices_t)0);
 
-        // handle special cases for sonification strategy while in call: mute streams or replace by
-        // a special tone in the device used for communication
-        void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
-
         audio_mode_t getPhoneState();
 
         // true if device is in a telephony or VoIP call
@@ -491,8 +502,8 @@
 
         uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
         sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
-        sp<DeviceDescriptor> fillAudioPortConfigForDevice(
-                const DeviceVector& devices, audio_devices_t device, audio_port_config *config);
+        sp<DeviceDescriptor> findDevice(
+                const DeviceVector& devices, audio_devices_t device);
 
         // if argument "device" is different from AUDIO_DEVICE_NONE,  startSource() will force
         // the re-evaluation of the output device.
@@ -529,7 +540,7 @@
         static bool streamsMatchForvolume(audio_stream_type_t stream1,
                                           audio_stream_type_t stream2);
 
-        uid_t mUidCached;
+        const uid_t mUidCached;                         // AID_AUDIOSERVER
         AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
         sp<SwAudioOutputDescriptor> mPrimaryOutput;     // primary output descriptor
         // list of descriptors for outputs currently opened
@@ -592,11 +603,16 @@
 
         // Audio Policy Engine Interface.
         AudioPolicyManagerInterface *mEngine;
+
+        // Surround formats that are enabled.
+        std::unordered_set<audio_format_t> mSurroundFormats;
 private:
         // Add or remove AC3 DTS encodings based on user preferences.
         void filterSurroundFormats(FormatVector *formatsPtr);
         void filterSurroundChannelMasks(ChannelsVector *channelMasksPtr);
 
+        status_t getSupportedFormats(audio_io_handle_t ioHandle, FormatVector& formats);
+
         // If any, resolve any "dynamic" fields of an Audio Profiles collection
         void updateAudioProfiles(audio_devices_t device, audio_io_handle_t ioHandle,
                 AudioProfileVector &profiles);
@@ -628,7 +644,6 @@
                 audio_devices_t device,
                 audio_session_t session,
                 audio_stream_type_t stream,
-                audio_io_handle_t originalOutput,
                 const audio_config_t *config,
                 audio_output_flags_t *flags);
         // internal method to return the input handle for the given device and format
@@ -665,6 +680,18 @@
             param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
             mpClientInterface->setParameters(output, param.toString());
         }
+        status_t installPatch(const char *caller,
+                audio_patch_handle_t *patchHandle,
+                AudioIODescriptorInterface *ioDescriptor,
+                const struct audio_patch *patch,
+                int delayMs);
+        status_t installPatch(const char *caller,
+                ssize_t index,
+                audio_patch_handle_t *patchHandle,
+                const struct audio_patch *patch,
+                int delayMs,
+                uid_t uid,
+                sp<AudioPatch> *patchDescPtr);
 
         bool soundTriggerSupportsConcurrentCapture();
         bool mSoundTriggerSupportsConcurrentCapture;
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index b064f8c..21fffec 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -155,17 +155,6 @@
     return result;
 }
 
-status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
-              audio_stream_type_t stream)
-{
-    return mAudioPolicyService->startTone(tone, stream);
-}
-
-status_t AudioPolicyService::AudioPolicyClient::stopTone()
-{
-    return mAudioPolicyService->stopTone();
-}
-
 status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
 {
     return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index c7dfe0f..fdae23b 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -24,6 +24,7 @@
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
 #include <media/EffectsConfig.h>
+#include <mediautils/ServiceUtilities.h>
 #include <system/audio.h>
 #include <system/audio_effects/audio_effects_conf.h>
 #include <utils/Vector.h>
@@ -31,7 +32,6 @@
 #include <cutils/config_utils.h>
 #include <binder/IPCThreadState.h>
 #include "AudioPolicyEffects.h"
-#include "ServiceUtilities.h"
 
 namespace android {
 
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 8f0c846..d2bc40d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -18,8 +18,11 @@
 //#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
+#include <media/MediaAnalyticsItem.h>
+
 #include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
+#include <mediautils/ServiceUtilities.h>
+#include "TypeConverter.h"
 
 namespace android {
 
@@ -44,6 +47,7 @@
 
     ALOGV("setDeviceConnectionState()");
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->setDeviceConnectionState(device, state,
                                                          device_address, device_name);
 }
@@ -55,6 +59,7 @@
     if (mAudioPolicyManager == NULL) {
         return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     }
+    AutoCallerClear acc;
     return mAudioPolicyManager->getDeviceConnectionState(device,
                                                       device_address);
 }
@@ -72,6 +77,7 @@
 
     ALOGV("handleDeviceConfigChange()");
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->handleDeviceConfigChange(device, device_address,
                                                          device_name);
 }
@@ -94,10 +100,10 @@
     // operation from policy manager standpoint (no other operation (e.g track start or stop)
     // can be interleaved).
     Mutex::Autolock _l(mLock);
-
     // TODO: check if it is more appropriate to do it in platform specific policy manager
     AudioSystem::setMode(state);
 
+    AutoCallerClear acc;
     mAudioPolicyManager->setPhoneState(state);
     mPhoneState = state;
     return NO_ERROR;
@@ -115,9 +121,11 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-    if (!settingsAllowed()) {
+
+    if (!modifyAudioRoutingAllowed()) {
         return PERMISSION_DENIED;
     }
+
     if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
         return BAD_VALUE;
     }
@@ -126,6 +134,7 @@
     }
     ALOGV("setForceUse()");
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     mAudioPolicyManager->setForceUse(usage, config);
     return NO_ERROR;
 }
@@ -138,6 +147,7 @@
     if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
         return AUDIO_POLICY_FORCE_NONE;
     }
+    AutoCallerClear acc;
     return mAudioPolicyManager->getForceUse(usage);
 }
 
@@ -151,6 +161,7 @@
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->getOutput(stream);
 }
 
@@ -172,12 +183,13 @@
     Mutex::Autolock _l(mLock);
 
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
-    if (!isTrustedCallingUid(callingUid) || uid == (uid_t)-1) {
+    if (!isAudioServerOrMediaServerUid(callingUid) || uid == (uid_t)-1) {
         ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
                 "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
         uid = callingUid;
     }
     audio_output_flags_t originalFlags = flags;
+    AutoCallerClear acc;
     status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
                                                  config,
                                                  &flags, selectedDeviceId, portId);
@@ -223,6 +235,7 @@
         }
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->startOutput(output, stream, session);
 }
 
@@ -259,6 +272,7 @@
         }
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->stopOutput(output, stream, session);
 }
 
@@ -279,6 +293,7 @@
 {
     ALOGV("doReleaseOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
+    // called from internal thread: no need to clear caller identity
     mAudioPolicyManager->releaseOutput(output, stream, session);
 }
 
@@ -305,7 +320,7 @@
 
     bool updatePid = (pid == -1);
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
-    if (!isTrustedCallingUid(callingUid)) {
+    if (!isAudioServerOrMediaServerUid(callingUid)) {
         ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
                 "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
         uid = callingUid;
@@ -327,6 +342,13 @@
         return PERMISSION_DENIED;
     }
 
+    if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
+        attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
+        attr->source == AUDIO_SOURCE_VOICE_CALL) &&
+        !captureAudioOutputAllowed(pid, uid)) {
+        return PERMISSION_DENIED;
+    }
+
     if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed(pid, uid)) {
         return BAD_VALUE;
     }
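
The check added above gates the in-call capture sources on the same permission as output capture: VOICE_UPLINK, VOICE_DOWNLINK and VOICE_CALL now require captureAudioOutputAllowed(), on top of the existing HOTWORD gate. A toy standalone model of that source-to-permission mapping follows; the enum values and permission identifiers are placeholders, not the real Android permissions.

```cpp
// Toy model of the per-source permission gating above; names are placeholders.
#include <cstdio>

enum Source { SRC_MIC, SRC_VOICE_UPLINK, SRC_VOICE_DOWNLINK, SRC_VOICE_CALL, SRC_HOTWORD };
enum Permission { PERM_RECORD_AUDIO, PERM_CAPTURE_AUDIO_OUTPUT, PERM_CAPTURE_HOTWORD };

// Every capture needs RECORD_AUDIO; some sources need an extra privileged permission.
static Permission extraPermissionFor(Source source, bool *needsExtra) {
    switch (source) {
        case SRC_VOICE_UPLINK:
        case SRC_VOICE_DOWNLINK:
        case SRC_VOICE_CALL:
            *needsExtra = true;
            return PERM_CAPTURE_AUDIO_OUTPUT;
        case SRC_HOTWORD:
            *needsExtra = true;
            return PERM_CAPTURE_HOTWORD;
        default:
            *needsExtra = false;
            return PERM_RECORD_AUDIO;
    }
}

int main() {
    bool needsExtra = false;
    Permission p = extraPermissionFor(SRC_VOICE_CALL, &needsExtra);
    std::printf("voice call capture needs extra permission: %s (id %d)\n",
                needsExtra ? "yes" : "no", (int)p);
    return 0;
}
```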
@@ -337,11 +359,14 @@
         AudioPolicyInterface::input_type_t inputType;
 
         Mutex::Autolock _l(mLock);
-        // the audio_in_acoustics_t parameter is ignored by get_input()
-        status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
-                                                     config,
-                                                     flags, selectedDeviceId,
-                                                     &inputType, portId);
+        {
+            AutoCallerClear acc;
+            // the audio_in_acoustics_t parameter is ignored by get_input()
+            status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
+                                                         config,
+                                                         flags, selectedDeviceId,
+                                                         &inputType, portId);
+        }
         audioPolicyEffects = mAudioPolicyEffects;
 
         if (status == NO_ERROR) {
@@ -372,6 +397,7 @@
 
         if (status != NO_ERROR) {
             if (status == PERMISSION_DENIED) {
+                AutoCallerClear acc;
                 mAudioPolicyManager->releaseInput(*input, session);
             }
             return status;
@@ -382,6 +408,7 @@
         client->active = false;
         client->isConcurrent = false;
         client->isVirtualDevice = false; //TODO : update from APM->getInputForAttr()
+        client->deviceId = *selectedDeviceId;
         mAudioRecordClients.add(*portId, client);
     }
 
@@ -395,6 +422,47 @@
     return NO_ERROR;
 }
 
+// this is replicated from frameworks/av/media/libaudioclient/AudioRecord.cpp
+// XXX -- figure out how to put it into a common, shared location
+
+static std::string audioSourceString(audio_source_t value) {
+    std::string source;
+    if (SourceTypeConverter::toString(value, source)) {
+        return source;
+    }
+    char rawbuffer[16];  // room for "%d"
+    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+    return rawbuffer;
+}
+
+static std::string audioConcurrencyString(
+        AudioPolicyInterface::concurrency_type__mask_t concurrency)
+{
+    char buffer[64]; // oversized
+    if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL) {
+        snprintf(buffer, sizeof(buffer), "%s%s%s%s",
+            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL)? ",call":"",
+            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE)? ",capture":"",
+            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_HOTWORD)? ",hotword":"",
+            (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_PREEMPT)? ",preempt":"");
+    } else {
+        snprintf(buffer, sizeof(buffer), ",none");
+    }
+
+    return &buffer[1];
+}
+
+std::string AudioPolicyService::getDeviceTypeStrForPortId(audio_port_handle_t portId) {
+    std::string typeStr;
+    struct audio_port port = {};
+    port.id = portId;
+    status_t status = mAudioPolicyManager->getAudioPort(&port);
+    if (status == NO_ERROR && port.type == AUDIO_PORT_TYPE_DEVICE) {
+        deviceToString(port.ext.device.type, typeStr);
+    }
+    return typeStr;
+}
+
 status_t AudioPolicyService::startInput(audio_port_handle_t portId, bool *silenced)
 {
     if (mAudioPolicyManager == NULL) {
@@ -425,8 +493,84 @@
     AudioPolicyInterface::concurrency_type__mask_t concurrency =
             AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE;
 
-    status_t status = mAudioPolicyManager->startInput(
-            client->input, client->session, *silenced, &concurrency);
+    status_t status;
+    {
+        AutoCallerClear acc;
+        status = mAudioPolicyManager->startInput(
+                    client->input, client->session, *silenced, &concurrency);
+
+    }
+
+    // including successes gets very verbose
+    if (status != NO_ERROR) {
+
+        static constexpr char kAudioPolicy[] = "audiopolicy";
+
+        static constexpr char kAudioPolicyReason[] = "android.media.audiopolicy.reason";
+        static constexpr char kAudioPolicyStatus[] = "android.media.audiopolicy.status";
+        static constexpr char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
+        static constexpr char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
+        static constexpr char kAudioPolicyRqstSession[] = "android.media.audiopolicy.rqst.session";
+        static constexpr char kAudioPolicyRqstDevice[] =
+                "android.media.audiopolicy.rqst.device";
+        static constexpr char kAudioPolicyActiveSrc[] = "android.media.audiopolicy.active.src";
+        static constexpr char kAudioPolicyActivePkg[] = "android.media.audiopolicy.active.pkg";
+        static constexpr char kAudioPolicyActiveSession[] =
+                "android.media.audiopolicy.active.session";
+        static constexpr char kAudioPolicyActiveDevice[] =
+                "android.media.audiopolicy.active.device";
+
+        MediaAnalyticsItem *item = new MediaAnalyticsItem(kAudioPolicy);
+        if (item != NULL) {
+
+            item->setCString(kAudioPolicyReason, audioConcurrencyString(concurrency).c_str());
+            item->setInt32(kAudioPolicyStatus, status);
+
+            item->setCString(kAudioPolicyRqstSrc,
+                             audioSourceString(client->attributes.source).c_str());
+            item->setInt32(kAudioPolicyRqstSession, client->session);
+            if (client->opPackageName.size() != 0) {
+                item->setCString(kAudioPolicyRqstPkg,
+                                 std::string(String8(client->opPackageName).string()).c_str());
+            } else {
+                item->setCString(kAudioPolicyRqstPkg, std::to_string(client->uid).c_str());
+            }
+            item->setCString(
+                    kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
+
+            // figure out who is active
+            // NB: the other client may have released the microphone since the check above,
+            // so this is a best-effort snapshot. We hold mLock, so iterating over
+            // mAudioRecordClients here is safe.
+            if (concurrency != AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE) {
+                int count = mAudioRecordClients.size();
+                for (int i = 0; i<count ; i++) {
+                    if (portId == mAudioRecordClients.keyAt(i)) {
+                        continue;
+                    }
+                    sp<AudioRecordClient> other = mAudioRecordClients.valueAt(i);
+                    if (other->active) {
+                        // keeps the last of the clients marked active
+                        item->setCString(kAudioPolicyActiveSrc,
+                                         audioSourceString(other->attributes.source).c_str());
+                        item->setInt32(kAudioPolicyActiveSession, other->session);
+                        if (other->opPackageName.size() != 0) {
+                            item->setCString(kAudioPolicyActivePkg,
+                                 std::string(String8(other->opPackageName).string()).c_str());
+                        } else {
+                            item->setCString(kAudioPolicyActivePkg,
+                                             std::to_string(other->uid).c_str());
+                        }
+                        item->setCString(kAudioPolicyActiveDevice,
+                                         getDeviceTypeStrForPortId(other->deviceId).c_str());
+                    }
+                }
+            }
+            item->selfrecord();
+            delete item;
+            item = NULL;
+        }
+    }
 
     if (status == NO_ERROR) {
         LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
@@ -439,6 +583,8 @@
         if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
             //TODO: check concurrent capture permission
         }
+
+        client->active = true;
     } else {
         finishRecording(client->opPackageName, client->uid);
     }
@@ -459,9 +605,11 @@
     }
     sp<AudioRecordClient> client = mAudioRecordClients.valueAt(index);
 
+    client->active = false;
+
     // finish the recording app op
     finishRecording(client->opPackageName, client->uid);
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->stopInput(client->input, client->session);
 }
 
@@ -494,6 +642,7 @@
     }
     {
         Mutex::Autolock _l(mLock);
+        AutoCallerClear acc;
         mAudioPolicyManager->releaseInput(client->input, client->session);
     }
 }
@@ -512,6 +661,7 @@
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax);
     return NO_ERROR;
 }
@@ -530,6 +680,7 @@
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->setStreamVolumeIndex(stream,
                                                     index,
                                                     device);
@@ -546,6 +697,7 @@
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->getStreamVolumeIndex(stream,
                                                     index,
                                                     device);
@@ -559,6 +711,7 @@
     if (mAudioPolicyManager == NULL) {
         return 0;
     }
+    AutoCallerClear acc;
     return mAudioPolicyManager->getStrategyForStream(stream);
 }
 
@@ -573,6 +726,7 @@
         return AUDIO_DEVICE_NONE;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->getDevicesForStream(stream);
 }
 
@@ -583,6 +737,7 @@
         return 0;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->getOutputForEffect(desc);
 }
 
@@ -596,6 +751,7 @@
         return NO_INIT;
     }
     Mutex::Autolock _l(mEffectsLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->registerEffect(desc, io, strategy, session, id);
 }
 
@@ -605,6 +761,7 @@
         return NO_INIT;
     }
     Mutex::Autolock _l(mEffectsLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->unregisterEffect(id);
 }
 
@@ -614,6 +771,7 @@
         return NO_INIT;
     }
     Mutex::Autolock _l(mEffectsLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->setEffectEnabled(id, enabled);
 }
 
@@ -626,6 +784,7 @@
         return false;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->isStreamActive(stream, inPastMs);
 }
 
@@ -638,6 +797,7 @@
         return false;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
 }
 
@@ -647,6 +807,7 @@
         return false;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->isSourceActive(source);
 }
 
@@ -680,6 +841,7 @@
     Mutex::Autolock _l(mLock);
     Mutex::Autolock _le(mEffectsLock); // isOffloadSupported queries for
                                       // non-offloadable effects
+    AutoCallerClear acc;
     return mAudioPolicyManager->isOffloadSupported(info);
 }
 
@@ -693,7 +855,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->listAudioPorts(role, type, num_ports, ports, generation);
 }
 
@@ -703,7 +865,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->getAudioPort(port);
 }
 
@@ -717,6 +879,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
+    AutoCallerClear acc;
     return mAudioPolicyManager->createAudioPatch(patch, handle,
                                                   IPCThreadState::self()->getCallingUid());
 }
@@ -730,7 +893,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->releaseAudioPatch(handle,
                                                      IPCThreadState::self()->getCallingUid());
 }
@@ -743,7 +906,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->listAudioPatches(num_patches, patches, generation);
 }
 
@@ -756,7 +919,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->setAudioPortConfig(config);
 }
 
@@ -768,7 +931,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->acquireSoundTriggerSession(session, ioHandle, device);
 }
 
@@ -778,7 +941,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->releaseSoundTriggerSession(session);
 }
 
@@ -791,6 +954,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
+    AutoCallerClear acc;
     if (registration) {
         return mAudioPolicyManager->registerPolicyMixes(mixes);
     } else {
@@ -806,7 +970,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->startAudioSource(source, attributes, handle,
                                                  IPCThreadState::self()->getCallingUid());
 }
@@ -817,7 +981,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-
+    AutoCallerClear acc;
     return mAudioPolicyManager->stopAudioSource(handle);
 }
 
@@ -830,6 +994,7 @@
         return PERMISSION_DENIED;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->setMasterMono(mono);
 }
 
@@ -839,6 +1004,7 @@
         return NO_INIT;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->getMasterMono(mono);
 }
 
@@ -850,8 +1016,32 @@
         return NAN;
     }
     Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
     return mAudioPolicyManager->getStreamVolumeDB(stream, index, device);
 }
 
+status_t AudioPolicyService::getSurroundFormats(unsigned int *numSurroundFormats,
+                                                audio_format_t *surroundFormats,
+                                                bool *surroundFormatsEnabled,
+                                                bool reported)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
+    return mAudioPolicyManager->getSurroundFormats(numSurroundFormats, surroundFormats,
+                                                   surroundFormatsEnabled, reported);
+}
+
+status_t AudioPolicyService::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
+    return mAudioPolicyManager->setSurroundFormatEnabled(audioFormat, enabled);
+}
 
 } // namespace android
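
getSurroundFormats() appears to follow the usual size-query pattern for array-returning calls, with numSurroundFormats acting as an in/out count; that contract is inferred from the signature, not shown in the patch. A hypothetical two-pass caller, built around a stub with the assumed contract so the sketch is self-contained:

```cpp
// Hypothetical two-pass caller for a getSurroundFormats()-style API.
// The stub implementation and the in/out contract are assumptions for illustration.
#include <cstdio>
#include <memory>
#include <vector>

using audio_format_t = int;               // stand-in for the real enum
static const audio_format_t kKnown[] = {1 /*AC3*/, 2 /*E_AC3*/, 3 /*DTS*/};

// *num is the array capacity on input and the total available count on output.
static int getSurroundFormats(unsigned int *num, audio_format_t *formats,
                              bool *enabled, bool /*reported*/) {
    const unsigned int total = 3;
    for (unsigned int i = 0; i < *num && i < total; ++i) {
        formats[i] = kKnown[i];
        enabled[i] = true;
    }
    *num = total;
    return 0; // NO_ERROR
}

int main() {
    unsigned int num = 0;
    getSurroundFormats(&num, nullptr, nullptr, false);        // pass 1: query the count
    std::vector<audio_format_t> formats(num);
    std::unique_ptr<bool[]> enabled(new bool[num]);
    getSurroundFormats(&num, formats.data(), enabled.get(), false); // pass 2: fetch data
    std::printf("got %u surround formats\n", num);
    return 0;
}
```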
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 082923a..ca3b6b6 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -34,16 +34,14 @@
 #include <utils/String16.h>
 #include <utils/threads.h>
 #include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
 #include <hardware_legacy/power.h>
 #include <media/AudioEffect.h>
 #include <media/AudioParameter.h>
+#include <mediautils/ServiceUtilities.h>
 
 #include <system/audio.h>
 #include <system/audio_policy.h>
 
-#include <private/android_filesystem_config.h>
-
 namespace android {
 
 static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n";
@@ -69,8 +67,6 @@
     {
         Mutex::Autolock _l(mLock);
 
-        // start tone playback thread
-        mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
         // start audio commands thread
         mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
         // start output activity command thread
@@ -92,7 +88,6 @@
 
 AudioPolicyService::~AudioPolicyService()
 {
-    mTonePlaybackThread->exit();
     mAudioCommandThread->exit();
     mOutputCommandThread->exit();
 
@@ -117,13 +112,17 @@
     Mutex::Autolock _l(mNotificationClientsLock);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
-    if (mNotificationClients.indexOfKey(uid) < 0) {
+    pid_t pid = IPCThreadState::self()->getCallingPid();
+    int64_t token = ((int64_t)uid<<32) | pid;
+
+    if (mNotificationClients.indexOfKey(token) < 0) {
         sp<NotificationClient> notificationClient = new NotificationClient(this,
                                                                            client,
-                                                                           uid);
-        ALOGV("registerClient() client %p, uid %d", client.get(), uid);
+                                                                           uid,
+                                                                           pid);
+        ALOGV("registerClient() client %p, uid %d pid %d", client.get(), uid, pid);
 
-        mNotificationClients.add(uid, notificationClient);
+        mNotificationClients.add(token, notificationClient);
 
         sp<IBinder> binder = IInterface::asBinder(client);
         binder->linkToDeath(notificationClient);
@@ -135,22 +134,34 @@
     Mutex::Autolock _l(mNotificationClientsLock);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
-    if (mNotificationClients.indexOfKey(uid) < 0) {
+    pid_t pid = IPCThreadState::self()->getCallingPid();
+    int64_t token = ((int64_t)uid<<32) | pid;
+
+    if (mNotificationClients.indexOfKey(token) < 0) {
         return;
     }
-    mNotificationClients.valueFor(uid)->setAudioPortCallbacksEnabled(enabled);
+    mNotificationClients.valueFor(token)->setAudioPortCallbacksEnabled(enabled);
 }
 
 // removeNotificationClient() is called when the client process dies.
-void AudioPolicyService::removeNotificationClient(uid_t uid)
+void AudioPolicyService::removeNotificationClient(uid_t uid, pid_t pid)
 {
     {
         Mutex::Autolock _l(mNotificationClientsLock);
-        mNotificationClients.removeItem(uid);
+        int64_t token = ((int64_t)uid<<32) | pid;
+        mNotificationClients.removeItem(token);
     }
     {
         Mutex::Autolock _l(mLock);
-        if (mAudioPolicyManager) {
+        bool hasSameUid = false;
+        for (size_t i = 0; i < mNotificationClients.size(); i++) {
+            if (mNotificationClients.valueAt(i)->uid() == uid) {
+                hasSameUid = true;
+                break;
+            }
+        }
+        if (mAudioPolicyManager && !hasSameUid) {
+            // called from binder death notification: no need to clear caller identity
             mAudioPolicyManager->releaseResourcesForUid(uid);
         }
     }
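
The notification-client map is now keyed by a 64-bit token that packs the caller's uid in the high 32 bits and its pid in the low 32 bits, so the same uid can register from several processes. A tiny standalone illustration of the packing (the helper names are made up):

```cpp
// Illustration of the ((int64_t)uid << 32) | pid key used for mNotificationClients.
// makeClientToken/splitClientToken are made-up helper names.
#include <cstdint>
#include <cstdio>

static int64_t makeClientToken(uint32_t uid, int32_t pid) {
    return ((int64_t)uid << 32) | (uint32_t)pid;
}

static void splitClientToken(int64_t token, uint32_t *uid, int32_t *pid) {
    *uid = (uint32_t)(token >> 32);
    *pid = (int32_t)(token & 0xffffffff);
}

int main() {
    int64_t token = makeClientToken(10123 /* some app uid */, 4242);
    uint32_t uid; int32_t pid;
    splitClientToken(token, &uid, &pid);
    std::printf("token=%lld uid=%u pid=%d\n", (long long)token, uid, pid);
    return 0;
}
```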
@@ -237,8 +248,9 @@
 
 AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
                                                      const sp<IAudioPolicyServiceClient>& client,
-                                                     uid_t uid)
-    : mService(service), mUid(uid), mAudioPolicyServiceClient(client),
+                                                     uid_t uid,
+                                                     pid_t pid)
+    : mService(service), mUid(uid), mPid(pid), mAudioPolicyServiceClient(client),
       mAudioPortCallbacksEnabled(false)
 {
 }
@@ -252,7 +264,7 @@
     sp<NotificationClient> keep(this);
     sp<AudioPolicyService> service = mService.promote();
     if (service != 0) {
-        service->removeNotificationClient(mUid);
+        service->removeNotificationClient(mUid, mPid);
     }
 }
 
@@ -273,7 +285,7 @@
 void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
         const String8& regId, int32_t state)
 {
-    if (mAudioPolicyServiceClient != 0 && (mUid % AID_USER_OFFSET) < AID_APP_START) {
+    if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
         mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
     }
 }
@@ -283,7 +295,7 @@
         const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
         audio_patch_handle_t patchHandle)
 {
-    if (mAudioPolicyServiceClient != 0 && (mUid % AID_USER_OFFSET) < AID_APP_START) {
+    if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
         mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, clientInfo,
                 clientConfig, deviceConfig, patchHandle);
     }
@@ -323,8 +335,6 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
     result.append(buffer);
-    snprintf(buffer, SIZE, "Tones Thread: %p\n", mTonePlaybackThread.get());
-    result.append(buffer);
 
     write(fd, result.string(), result.size());
     return NO_ERROR;
@@ -335,6 +345,7 @@
     {
         Mutex::Autolock _l(mLock);
         if (mAudioPolicyManager) {
+            AutoCallerClear acc;
             mAudioPolicyManager->setRecordSilenced(uid, silenced);
         }
     }
@@ -359,9 +370,6 @@
         if (mAudioCommandThread != 0) {
             mAudioCommandThread->dump(fd);
         }
-        if (mTonePlaybackThread != 0) {
-            mTonePlaybackThread->dump(fd);
-        }
 
         if (mAudioPolicyManager) {
             mAudioPolicyManager->dump(fd);
@@ -574,10 +582,6 @@
     updateUidCache(uid, false, true);
 }
 
-bool AudioPolicyService::UidPolicy::isServiceUid(uid_t uid) const {
-    return uid % AID_USER_OFFSET < AID_APP_START;
-}
-
 void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
     sp<AudioPolicyService> service = mService.promote();
     if (service != nullptr) {
@@ -636,7 +640,6 @@
                                                            const wp<AudioPolicyService>& service)
     : Thread(false), mName(name), mService(service)
 {
-    mpToneGenerator = NULL;
 }
 
 
@@ -646,7 +649,6 @@
         release_wake_lock(mName.string());
     }
     mAudioCommands.clear();
-    delete mpToneGenerator;
 }
 
 void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -671,26 +673,6 @@
                 mLastCommand = command;
 
                 switch (command->mCommand) {
-                case START_TONE: {
-                    mLock.unlock();
-                    ToneData *data = (ToneData *)command->mParam.get();
-                    ALOGV("AudioCommandThread() processing start tone %d on stream %d",
-                            data->mType, data->mStream);
-                    delete mpToneGenerator;
-                    mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
-                    mpToneGenerator->startTone(data->mType);
-                    mLock.lock();
-                    }break;
-                case STOP_TONE: {
-                    mLock.unlock();
-                    ALOGV("AudioCommandThread() processing stop tone");
-                    if (mpToneGenerator != NULL) {
-                        mpToneGenerator->stopTone();
-                        delete mpToneGenerator;
-                        mpToneGenerator = NULL;
-                    }
-                    mLock.lock();
-                    }break;
                 case SET_VOLUME: {
                     VolumeData *data = (VolumeData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set volume stream %d, \
@@ -897,27 +879,6 @@
     return NO_ERROR;
 }
 
-void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type,
-        audio_stream_type_t stream)
-{
-    sp<AudioCommand> command = new AudioCommand();
-    command->mCommand = START_TONE;
-    sp<ToneData> data = new ToneData();
-    data->mType = type;
-    data->mStream = stream;
-    command->mParam = data;
-    ALOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream);
-    sendCommand(command);
-}
-
-void AudioPolicyService::AudioCommandThread::stopToneCommand()
-{
-    sp<AudioCommand> command = new AudioCommand();
-    command->mCommand = STOP_TONE;
-    ALOGV("AudioCommandThread() adding tone stop");
-    sendCommand(command);
-}
-
 status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
                                                                float volume,
                                                                audio_io_handle_t output,
@@ -1209,6 +1170,7 @@
                 patch = ((CreateAudioPatchData *)command->mParam.get())->mPatch;
             } else {
                 handle = ((ReleaseAudioPatchData *)command->mParam.get())->mHandle;
+                memset(&patch, 0, sizeof(patch));
             }
             audio_patch_handle_t handle2;
             struct audio_patch patch2;
@@ -1253,8 +1215,6 @@
 
         } break;
 
-        case START_TONE:
-        case STOP_TONE:
         default:
             break;
         }
@@ -1327,27 +1287,6 @@
                                                    output, delayMs);
 }
 
-int AudioPolicyService::startTone(audio_policy_tone_t tone,
-                                  audio_stream_type_t stream)
-{
-    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
-        ALOGE("startTone: illegal tone requested (%d)", tone);
-    }
-    if (stream != AUDIO_STREAM_VOICE_CALL) {
-        ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
-            tone);
-    }
-    mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
-                                          AUDIO_STREAM_VOICE_CALL);
-    return 0;
-}
-
-int AudioPolicyService::stopTone()
-{
-    mTonePlaybackThread->stopToneCommand();
-    return 0;
-}
-
 int AudioPolicyService::setVoiceVolume(float volume, int delayMs)
 {
     return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
@@ -1403,9 +1342,6 @@
 int aps_set_stream_volume(void *service, audio_stream_type_t stream,
                                      float volume, audio_io_handle_t output,
                                      int delay_ms);
-int aps_start_tone(void *service, audio_policy_tone_t tone,
-                              audio_stream_type_t stream);
-int aps_stop_tone(void *service);
 int aps_set_voice_volume(void *service, float volume, int delay_ms);
 };
 
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index b3bc12b..a1366bb 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -38,8 +38,6 @@
 
 namespace android {
 
-using namespace std;
-
 // ----------------------------------------------------------------------------
 
 class AudioPolicyService :
@@ -159,8 +157,6 @@
                                      float volume,
                                      audio_io_handle_t output,
                                      int delayMs = 0);
-    virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
-    virtual status_t stopTone();
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
     virtual bool isOffloadSupported(const audio_offload_info_t &config);
 
@@ -203,6 +199,12 @@
     virtual float    getStreamVolumeDB(
                 audio_stream_type_t stream, int index, audio_devices_t device);
 
+    virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+                                        audio_format_t *surroundFormats,
+                                        bool *surroundFormatsEnabled,
+                                        bool reported);
+    virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
+
             status_t doStopOutput(audio_io_handle_t output,
                                   audio_stream_type_t stream,
                                   audio_session_t session);
@@ -218,7 +220,7 @@
             virtual status_t clientSetAudioPortConfig(const struct audio_port_config *config,
                                                       int delayMs);
 
-            void removeNotificationClient(uid_t uid);
+            void removeNotificationClient(uid_t uid, pid_t pid);
             void onAudioPortListUpdate();
             void doOnAudioPortListUpdate();
             void onAudioPatchListUpdate();
@@ -257,6 +259,8 @@
     // Prints the shell command help
     status_t printHelp(int out);
 
+    std::string getDeviceTypeStrForPortId(audio_port_handle_t portId);
+
     // If recording we need to make sure the UID is allowed to do that. If the UID is idle
     // then it cannot record and gets buffers with zeros - silence. As soon as the UID
     // transitions to an active state we will start reporting buffers with data. This approach
@@ -285,7 +289,6 @@
         void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
 
     private:
-        bool isServiceUid(uid_t uid) const;
         void notifyService(uid_t uid, bool active);
         void updateOverrideUid(uid_t uid, bool active, bool insert);
         void updateUidCache(uid_t uid, bool active, bool insert);
@@ -299,10 +302,7 @@
         std::unordered_map<uid_t, bool> mCachedUids;
     };
 
-    // Thread used for tone playback and to send audio config commands to audio flinger
-    // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
-    // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
-    // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+    // Thread used to send audio config commands to audio flinger
     // For audio config commands, it is necessary because audio flinger requires that the calling
     // process (user) has permission to modify audio settings.
     class AudioCommandThread : public Thread {
@@ -311,8 +311,6 @@
 
         // commands for tone AudioCommand
         enum {
-            START_TONE,
-            STOP_TONE,
             SET_VOLUME,
             SET_PARAMETERS,
             SET_VOICE_VOLUME,
@@ -337,9 +335,6 @@
         virtual     bool        threadLoop();
 
                     void        exit();
-                    void        startToneCommand(ToneGenerator::tone_type type,
-                                                 audio_stream_type_t stream);
-                    void        stopToneCommand();
                     status_t    volumeCommand(audio_stream_type_t stream, float volume,
                                             audio_io_handle_t output, int delayMs = 0);
                     status_t    parametersCommand(audio_io_handle_t ioHandle,
@@ -382,7 +377,7 @@
 
             void dump(char* buffer, size_t size);
 
-            int mCommand;   // START_TONE, STOP_TONE ...
+            int mCommand;   // SET_VOLUME, SET_PARAMETERS...
             nsecs_t mTime;  // time stamp
             Mutex mLock;    // mutex associated to mCond
             Condition mCond; // condition for status return
@@ -398,12 +393,6 @@
             AudioCommandData() {}
         };
 
-        class ToneData : public AudioCommandData {
-        public:
-            ToneGenerator::tone_type mType; // tone type (START_TONE only)
-            audio_stream_type_t mStream;    // stream type (START_TONE only)
-        };
-
         class VolumeData : public AudioCommandData {
         public:
             audio_stream_type_t mStream;
@@ -470,7 +459,6 @@
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
-        ToneGenerator *mpToneGenerator;     // the tone generator
         sp<AudioCommand> mLastCommand;      // last processed command (used by dump)
         String8 mName;                      // string used by wake lock for delayed commands
         wp<AudioPolicyService> mService;
@@ -545,11 +533,6 @@
         // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
         virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
 
-        // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
-        // over a telephony device during a phone call.
-        virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
-        virtual status_t stopTone();
-
         // set down link audio volume.
         virtual status_t setVoiceVolume(float volume, int delayMs = 0);
 
@@ -589,7 +572,7 @@
     public:
                             NotificationClient(const sp<AudioPolicyService>& service,
                                                 const sp<IAudioPolicyServiceClient>& client,
-                                                uid_t uid);
+                                                uid_t uid, pid_t pid);
         virtual             ~NotificationClient();
 
                             void      onAudioPortListUpdate();
@@ -602,6 +585,10 @@
                                         audio_patch_handle_t patchHandle);
                             void      setAudioPortCallbacksEnabled(bool enabled);
 
+                            uid_t uid() {
+                                return mUid;
+                            }
+
                 // IBinder::DeathRecipient
                 virtual     void        binderDied(const wp<IBinder>& who);
 
@@ -611,6 +598,7 @@
 
         const wp<AudioPolicyService>        mService;
         const uid_t                         mUid;
+        const pid_t                         mPid;
         const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
               bool                          mAudioPortCallbacksEnabled;
     };
@@ -637,7 +625,24 @@
         const audio_session_t session;       // audio session ID
         bool active;                   // Capture is active or inactive
         bool isConcurrent;             // is allowed to concurrent capture
-        bool isVirtualDevice;          // uses vitual device: updated by APM::getInputForAttr()
+        bool isVirtualDevice;          // uses virtual device: updated by APM::getInputForAttr()
+        audio_port_handle_t deviceId;  // selected input device port ID
+    };
+
+    // A class automatically clearing and restoring binder caller identity inside
+    // a code block (scoped variable)
+    // Declare one systematically before calling AudioPolicyManager methods so that they are
+    // executed with the same level of privilege as audioserver process.
+    class AutoCallerClear {
+    public:
+            AutoCallerClear() :
+                mToken(IPCThreadState::self()->clearCallingIdentity()) {}
+            ~AutoCallerClear() {
+                IPCThreadState::self()->restoreCallingIdentity(mToken);
+            }
+
+    private:
+        const   int64_t mToken;
     };
 
     // Internal dump utilities.
@@ -651,14 +656,13 @@
     // mLock protects AudioPolicyManager methods that can call into audio flinger
     // and possibly back in to audio policy service and acquire mEffectsLock.
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
-    sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
     AudioPolicyInterface *mAudioPolicyManager;
     AudioPolicyClient *mAudioPolicyClient;
 
-    DefaultKeyedVector< uid_t, sp<NotificationClient> >    mNotificationClients;
+    DefaultKeyedVector< int64_t, sp<NotificationClient> >    mNotificationClients;
     Mutex mNotificationClientsLock;  // protects mNotificationClients
     // Manage all effects configured in audio_effects.conf
     sp<AudioPolicyEffects> mAudioPolicyEffects;
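
AutoCallerClear, defined earlier in this header, is a classic RAII guard: the constructor clears the binder calling identity and the destructor restores it, so every AudioPolicyManager call inside the scope runs with audioserver's own privileges regardless of early returns. A framework-free sketch of the same pattern; the Identity type and the clear/restore functions here are invented stand-ins for IPCThreadState.

```cpp
// Framework-free sketch of the AutoCallerClear RAII pattern.
#include <cstdio>

struct Identity { int uid; };
static Identity gCurrent{10042};          // pretend caller identity
static const Identity kServiceSelf{1041}; // pretend service identity

static Identity clearCallingIdentity() {  // returns a token to restore later
    Identity saved = gCurrent;
    gCurrent = kServiceSelf;
    return saved;
}
static void restoreCallingIdentity(Identity token) { gCurrent = token; }

class ScopedCallerClear {
public:
    ScopedCallerClear() : mToken(clearCallingIdentity()) {}
    ~ScopedCallerClear() { restoreCallingIdentity(mToken); }
private:
    const Identity mToken;
};

int main() {
    std::printf("before: uid=%d\n", gCurrent.uid);     // 10042
    {
        ScopedCallerClear acc;                         // identity cleared for this scope
        std::printf("inside: uid=%d\n", gCurrent.uid); // 1041
    }                                                  // restored here, even on early return
    std::printf("after: uid=%d\n", gCurrent.uid);      // 10042
    return 0;
}
```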
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index a43daea..b739b88 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -6,7 +6,6 @@
   frameworks/av/services/audiopolicy \
   frameworks/av/services/audiopolicy/common/include \
   frameworks/av/services/audiopolicy/engine/interface \
-  frameworks/av/services/audiopolicy/utilities
 
 LOCAL_SHARED_LIBRARIES := \
   libaudiopolicymanagerdefault \
@@ -30,3 +29,26 @@
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 include $(BUILD_NATIVE_TEST)
+
+# system/audio.h utilities test
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+  libbase \
+  liblog \
+  libmedia_helper \
+  libutils
+
+LOCAL_SRC_FILES := \
+  systemaudio_tests.cpp \
+
+LOCAL_MODULE := systemaudio_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index eb8222c..2ff7675 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -60,9 +60,6 @@
                        int /*delayMs*/) override { }
     String8 getParameters(audio_io_handle_t /*ioHandle*/,
                           const String8& /*keys*/) override { return String8(); }
-    status_t startTone(audio_policy_tone_t /*tone*/,
-                       audio_stream_type_t /*stream*/) override { return NO_INIT; }
-    status_t stopTone() override { return NO_INIT; }
     status_t setVoiceVolume(float /*volume*/, int /*delayMs*/) override { return NO_INIT; }
     status_t moveEffects(audio_session_t /*session*/,
                          audio_io_handle_t /*srcOutput*/,
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a9593b8..56af152 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -16,9 +16,15 @@
 
 #include <memory>
 #include <set>
+#include <sys/wait.h>
+#include <unistd.h>
 
 #include <gtest/gtest.h>
 
+#define LOG_TAG "APM_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+
 #include "AudioPolicyTestClient.h"
 #include "AudioPolicyTestManager.h"
 
@@ -132,6 +138,36 @@
     // SetUp must finish with no assertions.
 }
 
+TEST_F(AudioPolicyManagerTest, Dump) {
+    int pipefd[2];
+    ASSERT_NE(-1, pipe(pipefd));
+    pid_t cpid = fork();
+    ASSERT_NE(-1, cpid);
+    if (cpid == 0) {
+        // Child process reads from the pipe and logs.
+        close(pipefd[1]);
+        std::string line;
+        char buf;
+        while (read(pipefd[0], &buf, sizeof(buf)) > 0) {
+            if (buf != '\n') {
+                line += buf;
+            } else {
+                ALOGI("%s", line.c_str());
+                line = "";
+            }
+        }
+        if (!line.empty()) ALOGI("%s", line.c_str());
+        close(pipefd[0]);
+        _exit(EXIT_SUCCESS);
+    } else {
+        // Parent does the dump and checks the status code.
+        close(pipefd[0]);
+        ASSERT_EQ(NO_ERROR, mManager->dump(pipefd[1]));
+        close(pipefd[1]);
+        wait(NULL);  // Wait for the child to exit.
+    }
+}
+
 TEST_F(AudioPolicyManagerTest, CreateAudioPatchFailure) {
     audio_patch patch{};
     audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
@@ -166,29 +202,14 @@
 }
 
 TEST_F(AudioPolicyManagerTest, CreateAudioPatchFromMix) {
-    audio_patch patch{};
     audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
     uid_t uid = 42;
     const size_t patchCountBefore = mClient->getActivePatchesCount();
-    patch.num_sources = 1;
-    {
-        auto& src = patch.sources[0];
-        src.role = AUDIO_PORT_ROLE_SOURCE;
-        src.type = AUDIO_PORT_TYPE_MIX;
-        src.id = mManager->getConfig().getAvailableInputDevices()[0]->getId();
-        // Note: these are the parameters of the output device.
-        src.sample_rate = 44100;
-        src.format = AUDIO_FORMAT_PCM_16_BIT;
-        src.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
-    }
-    patch.num_sinks = 1;
-    {
-        auto& sink = patch.sinks[0];
-        sink.role = AUDIO_PORT_ROLE_SINK;
-        sink.type = AUDIO_PORT_TYPE_DEVICE;
-        sink.id = mManager->getConfig().getDefaultOutputDevice()->getId();
-    }
-    ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(&patch, &handle, uid));
+    ASSERT_FALSE(mManager->getConfig().getAvailableInputDevices().isEmpty());
+    PatchBuilder patchBuilder;
+    patchBuilder.addSource(mManager->getConfig().getAvailableInputDevices()[0]).
+            addSink(mManager->getConfig().getDefaultOutputDevice());
+    ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(patchBuilder.patch(), &handle, uid));
     ASSERT_NE(AUDIO_PATCH_HANDLE_NONE, handle);
     ASSERT_EQ(patchCountBefore + 1, mClient->getActivePatchesCount());
 }
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
new file mode 100644
index 0000000..abaae52
--- /dev/null
+++ b/services/audiopolicy/tests/systemaudio_tests.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define LOG_TAG "SysAudio_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+#include <system/audio.h>
+
+using namespace android;
+
+TEST(SystemAudioTest, PatchInvalid) {
+    audio_patch patch{};
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
+    patch.num_sinks = 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = 1;
+    patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = 0;
+    patch.num_sinks = 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+}
+
+TEST(SystemAudioTest, PatchValid) {
+    const audio_port_config src = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    // It's OK not to have sinks.
+    ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).patch()));
+    const audio_port_config sink = {
+        .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSink(sink).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSource(src).
+                    addSink(sink).addSink(sink).patch()));
+}
+
+TEST(SystemAudioTest, PatchHwAvSync) {
+    audio_port_config device_src_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+    device_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+    device_src_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+
+    audio_port_config device_sink_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+    device_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+    device_sink_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+
+    audio_port_config mix_sink_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_MIX };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+    mix_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+    mix_sink_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+
+    audio_port_config mix_src_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_MIX };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+    mix_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+    mix_src_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+}
+
+TEST(SystemAudioTest, PatchEqual) {
+    const audio_patch patch1{}, patch2{};
+    // Invalid patches are not equal.
+    ASSERT_FALSE(audio_patches_are_equal(&patch1, &patch2));
+    const audio_port_config src = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    const audio_port_config sink = {
+        .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+    audio_port_config sink_hw_av_sync = sink;
+    sink_hw_av_sync.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    sink_hw_av_sync.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+    ASSERT_TRUE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+}
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 7b86180..96261ab 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -51,6 +51,7 @@
     device3/StatusTracker.cpp \
     device3/Camera3BufferManager.cpp \
     device3/Camera3StreamSplitter.cpp \
+    device3/DistortionMapper.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
     utils/AutoConditionLock.cpp \
@@ -96,3 +97,8 @@
 LOCAL_MODULE:= libcameraservice
 
 include $(BUILD_SHARED_LIBRARY)
+
+# Build tests too
+
+include $(LOCAL_PATH)/tests/Android.mk
+
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 714d50f..c41de82 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -55,6 +55,7 @@
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/String16.h>
+#include <utils/SystemClock.h>
 #include <utils/Trace.h>
 #include <private/android_filesystem_config.h>
 #include <system/camera_vendor_tags.h>
@@ -243,6 +244,7 @@
     }
 
     if (mFlashlight->hasFlashUnit(id)) {
+        Mutex::Autolock al(mTorchStatusMutex);
         mTorchStatusMap.add(id, TorchModeStatus::AVAILABLE_OFF);
     }
 
@@ -253,6 +255,7 @@
 void CameraService::removeStates(const String8 id) {
     updateCameraNumAndIds();
     if (mFlashlight->hasFlashUnit(id)) {
+        Mutex::Autolock al(mTorchStatusMutex);
         mTorchStatusMap.removeItem(id);
     }
 
@@ -2335,10 +2338,13 @@
 
 void CameraService::Client::notifyError(int32_t errorCode,
         const CaptureResultExtras& resultExtras) {
-    (void) errorCode;
     (void) resultExtras;
     if (mRemoteCallback != NULL) {
-        mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
+        int32_t api1ErrorCode = CAMERA_ERROR_RELEASED;
+        if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISABLED) {
+            api1ErrorCode = CAMERA_ERROR_DISABLED;
+        }
+        mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, api1ErrorCode, 0);
     } else {
         ALOGE("mRemoteCallback is NULL!!");
     }
@@ -2428,6 +2434,8 @@
     return isUidActiveLocked(uid, callingPackage);
 }
 
+static const int kPollUidActiveTimeoutMillis = 50;
+
 bool CameraService::UidPolicy::isUidActiveLocked(uid_t uid, String16 callingPackage) {
     // Non-app UIDs are considered always active
     // If activity manager is unreachable, assume everything is active
@@ -2447,7 +2455,28 @@
         ActivityManager am;
         // Okay to access with a lock held as UID changes are dispatched without
         // a lock and we are a higher level component.
-        active = am.isUidActive(uid, callingPackage);
+        int64_t startTimeMillis = 0;
+        do {
+            // TODO: Fix this b/109950150!
+            // Okay this is a hack. There is a race between the UID turning active and
+            // activity being resumed. The proper fix is very risky, so we temporarily add
+            // some polling, which should happen pretty rarely anyway as the race is hard
+            // to hit.
+            active = am.isUidActive(uid, callingPackage);
+            if (active) {
+                break;
+            }
+            if (startTimeMillis <= 0) {
+                startTimeMillis = uptimeMillis();
+            }
+            int64_t elapsedTimeMillis = uptimeMillis() - startTimeMillis;
+            int64_t remainingTimeMillis = kPollUidActiveTimeoutMillis - elapsedTimeMillis;
+            if (remainingTimeMillis <= 0) {
+                break;
+            }
+            usleep(remainingTimeMillis * 1000);
+        } while (true);
+
         if (active) {
             // Now that we found out the UID is actually active, cache that
             mActiveUids.insert(uid);
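The polling workaround above boils down to a bounded retry: check whether the UID is active, and if not, sleep away whatever is left of the 50 ms budget (kPollUidActiveTimeoutMillis) before checking one final time. A minimal standalone sketch of that pattern follows; the helper name and std::function predicate are illustrative only and not part of this patch.

    // Sketch only; assumes <functional>, <unistd.h> and utils/SystemClock.h (uptimeMillis).
    static bool pollWithTimeout(const std::function<bool()>& check, int64_t timeoutMillis) {
        const int64_t startTimeMillis = uptimeMillis();
        while (true) {
            if (check()) return true;                        // predicate satisfied
            int64_t remainingMillis = timeoutMillis - (uptimeMillis() - startTimeMillis);
            if (remainingMillis <= 0) return false;          // budget exhausted
            usleep(remainingMillis * 1000);                  // sleep out the budget, then re-check once
        }
    }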
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 65faac9..d59b313 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -102,7 +102,7 @@
     {
         SharedParameters::Lock l(mParameters);
 
-        res = l.mParameters.initialize(&(mDevice->info()), mDeviceVersion);
+        res = l.mParameters.initialize(mDevice.get(), mDeviceVersion);
         if (res != OK) {
             ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
                     __FUNCTION__, mCameraId, strerror(-res), res);
@@ -250,6 +250,7 @@
     switch (p.sceneMode) {
         case ANDROID_CONTROL_SCENE_MODE_DISABLED:
             result.append("AUTO\n"); break;
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY)
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION)
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT)
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_LANDSCAPE)
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 187bea9..683e84d 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -33,7 +33,10 @@
     FrameProcessorBase(device),
     mClient(client),
     mLastFrameNumberOfFaces(0),
-    mLast3AFrameNumber(-1) {
+    mLast3AFrameNumber(-1),
+    mLastAEFrameNumber(-1),
+    mLastAFrameNumber(-1),
+    mLastAWBFrameNumber(-1) {
 
     sp<CameraDeviceBase> d = device.promote();
     mSynthesize3ANotify = !(d->willNotify3A());
@@ -197,7 +200,6 @@
                                 faceRects[i*4 + 2], scalerCrop);
             face.rect[3] = l.mParameters.arrayYToNormalizedWithCrop(
                                 faceRects[i*4 + 3], scalerCrop);
-
             face.score = faceScores[i];
             if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
                 face.id = faceIds[i];
@@ -263,24 +265,73 @@
     bool gotAllStates = true;
 
     // TODO: Also use AE mode, AE trigger ID
-    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
+    bool gotAFState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
             &pendingState.afMode, frameNumber, cameraId);
 
-    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
+    bool gotAWBState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
             &pendingState.awbMode, frameNumber, cameraId);
 
-    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
+    bool gotAEState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
             &pendingState.aeState, frameNumber, cameraId);
 
-    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
+    gotAFState &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
             &pendingState.afState, frameNumber, cameraId);
 
-    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
+    gotAWBState &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
             &pendingState.awbState, frameNumber, cameraId);
 
     pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
     pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
 
+    if (gotAEState && (frameNumber > mLastAEFrameNumber)) {
+        if (pendingState.aeState != m3aState.aeState ||
+                pendingState.aeTriggerId > m3aState.aeTriggerId) {
+            ALOGV("%s: Camera %d: AE state %d->%d",
+                    __FUNCTION__, cameraId,
+                    m3aState.aeState, pendingState.aeState);
+            client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
+
+            m3aState.aeState = pendingState.aeState;
+            m3aState.aeTriggerId = pendingState.aeTriggerId;
+            mLastAEFrameNumber = frameNumber;
+        }
+    }
+
+    if (gotAFState && (frameNumber > mLastAFrameNumber)) {
+        if (pendingState.afState != m3aState.afState ||
+                pendingState.afMode != m3aState.afMode ||
+                pendingState.afTriggerId != m3aState.afTriggerId) {
+            ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
+                    __FUNCTION__, cameraId,
+                    m3aState.afState, pendingState.afState,
+                    m3aState.afMode, pendingState.afMode,
+                    m3aState.afTriggerId, pendingState.afTriggerId);
+            client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
+
+            m3aState.afState = pendingState.afState;
+            m3aState.afMode = pendingState.afMode;
+            m3aState.afTriggerId = pendingState.afTriggerId;
+            mLastAFrameNumber = frameNumber;
+        }
+    }
+
+    if (gotAWBState && (frameNumber > mLastAWBFrameNumber)) {
+        if (pendingState.awbState != m3aState.awbState ||
+                pendingState.awbMode != m3aState.awbMode) {
+            ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
+                    __FUNCTION__, cameraId,
+                    m3aState.awbState, pendingState.awbState,
+                    m3aState.awbMode, pendingState.awbMode);
+            client->notifyAutoWhitebalance(pendingState.awbState,
+                    pendingState.aeTriggerId);
+
+            m3aState.awbMode = pendingState.awbMode;
+            m3aState.awbState = pendingState.awbState;
+            mLastAWBFrameNumber = frameNumber;
+        }
+    }
+
+    gotAllStates &= gotAEState & gotAFState & gotAWBState;
     if (!gotAllStates) {
         // If not all states are received, put the pending state to mPending3AStates.
         if (index == NAME_NOT_FOUND) {
@@ -291,40 +342,10 @@
         return NOT_ENOUGH_DATA;
     }
 
-    // Once all 3A states are received, notify the client about 3A changes.
-    if (pendingState.aeState != m3aState.aeState ||
-            pendingState.aeTriggerId > m3aState.aeTriggerId) {
-        ALOGV("%s: Camera %d: AE state %d->%d",
-                __FUNCTION__, cameraId,
-                m3aState.aeState, pendingState.aeState);
-        client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
-    }
-
-    if (pendingState.afState != m3aState.afState ||
-        pendingState.afMode != m3aState.afMode ||
-        pendingState.afTriggerId != m3aState.afTriggerId) {
-        ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
-                __FUNCTION__, cameraId,
-                m3aState.afState, pendingState.afState,
-                m3aState.afMode, pendingState.afMode,
-                m3aState.afTriggerId, pendingState.afTriggerId);
-        client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
-    }
-    if (pendingState.awbState != m3aState.awbState ||
-        pendingState.awbMode != m3aState.awbMode) {
-        ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
-                __FUNCTION__, cameraId,
-                m3aState.awbState, pendingState.awbState,
-                m3aState.awbMode, pendingState.awbMode);
-        client->notifyAutoWhitebalance(pendingState.awbState,
-                pendingState.aeTriggerId);
-    }
-
     if (index != NAME_NOT_FOUND) {
         mPending3AStates.removeItemsAt(index);
     }
 
-    m3aState = pendingState;
     mLast3AFrameNumber = frameNumber;
 
     return OK;
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 62a4e91..8183c12 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -104,8 +104,7 @@
     // Track most recent frame number for which 3A notifications were sent for.
     // Used to filter against sending 3A notifications for the same frame
     // several times.
-    int32_t mLast3AFrameNumber;
-
+    int32_t mLast3AFrameNumber, mLastAEFrameNumber, mLastAFrameNumber, mLastAWBFrameNumber;
     // Emit FaceDetection event to java if faces changed
     void callbackFaceDetection(const sp<Camera2Client>& client,
                                const camera_frame_metadata &metadata);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 8d8bcab..28d186a 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -41,23 +41,29 @@
         int cameraFacing) :
         cameraId(cameraId),
         cameraFacing(cameraFacing),
-        info(NULL) {
+        info(NULL),
+        mDefaultSceneMode(ANDROID_CONTROL_SCENE_MODE_DISABLED) {
 }
 
 Parameters::~Parameters() {
 }
 
-status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) {
+status_t Parameters::initialize(CameraDeviceBase *device, int deviceVersion) {
     status_t res;
+    if (device == nullptr) {
+        ALOGE("%s: device is null!", __FUNCTION__);
+        return BAD_VALUE;
+    }
 
-    if (info->entryCount() == 0) {
+    const CameraMetadata& info = device->info();
+    if (info.entryCount() == 0) {
         ALOGE("%s: No static information provided!", __FUNCTION__);
         return BAD_VALUE;
     }
-    Parameters::info = info;
+    Parameters::info = &info;
     mDeviceVersion = deviceVersion;
 
-    res = buildFastInfo();
+    res = buildFastInfo(device);
     if (res != OK) return res;
 
     res = buildQuirks();
@@ -557,6 +563,10 @@
                     noSceneModes = true;
                     break;
                 case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
+                    // Face priority can be used as an alternate default if supported.
+                    // Per the API contract it shouldn't override the user-set flash,
+                    // white balance, and focus modes.
+                    mDefaultSceneMode = availableSceneModes.data.u8[i];
                     // Not in old API
                     addComma = false;
                     break;
@@ -761,17 +771,7 @@
     focusingAreas.clear();
     focusingAreas.add(Parameters::Area(0,0,0,0,0));
 
-    if (fastInfo.isExternalCamera) {
-        params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, -1.0);
-    } else {
-        camera_metadata_ro_entry_t availableFocalLengths =
-            staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
-        if (!availableFocalLengths.count) return NO_INIT;
-
-        float minFocalLength = availableFocalLengths.data.f[0];
-        params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength);
-    }
-
+    params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, fastInfo.defaultFocalLength);
 
     float horizFov, vertFov;
     res = calculatePictureFovs(&horizFov, &vertFov);
@@ -954,13 +954,24 @@
         const uint8_t *caps = availableCapabilities.data.u8;
         for (size_t i = 0; i < availableCapabilities.count; i++) {
             if (ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING ==
-                caps[i]) {
+                    caps[i]) {
                 isZslReprocessPresent = true;
                 break;
             }
         }
     }
 
+    isDistortionCorrectionSupported = false;
+    camera_metadata_ro_entry_t distortionCorrectionModes =
+            staticInfo(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES);
+    for (size_t i = 0; i < distortionCorrectionModes.count; i++) {
+        if (distortionCorrectionModes.data.u8[i] !=
+                ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+            isDistortionCorrectionSupported = true;
+            break;
+        }
+    }
+
     if (isDeviceZslSupported || slowJpegMode ||
             property_get_bool("camera.disable_zsl_mode", false)) {
         ALOGI("Camera %d: Disabling ZSL mode", cameraId);
@@ -982,7 +993,7 @@
     return paramsFlattened;
 }
 
-status_t Parameters::buildFastInfo() {
+status_t Parameters::buildFastInfo(CameraDeviceBase *device) {
 
     camera_metadata_ro_entry_t activeArraySize =
         staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4);
@@ -1098,20 +1109,12 @@
             focusDistanceCalibration.data.u8[0] !=
             ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED);
 
-
-    camera_metadata_ro_entry_t hwLevel = staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
-    if (!hwLevel.count) return NO_INIT;
-    fastInfo.isExternalCamera =
-            hwLevel.data.u8[0] == ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL;
-
-    camera_metadata_ro_entry_t availableFocalLengths =
-        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, /*required*/false);
-    if (!availableFocalLengths.count && !fastInfo.isExternalCamera) return NO_INIT;
+    res = getDefaultFocalLength(device);
+    if (res != OK) return res;
 
     SortedVector<int32_t> availableFormats = getAvailableOutputFormats();
     if (!availableFormats.size()) return NO_INIT;
 
-
     if (sceneModeOverrides.count > 0) {
         // sceneModeOverrides is defined to have 3 entries for each scene mode,
         // which are AE, AWB, and AF override modes the HAL wants for that scene
@@ -1189,19 +1192,6 @@
     fastInfo.bestFaceDetectMode = bestFaceDetectMode;
     fastInfo.maxFaces = maxFaces;
 
-    // Find smallest (widest-angle) focal length to use as basis of still
-    // picture FOV reporting.
-    if (fastInfo.isExternalCamera) {
-        fastInfo.minFocalLength = -1.0;
-    } else {
-        fastInfo.minFocalLength = availableFocalLengths.data.f[0];
-        for (size_t i = 1; i < availableFocalLengths.count; i++) {
-            if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) {
-                fastInfo.minFocalLength = availableFocalLengths.data.f[i];
-            }
-        }
-    }
-
     // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888
     fastInfo.useFlexibleYuv = false;
     for (size_t i = 0; i < availableFormats.size(); i++) {
@@ -1215,6 +1205,35 @@
 
     fastInfo.maxJpegSize = getMaxSize(getAvailableJpegSizes());
 
+    isZslReprocessPresent = false;
+    camera_metadata_ro_entry_t availableCapabilities =
+        staticInfo(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    if (0 < availableCapabilities.count) {
+        const uint8_t *caps = availableCapabilities.data.u8;
+        for (size_t i = 0; i < availableCapabilities.count; i++) {
+            if (ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING ==
+                    caps[i]) {
+                isZslReprocessPresent = true;
+                break;
+            }
+        }
+    }
+    if (isZslReprocessPresent) {
+        Vector<StreamConfiguration> scs = getStreamConfigurations();
+        Size maxPrivInputSize = {0, 0};
+        for (const auto& sc : scs) {
+            if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT &&
+                    sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+                if (sc.width * sc.height > maxPrivInputSize.width * maxPrivInputSize.height) {
+                    maxPrivInputSize = {sc.width, sc.height};
+                }
+            }
+        }
+        fastInfo.maxZslSize = maxPrivInputSize;
+    } else {
+        fastInfo.maxZslSize = {0, 0};
+    }
+
     return OK;
 }
 
@@ -1720,7 +1739,7 @@
 
     // SCENE_MODE
     validatedParams.sceneMode = sceneModeStringToEnum(
-        newParams.get(CameraParameters::KEY_SCENE_MODE) );
+        newParams.get(CameraParameters::KEY_SCENE_MODE), mDefaultSceneMode);
     if (validatedParams.sceneMode != sceneMode &&
             validatedParams.sceneMode !=
             ANDROID_CONTROL_SCENE_MODE_DISABLED) {
@@ -1738,7 +1757,7 @@
         }
     }
     bool sceneModeSet =
-            validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED;
+            validatedParams.sceneMode != mDefaultSceneMode;
 
     // FLASH_MODE
     if (sceneModeSet) {
@@ -2068,15 +2087,24 @@
 
     if (intent.count == 0) return BAD_VALUE;
 
+    uint8_t distortionMode = ANDROID_DISTORTION_CORRECTION_MODE_OFF;
     if (intent.data.u8[0] == ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE) {
         res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
                 fastInfo.bestStillCaptureFpsRange, 2);
+        distortionMode = ANDROID_DISTORTION_CORRECTION_MODE_HIGH_QUALITY;
     } else {
         res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
                 previewFpsRange, 2);
+        distortionMode = ANDROID_DISTORTION_CORRECTION_MODE_FAST;
     }
     if (res != OK) return res;
 
+    if (isDistortionCorrectionSupported) {
+        res = request->update(ANDROID_DISTORTION_CORRECTION_MODE,
+                &distortionMode, 1);
+        if (res != OK) return res;
+    }
+
     if (autoWhiteBalanceLockAvailable) {
         uint8_t reqWbLock = autoWhiteBalanceLock ?
                 ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
@@ -2108,7 +2136,7 @@
     uint8_t reqSceneMode =
             sceneModeActive ? sceneMode :
             enableFaceDetect ? (uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY :
-            (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED;
+            mDefaultSceneMode;
     res = request->update(ANDROID_CONTROL_SCENE_MODE,
             &reqSceneMode, 1);
     if (res != OK) return res;
@@ -2397,6 +2425,50 @@
     return true;
 }
 
+status_t Parameters::getDefaultFocalLength(CameraDeviceBase *device) {
+    if (device == nullptr) {
+        ALOGE("%s: Camera device is nullptr", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    camera_metadata_ro_entry_t hwLevel = staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
+    if (!hwLevel.count) return NO_INIT;
+    fastInfo.isExternalCamera =
+            hwLevel.data.u8[0] == ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL;
+
+    camera_metadata_ro_entry_t availableFocalLengths =
+        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, /*required*/false);
+    if (!availableFocalLengths.count && !fastInfo.isExternalCamera) return NO_INIT;
+
+    // Find focal length in PREVIEW template to use as default focal length.
+    if (fastInfo.isExternalCamera) {
+        fastInfo.defaultFocalLength = -1.0;
+    } else {
+        // Find smallest (widest-angle) focal length to use as basis of still
+        // picture FOV reporting.
+        fastInfo.defaultFocalLength = availableFocalLengths.data.f[0];
+        for (size_t i = 1; i < availableFocalLengths.count; i++) {
+            if (fastInfo.defaultFocalLength > availableFocalLengths.data.f[i]) {
+                fastInfo.defaultFocalLength = availableFocalLengths.data.f[i];
+            }
+        }
+
+        // Use focal length in preview template if it exists
+        CameraMetadata previewTemplate;
+        status_t res = device->createDefaultRequest(CAMERA3_TEMPLATE_PREVIEW, &previewTemplate);
+        if (res != OK) {
+            ALOGE("%s: Failed to create default PREVIEW request: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        camera_metadata_entry entry = previewTemplate.find(ANDROID_LENS_FOCAL_LENGTH);
+        if (entry.count != 0) {
+            fastInfo.defaultFocalLength = entry.data.f[0];
+        }
+    }
+    return OK;
+}
+
 const char* Parameters::getStateName(State state) {
 #define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
     switch(state) {
@@ -2540,12 +2612,12 @@
         -1;
 }
 
-int Parameters::sceneModeStringToEnum(const char *sceneMode) {
+int Parameters::sceneModeStringToEnum(const char *sceneMode, uint8_t defaultSceneMode) {
     return
         !sceneMode ?
-            ANDROID_CONTROL_SCENE_MODE_DISABLED :
+            defaultSceneMode :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ?
-            ANDROID_CONTROL_SCENE_MODE_DISABLED :
+            defaultSceneMode :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ?
             ANDROID_CONTROL_SCENE_MODE_ACTION :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ?
@@ -3192,12 +3264,12 @@
     if (horizFov != NULL) {
         *horizFov = 180 / M_PI * 2 *
                 atanf(horizCropFactor * sensorSize.data.f[0] /
-                        (2 * fastInfo.minFocalLength));
+                        (2 * fastInfo.defaultFocalLength));
     }
     if (vertFov != NULL) {
         *vertFov = 180 / M_PI * 2 *
                 atanf(vertCropFactor * sensorSize.data.f[1] /
-                        (2 * fastInfo.minFocalLength));
+                        (2 * fastInfo.defaultFocalLength));
     }
     return OK;
 }
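The field-of-view computation above is the usual pinhole relation, fov = 2 * atan(sensorDimension / (2 * focalLength)), with the crop factor scaling the effective sensor dimension. A quick sanity check with made-up numbers (purely illustrative, not taken from any real HAL):

    #include <math.h>
    // Mirrors the calculatePictureFovs() arithmetic above with hypothetical values.
    static float exampleHorizFovDegrees() {
        const float sensorWidthMm = 6.4f;         // assumed physical sensor width
        const float defaultFocalLengthMm = 4.0f;  // assumed default focal length
        const float horizCropFactor = 1.0f;       // full sensor, no crop
        return 180.f / M_PI * 2 *
                atanf(horizCropFactor * sensorWidthMm / (2 * defaultFocalLengthMm));
    }
    // exampleHorizFovDegrees() evaluates to roughly 77.3 degrees.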
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index fe725fd..42e7a47 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -30,6 +30,8 @@
 #include <camera/CameraParameters2.h>
 #include <camera/CameraMetadata.h>
 
+#include "common/CameraDeviceBase.h"
+
 namespace android {
 namespace camera2 {
 
@@ -177,6 +179,8 @@
     bool isZslReprocessPresent;
     // Whether the device supports enableZsl.
     bool isDeviceZslSupported;
+    // Whether the device supports geometric distortion correction
+    bool isDistortionCorrectionSupported;
 
     // Overall camera state
     enum State {
@@ -239,9 +243,10 @@
         };
         DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides;
         bool isExternalCamera;
-        float minFocalLength;
+        float defaultFocalLength;
         bool useFlexibleYuv;
         Size maxJpegSize;
+        Size maxZslSize;
     } fastInfo;
 
     // Quirks information; these are short-lived flags to enable workarounds for
@@ -261,10 +266,10 @@
     ~Parameters();
 
     // Sets up default parameters
-    status_t initialize(const CameraMetadata *info, int deviceVersion);
+    status_t initialize(CameraDeviceBase *device, int deviceVersion);
 
     // Build fast-access device static info from static info
-    status_t buildFastInfo();
+    status_t buildFastInfo(CameraDeviceBase *device);
     // Query for quirks from static info
     status_t buildQuirks();
 
@@ -297,6 +302,9 @@
     // whether zero shutter lag should be used for non-recording operation
     bool useZeroShutterLag() const;
 
+    // Get default focal length
+    status_t getDefaultFocalLength(CameraDeviceBase *camera);
+
     // Calculate the crop region rectangle, either tightly about the preview
     // resolution, or a region just based on the active array; both take
     // into account the current zoom level.
@@ -323,7 +331,7 @@
     static const char* wbModeEnumToString(uint8_t wbMode);
     static int effectModeStringToEnum(const char *effectMode);
     static int abModeStringToEnum(const char *abMode);
-    static int sceneModeStringToEnum(const char *sceneMode);
+    static int sceneModeStringToEnum(const char *sceneMode, uint8_t defaultScene);
     static flashMode_t flashModeStringToEnum(const char *flashMode);
     static const char* flashModeEnumToString(flashMode_t flashMode);
     static focusMode_t focusModeStringToEnum(const char *focusMode);
@@ -431,6 +439,7 @@
     Size getMaxSize(const Vector<Size>& sizes);
 
     int mDeviceVersion;
+    uint8_t mDefaultSceneMode;
 };
 
 // This class encapsulates the Parameters class so that it can only be accessed
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 372a2c5..8dc9863 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -231,63 +231,9 @@
         return INVALID_OPERATION;
     }
 
-    if ((mZslStreamId != NO_STREAM) || (mInputStreamId != NO_STREAM)) {
-        // Check if stream parameters have to change
-        CameraDeviceBase::StreamInfo streamInfo;
-        res = device->getStreamInfo(mZslStreamId, &streamInfo);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Error querying capture output stream info: "
-                    "%s (%d)", __FUNCTION__,
-                    client->getCameraId(), strerror(-res), res);
-            return res;
-        }
-        if (streamInfo.width != (uint32_t)params.fastInfo.arrayWidth ||
-                streamInfo.height != (uint32_t)params.fastInfo.arrayHeight) {
-            if (mZslStreamId != NO_STREAM) {
-                ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
-                      "dimensions changed",
-                    __FUNCTION__, client->getCameraId(), mZslStreamId);
-                res = device->deleteStream(mZslStreamId);
-                if (res == -EBUSY) {
-                    ALOGV("%s: Camera %d: Device is busy, call updateStream again "
-                          " after it becomes idle", __FUNCTION__, mId);
-                    return res;
-                } else if(res != OK) {
-                    ALOGE("%s: Camera %d: Unable to delete old output stream "
-                            "for ZSL: %s (%d)", __FUNCTION__,
-                            client->getCameraId(), strerror(-res), res);
-                    return res;
-                }
-                mZslStreamId = NO_STREAM;
-            }
-
-            if (mInputStreamId != NO_STREAM) {
-                ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
-                      "dimensions changed",
-                    __FUNCTION__, client->getCameraId(), mInputStreamId);
-                res = device->deleteStream(mInputStreamId);
-                if (res == -EBUSY) {
-                    ALOGV("%s: Camera %d: Device is busy, call updateStream again "
-                          " after it becomes idle", __FUNCTION__, mId);
-                    return res;
-                } else if(res != OK) {
-                    ALOGE("%s: Camera %d: Unable to delete old output stream "
-                            "for ZSL: %s (%d)", __FUNCTION__,
-                            client->getCameraId(), strerror(-res), res);
-                    return res;
-                }
-                mInputStreamId = NO_STREAM;
-            }
-            if (nullptr != mInputProducer.get()) {
-                mInputProducer->disconnect(NATIVE_WINDOW_API_CPU);
-                mInputProducer.clear();
-            }
-        }
-    }
-
     if (mInputStreamId == NO_STREAM) {
-        res = device->createInputStream(params.fastInfo.arrayWidth,
-            params.fastInfo.arrayHeight, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+        res = device->createInputStream(params.fastInfo.maxZslSize.width,
+            params.fastInfo.maxZslSize.height, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
             &mInputStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create input stream: "
@@ -309,8 +255,8 @@
         mProducer->setName(String8("Camera2-ZslRingBufferConsumer"));
         sp<Surface> outSurface = new Surface(producer);
 
-        res = device->createStream(outSurface, params.fastInfo.arrayWidth,
-            params.fastInfo.arrayHeight, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+        res = device->createStream(outSurface, params.fastInfo.maxZslSize.width,
+            params.fastInfo.maxZslSize.height, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
             HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId,
             String8());
         if (res != OK) {
@@ -856,29 +802,25 @@
                             __FUNCTION__);
                     continue;
                 }
-                uint8_t afMode = entry.data.u8[0];
-                if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
-                    // Skip all the ZSL buffer for manual AF mode, as we don't really
-                    // know the af state.
-                    continue;
-                }
-
                 // Check AF state if device has focuser and focus mode isn't fixed
-                if (mHasFocuser && !isFixedFocusMode(afMode)) {
-                    // Make sure the candidate frame has good focus.
-                    entry = frame.find(ANDROID_CONTROL_AF_STATE);
-                    if (entry.count == 0) {
-                        ALOGW("%s: ZSL queue frame has no AF state field!",
-                                __FUNCTION__);
-                        continue;
-                    }
-                    uint8_t afState = entry.data.u8[0];
-                    if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
-                            afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
-                            afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
-                        ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
-                                __FUNCTION__, afState);
-                        continue;
+                if (mHasFocuser) {
+                    uint8_t afMode = entry.data.u8[0];
+                    if (!isFixedFocusMode(afMode)) {
+                        // Make sure the candidate frame has good focus.
+                        entry = frame.find(ANDROID_CONTROL_AF_STATE);
+                        if (entry.count == 0) {
+                            ALOGW("%s: ZSL queue frame has no AF state field!",
+                                    __FUNCTION__);
+                            continue;
+                        }
+                        uint8_t afState = entry.data.u8[0];
+                        if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+                                afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+                                afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+                            ALOGVV("%s: ZSL queue frame AF state %d is not good for capture,"
+                                    " skip it", __FUNCTION__, afState);
+                            continue;
+                        }
                     }
                 }
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index c49de8e..98d0534 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -624,11 +624,19 @@
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
-    if (!checkPhysicalCameraId(physicalCameraId)) {
-        String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
+    if (physicalCameraId.size() > 0) {
+        std::vector<std::string> physicalCameraIds;
+        std::string physicalId(physicalCameraId.string());
+        bool logicalCamera =
+                CameraProviderManager::isLogicalCamera(mDevice->info(), &physicalCameraIds);
+        if (!logicalCamera ||
+                std::find(physicalCameraIds.begin(), physicalCameraIds.end(), physicalId) ==
+                physicalCameraIds.end()) {
+            String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
                     mCameraIdStr.string(), physicalCameraId.string());
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
     }
     std::vector<sp<Surface>> surfaces;
     std::vector<sp<IBinder>> binders;
@@ -1144,43 +1152,6 @@
     return binder::Status::ok();
 }
 
-bool CameraDeviceClient::checkPhysicalCameraId(const String8& physicalCameraId) {
-    if (0 == physicalCameraId.size()) {
-        return true;
-    }
-
-    CameraMetadata staticInfo = mDevice->info();
-    camera_metadata_entry_t entryCap;
-    bool isLogicalCam = false;
-
-    entryCap = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
-    for (size_t i = 0; i < entryCap.count; ++i) {
-        uint8_t capability = entryCap.data.u8[i];
-        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
-            isLogicalCam = true;
-        }
-    }
-    if (!isLogicalCam) {
-        return false;
-    }
-
-    camera_metadata_entry_t entryIds = staticInfo.find(ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS);
-    const uint8_t* ids = entryIds.data.u8;
-    size_t start = 0;
-    for (size_t i = 0; i < entryIds.count; ++i) {
-        if (ids[i] == '\0') {
-            if (start != i) {
-                String8 currentId((const char*)ids+start);
-                if (currentId == physicalCameraId) {
-                    return true;
-                }
-            }
-            start = i+1;
-        }
-    }
-    return false;
-}
-
 bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
         int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
         /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 077e05e..3be6399 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -98,9 +98,14 @@
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     std::vector<std::string> deviceIds;
     for (auto& provider : mProviders) {
-        for (auto& id : provider->mUniqueAPI1CompatibleCameraIds) {
-            deviceIds.push_back(id);
-        }
+        std::vector<std::string> providerDeviceIds = provider->mUniqueAPI1CompatibleCameraIds;
+
+        // API1 apps don't handle logical and physical camera devices well. So
+        // for each [logical, physical1, physical2, ...] id combo, only take the
+        // first id advertised by the HAL and filter out the rest.
+        filterLogicalCameraIdsLocked(providerDeviceIds);
+
+        deviceIds.insert(deviceIds.end(), providerDeviceIds.begin(), providerDeviceIds.end());
     }
 
     std::sort(deviceIds.begin(), deviceIds.end(),
@@ -172,11 +177,7 @@
 status_t CameraProviderManager::getCameraCharacteristics(const std::string &id,
         CameraMetadata* characteristics) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
-
-    auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
-    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
-
-    return deviceInfo->getCameraCharacteristics(characteristics);
+    return getCameraCharacteristicsLocked(id, characteristics);
 }
 
 status_t CameraProviderManager::getHighestSupportedVersion(const std::string &id,
@@ -335,6 +336,7 @@
         const hardware::hidl_string& /*fqName*/,
         const hardware::hidl_string& name,
         bool /*preexisting*/) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
     {
         std::lock_guard<std::mutex> lock(mInterfaceMutex);
 
@@ -391,6 +393,37 @@
     return ret;
 }
 
+bool CameraProviderManager::isLogicalCamera(const CameraMetadata& staticInfo,
+        std::vector<std::string>* physicalCameraIds) {
+    bool isLogicalCam = false;
+    camera_metadata_ro_entry_t entryCap;
+
+    entryCap = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    for (size_t i = 0; i < entryCap.count; ++i) {
+        uint8_t capability = entryCap.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+            isLogicalCam = true;
+            break;
+        }
+    }
+    if (!isLogicalCam) {
+        return false;
+    }
+
+    camera_metadata_ro_entry_t entryIds = staticInfo.find(ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS);
+    const uint8_t* ids = entryIds.data.u8;
+    size_t start = 0;
+    for (size_t i = 0; i < entryIds.count; ++i) {
+        if (ids[i] == '\0') {
+            if (start != i) {
+                physicalCameraIds->push_back((const char*)ids+start);
+            }
+            start = i+1;
+        }
+    }
+    return true;
+}
+
 status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
     for (const auto& providerInfo : mProviders) {
         if (providerInfo->mProviderName == newProvider) {
@@ -426,6 +459,7 @@
 }
 
 status_t CameraProviderManager::removeProvider(const std::string& provider) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
     std::unique_lock<std::mutex> lock(mInterfaceMutex);
     std::vector<String8> removedDeviceIds;
     status_t res = NAME_NOT_FOUND;
@@ -599,7 +633,12 @@
 
     mUniqueCameraIds.insert(id);
     if (isAPI1Compatible) {
-        mUniqueAPI1CompatibleCameraIds.insert(id);
+        // addDevice can be called more than once for the same camera id if HAL
+        // supports openLegacy.
+        if (std::find(mUniqueAPI1CompatibleCameraIds.begin(), mUniqueAPI1CompatibleCameraIds.end(),
+                id) == mUniqueAPI1CompatibleCameraIds.end()) {
+            mUniqueAPI1CompatibleCameraIds.push_back(id);
+        }
     }
 
     if (parsedId != nullptr) {
@@ -613,7 +652,9 @@
         if ((*it)->mId == id) {
             mUniqueCameraIds.erase(id);
             if ((*it)->isAPI1Compatible()) {
-                mUniqueAPI1CompatibleCameraIds.erase(id);
+                mUniqueAPI1CompatibleCameraIds.erase(std::remove(
+                        mUniqueAPI1CompatibleCameraIds.begin(),
+                        mUniqueAPI1CompatibleCameraIds.end(), id));
             }
             mDevices.erase(it);
             break;
@@ -783,6 +824,18 @@
                 name.c_str(), statusToString(status));
         return nullptr;
     }
+
+    for (auto& conflictName : resourceCost.conflictingDevices) {
+        uint16_t major, minor;
+        std::string type, id;
+        status_t res = parseDeviceName(conflictName, &major, &minor, &type, &id);
+        if (res != OK) {
+            ALOGE("%s: Failed to parse conflicting device %s", __FUNCTION__, conflictName.c_str());
+            return nullptr;
+        }
+        conflictName = id;
+    }
+
     return std::unique_ptr<DeviceInfo>(
         new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
                 cameraInterface));
@@ -1405,5 +1458,51 @@
     return OK;
 }
 
+status_t CameraProviderManager::getCameraCharacteristicsLocked(const std::string &id,
+        CameraMetadata* characteristics) const {
+    auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->getCameraCharacteristics(characteristics);
+}
+
+void CameraProviderManager::filterLogicalCameraIdsLocked(
+        std::vector<std::string>& deviceIds) const
+{
+    std::unordered_set<std::string> removedIds;
+
+    for (auto& deviceId : deviceIds) {
+        CameraMetadata info;
+        status_t res = getCameraCharacteristicsLocked(deviceId, &info);
+        if (res != OK) {
+            ALOGE("%s: Failed to getCameraCharacteristics for id %s", __FUNCTION__,
+                    deviceId.c_str());
+            return;
+        }
+
+        // idCombo contains the ids of a logical camera and its physical cameras
+        std::vector<std::string> idCombo;
+        bool logicalCamera = CameraProviderManager::isLogicalCamera(info, &idCombo);
+        if (!logicalCamera) {
+            continue;
+        }
+        idCombo.push_back(deviceId);
+
+        for (auto& id : deviceIds) {
+            auto foundId = std::find(idCombo.begin(), idCombo.end(), id);
+            if (foundId == idCombo.end()) {
+                continue;
+            }
+
+            idCombo.erase(foundId);
+            removedIds.insert(idCombo.begin(), idCombo.end());
+            break;
+        }
+    }
+
+    deviceIds.erase(std::remove_if(deviceIds.begin(), deviceIds.end(),
+            [&removedIds](const std::string& s) {return removedIds.find(s) != removedIds.end();}),
+            deviceIds.end());
+}
 
 } // namespace android
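For reference, ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS packs the physical camera ids into a single '\0'-separated byte array, which is what the start/i loop in isLogicalCamera() splits apart. A minimal sketch with made-up ids (the blob contents and the wrapper function are illustrative only):

    #include <stdint.h>
    #include <string>
    #include <vector>
    static std::vector<std::string> splitPhysicalIdsExample() {
        // Hypothetical blob advertising physical cameras "2" and "3".
        const uint8_t ids[] = {'2', '\0', '3', '\0'};
        std::vector<std::string> physicalCameraIds;
        size_t start = 0;
        for (size_t i = 0; i < sizeof(ids); ++i) {
            if (ids[i] == '\0') {
                if (start != i) {
                    physicalCameraIds.push_back(std::string((const char*)ids + start, i - start));
                }
                start = i + 1;
            }
        }
        return physicalCameraIds;  // {"2", "3"}
    }
    // filterLogicalCameraIdsLocked() then keeps only the first id it encounters from each
    // [logical, "2", "3"] combo and drops the rest from the API1-compatible list.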
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index bbe6789..c523c2d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -230,6 +230,13 @@
             hardware::hidl_version minVersion = hardware::hidl_version{0,0},
             hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
 
+    /*
+     * Check whether the camera described by staticInfo is a logical camera. If so,
+     * also return the ids of its physical cameras.
+     */
+    static bool isLogicalCamera(const CameraMetadata& staticInfo,
+            std::vector<std::string>* physicalCameraIds);
+
 private:
     // All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
     mutable std::mutex mInterfaceMutex;
@@ -239,6 +246,9 @@
     wp<StatusListener> mListener;
     ServiceInteractionProxy* mServiceProxy;
 
+    // mProviderLifecycleLock is locked during onRegistration and removeProvider
+    mutable std::mutex mProviderLifecycleLock;
+
     static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
 
     struct ProviderInfo :
@@ -314,7 +324,7 @@
         std::vector<std::unique_ptr<DeviceInfo>> mDevices;
         std::unordered_set<std::string> mUniqueCameraIds;
         int mUniqueDeviceCount;
-        std::unordered_set<std::string> mUniqueAPI1CompatibleCameraIds;
+        std::vector<std::string> mUniqueAPI1CompatibleCameraIds;
 
         // HALv1-specific camera fields, including the actual device interface
         struct DeviceInfo1 : public DeviceInfo {
@@ -414,6 +424,9 @@
     static const char* torchStatusToString(
         const hardware::camera::common::V1_0::TorchModeStatus&);
 
+    status_t getCameraCharacteristicsLocked(const std::string &id,
+            CameraMetadata* characteristics) const;
+    void filterLogicalCameraIdsLocked(std::vector<std::string>& deviceIds) const;
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 67b5e06..543914e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -77,7 +77,8 @@
         mNextShutterFrameNumber(0),
         mNextReprocessShutterFrameNumber(0),
         mListener(NULL),
-        mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
+        mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID),
+        mLastTemplateId(-1)
 {
     ATRACE_CALL();
     camera3_callback_ops::notify = &sNotify;
@@ -248,6 +249,14 @@
         }
     }
 
+    if (DistortionMapper::isDistortionSupported(mDeviceInfo)) {
+        res = mDistortionMapper.setupStaticInfo(mDeviceInfo);
+        if (res != OK) {
+            SET_ERR_L("Unable to read necessary calibration fields for distortion correction");
+            return res;
+        }
+    }
+
     return OK;
 }
 
@@ -1597,6 +1606,18 @@
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
+    // In case the client doesn't include any session parameter, try a
+    // speculative configuration using the values from the last cached
+    // default request.
+    if (sessionParams.isEmpty() &&
+            ((mLastTemplateId > 0) && (mLastTemplateId < CAMERA3_TEMPLATE_COUNT)) &&
+            (!mRequestTemplateCache[mLastTemplateId].isEmpty())) {
+        ALOGV("%s: Speculative session param configuration with template id: %d", __func__,
+                mLastTemplateId);
+        return filterParamsAndConfigureLocked(mRequestTemplateCache[mLastTemplateId],
+                operatingMode);
+    }
+
     return filterParamsAndConfigureLocked(sessionParams, operatingMode);
 }
 
@@ -1673,6 +1694,7 @@
 
         if (!mRequestTemplateCache[templateId].isEmpty()) {
             *request = mRequestTemplateCache[templateId];
+            mLastTemplateId = templateId;
             return OK;
         }
     }
@@ -1697,6 +1719,7 @@
         mRequestTemplateCache[templateId].acquire(rawRequest);
 
         *request = mRequestTemplateCache[templateId];
+        mLastTemplateId = templateId;
     }
     return OK;
 }
@@ -2968,6 +2991,14 @@
         }
     }
 
+    // Fix up some result metadata to account for HAL-level distortion correction
+    status_t res = mDistortionMapper.correctCaptureResult(&captureResult.mMetadata);
+    if (res != OK) {
+        SET_ERR("Unable to correct capture result metadata for frame %d: %s (%d)",
+                frameNumber, strerror(res), res);
+        return;
+    }
+
     mTagMonitor.monitorMetadata(TagMonitor::RESULT,
             frameNumber, timestamp.data.i64[0], captureResult.mMetadata);
 
@@ -4045,6 +4076,7 @@
         mRepeatingLastFrameNumber(
             hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
         mPrepareVideoStream(false),
+        mConstrainedMode(false),
         mRequestLatency(kRequestLatencyBinSize),
         mSessionParamKeys(sessionParamKeys),
         mLatestSessionParams(sessionParamKeys.size()) {
@@ -4068,6 +4100,7 @@
     mLatestSessionParams = sessionParams;
     // Prepare video stream for high speed recording.
     mPrepareVideoStream = isConstrainedHighSpeed;
+    mConstrainedMode = isConstrainedHighSpeed;
 }
 
 status_t Camera3Device::RequestThread::queueRequestList(
@@ -4325,9 +4358,9 @@
     uint32_t numRequestProcessed = 0;
     for (size_t i = 0; i < batchSize; i++) {
         requests[i] = &mNextRequests.editItemAt(i).halRequest;
+        ATRACE_ASYNC_BEGIN("frame capture", mNextRequests[i].halRequest.frame_number);
     }
 
-    ATRACE_ASYNC_BEGIN("batch frame capture", mNextRequests[0].halRequest.frame_number);
     res = mInterface->processBatchCaptureRequests(requests, &numRequestProcessed);
 
     bool triggerRemoveFailed = false;
@@ -4482,6 +4515,17 @@
     return maxExpectedDuration;
 }
 
+bool Camera3Device::RequestThread::skipHFRTargetFPSUpdate(int32_t tag,
+        const camera_metadata_ro_entry_t& newEntry, const camera_metadata_entry_t& currentEntry) {
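+    // In constrained high speed sessions only the upper FPS bound is expected to drive
+    // the HAL operating mode, so an AE target FPS range update whose maximum is
+    // unchanged should not force a session re-configuration.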
+    if (mConstrainedMode && (ANDROID_CONTROL_AE_TARGET_FPS_RANGE == tag) &&
+            (newEntry.count == currentEntry.count) && (currentEntry.count == 2) &&
+            (currentEntry.data.i32[1] == newEntry.data.i32[1])) {
+        return true;
+    }
+
+    return false;
+}
+
 bool Camera3Device::RequestThread::updateSessionParameters(const CameraMetadata& settings) {
     ATRACE_CALL();
     bool updatesDetected = false;
@@ -4514,8 +4558,10 @@
 
             if (isDifferent) {
                 ALOGV("%s: Session parameter tag id %d changed", __FUNCTION__, tag);
+                if (!skipHFRTargetFPSUpdate(tag, entry, lastEntry)) {
+                    updatesDetected = true;
+                }
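+                // The new value is still cached below even when re-configuration is
+                // skipped, so later comparisons run against the most recent setting.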
                 mLatestSessionParams.update(entry);
-                updatesDetected = true;
             }
         } else if (lastEntry.count > 0) {
             // Value has been removed
@@ -4675,13 +4721,13 @@
 
         // Insert any queued triggers (before metadata is locked)
         status_t res = insertTriggers(captureRequest);
-
         if (res < 0) {
             SET_ERR("RequestThread: Unable to insert triggers "
                     "(capture request %d, HAL device: %s (%d)",
                     halRequest->frame_number, strerror(-res), res);
             return INVALID_OPERATION;
         }
+
         int triggerCount = res;
         bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
         mPrevTriggers = triggerCount;
@@ -4701,6 +4747,21 @@
                 return INVALID_OPERATION;
             }
 
+            {
+                // Correct metadata regions for distortion correction if enabled
+                sp<Camera3Device> parent = mParent.promote();
+                if (parent != nullptr) {
+                    res = parent->mDistortionMapper.correctCaptureRequest(
+                        &(captureRequest->mSettingsList.begin()->metadata));
+                    if (res != OK) {
+                        SET_ERR("RequestThread: Unable to correct capture requests "
+                                "for lens distortion for request %d: %s (%d)",
+                                halRequest->frame_number, strerror(-res), res);
+                        return INVALID_OPERATION;
+                    }
+                }
+            }
+
             /**
              * The request should be presorted so accesses in HAL
              *   are O(logn). Sidenote, sorting a sorted metadata is nop.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 13b83ba..d8fe19f 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -43,6 +43,7 @@
 #include "common/CameraDeviceBase.h"
 #include "device3/StatusTracker.h"
 #include "device3/Camera3BufferManager.h"
+#include "device3/DistortionMapper.h"
 #include "utils/TagMonitor.h"
 #include "utils/LatencyHistogram.h"
 #include <camera_metadata_hidden.h>
@@ -861,6 +862,11 @@
         // Check and update latest session parameters based on the current request settings.
         bool updateSessionParameters(const CameraMetadata& settings);
 
+        // Check whether an FPS range session parameter update can skip re-configuration
+        // in constrained high speed recording camera sessions.
+        bool skipHFRTargetFPSUpdate(int32_t tag, const camera_metadata_ro_entry_t& newEntry,
+                const camera_metadata_entry_t& currentEntry);
+
         // Re-configure camera using the latest session parameters.
         bool reconfigureCamera();
 
@@ -919,6 +925,8 @@
         // Flag indicating if we should prepare video stream for video requests.
         bool               mPrepareVideoStream;
 
+        bool               mConstrainedMode;
+
         static const int32_t kRequestLatencyBinSize = 40; // in ms
         CameraLatencyHistogram mRequestLatency;
 
@@ -1172,6 +1180,12 @@
 
     /**** End scope for mInFlightLock ****/
 
+    /**
+     * Distortion correction support
+     */
+
+    camera3::DistortionMapper mDistortionMapper;
+
     // Debug tracker for metadata tag value changes
     // - Enabled with the -m <taglist> option to dumpsys, such as
     //   dumpsys -m android.control.aeState,android.control.aeMode
@@ -1184,6 +1198,9 @@
 
     metadata_vendor_id_t mVendorTagId;
 
+    // Cached last requested template id
+    int mLastTemplateId;
+
     /**
      * Static callback forwarding methods from HAL to instance
      */
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index f4d5a18..8a9402e 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -212,7 +212,11 @@
 
     SP_LOGV("%s: Consumer wants %d buffers, Producer wants %zu", __FUNCTION__,
             maxConsumerBuffers, mMaxHalBuffers);
-    size_t totalBufferCount = maxConsumerBuffers + mMaxHalBuffers;
+    // The required output slot count can change depending on the current number of
+    // outputs and the incoming buffer consumption rate. To avoid running out of
+    // slots, set the count to the maximum supported. Buffer allocation is disabled
+    // on the output surfaces, so no real buffers will be allocated.
+    size_t totalBufferCount = BufferQueue::NUM_BUFFER_SLOTS;
     res = native_window_set_buffer_count(outputQueue.get(),
             totalBufferCount);
     if (res != OK) {
@@ -489,6 +493,10 @@
     SP_LOGV("acquired buffer %" PRId64 " from input at slot %d",
             bufferItem.mGraphicBuffer->getId(), bufferItem.mSlot);
 
+    if (bufferItem.mTransformToDisplayInverse) {
+        bufferItem.mTransform |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
+    }
+
     // Attach and queue the buffer to each of the outputs
     BufferTracker& tracker = *(mBuffers[bufferId]);
 
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
new file mode 100644
index 0000000..eef6658
--- /dev/null
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DistMapper"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <algorithm>
+#include <cmath>
+
+#include "device3/DistortionMapper.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Metadata keys to correct when adjusting coordinates for distortion correction
+ */
+
+// Both capture request and result
+constexpr std::array<uint32_t, 3> DistortionMapper::kMeteringRegionsToCorrect = {
+    ANDROID_CONTROL_AF_REGIONS,
+    ANDROID_CONTROL_AE_REGIONS,
+    ANDROID_CONTROL_AWB_REGIONS
+};
+
+// Only capture request
+constexpr std::array<uint32_t, 1> DistortionMapper::kRequestRectsToCorrect = {
+    ANDROID_SCALER_CROP_REGION,
+};
+
+// Only for capture result
+constexpr std::array<uint32_t, 1> DistortionMapper::kResultRectsToCorrect = {
+    ANDROID_SCALER_CROP_REGION,
+};
+
+// Only for capture result
+constexpr std::array<uint32_t, 2> DistortionMapper::kResultPointsToCorrect = {
+    ANDROID_STATISTICS_FACE_RECTANGLES, // Says rectangles, is really points
+    ANDROID_STATISTICS_FACE_LANDMARKS,
+};
+
+
+DistortionMapper::DistortionMapper() : mValidMapping(false), mValidGrids(false) {
+}
+
+bool DistortionMapper::isDistortionSupported(const CameraMetadata &result) {
+    bool isDistortionCorrectionSupported = false;
+    camera_metadata_ro_entry_t distortionCorrectionModes =
+            result.find(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES);
+    for (size_t i = 0; i < distortionCorrectionModes.count; i++) {
+        if (distortionCorrectionModes.data.u8[i] !=
+                ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+            isDistortionCorrectionSupported = true;
+            break;
+        }
+    }
+    return isDistortionCorrectionSupported;
+}
+
+status_t DistortionMapper::setupStaticInfo(const CameraMetadata &deviceInfo) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    camera_metadata_ro_entry_t array;
+
+    array = deviceInfo.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
+    if (array.count != 4) return BAD_VALUE;
+
+    mArrayWidth = array.data.i32[2];
+    mArrayHeight = array.data.i32[3];
+
+    array = deviceInfo.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+    mActiveWidth = array.data.i32[2];
+    mActiveHeight = array.data.i32[3];
+
+    return updateCalibration(deviceInfo);
+}
+
+bool DistortionMapper::calibrationValid() const {
+    std::lock_guard<std::mutex> lock(mMutex);
+
+    return mValidMapping;
+}
+
+status_t DistortionMapper::correctCaptureRequest(CameraMetadata *request) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    status_t res;
+
+    if (!mValidMapping) return OK;
+
+    camera_metadata_entry_t e;
+    e = request->find(ANDROID_DISTORTION_CORRECTION_MODE);
+    if (e.count != 0 && e.data.u8[0] != ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+        for (auto region : kMeteringRegionsToCorrect) {
+            e = request->find(region);
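+            // Each metering region is an (xMin, yMin, xMax, yMax, weight) 5-tuple;
+            // zero-weight regions are skipped.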
+            for (size_t j = 0; j < e.count; j += 5) {
+                int32_t weight = e.data.i32[j + 4];
+                if (weight == 0) {
+                    continue;
+                }
+                res = mapCorrectedToRaw(e.data.i32 + j, 2);
+                if (res != OK) return res;
+                for (size_t k = 0; k < 4; k+=2) {
+                    int32_t& x = e.data.i32[j + k];
+                    int32_t& y = e.data.i32[j + k + 1];
+                    // Clamp to within active array
+                    x = std::max(0, x);
+                    x = std::min(mActiveWidth - 1, x);
+                    y = std::max(0, y);
+                    y = std::min(mActiveHeight - 1, y);
+                }
+            }
+        }
+        for (auto rect : kRequestRectsToCorrect) {
+            e = request->find(rect);
+            res = mapCorrectedRectToRaw(e.data.i32, e.count / 4);
+            if (res != OK) return res;
+        }
+    }
+
+    return OK;
+}
+
+status_t DistortionMapper::correctCaptureResult(CameraMetadata *result) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    status_t res;
+
+    if (!mValidMapping) return OK;
+
+    res = updateCalibration(*result);
+    if (res != OK) {
+        ALOGE("Failure to update lens calibration information");
+        return INVALID_OPERATION;
+    }
+
+    camera_metadata_entry_t e;
+    e = result->find(ANDROID_DISTORTION_CORRECTION_MODE);
+    if (e.count != 0 && e.data.u8[0] != ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+        for (auto region : kMeteringRegionsToCorrect) {
+            e = result->find(region);
+            for (size_t j = 0; j < e.count; j += 5) {
+                int32_t weight = e.data.i32[j + 4];
+                if (weight == 0) {
+                    continue;
+                }
+                res = mapRawToCorrected(e.data.i32 + j, 2);
+                if (res != OK) return res;
+                for (size_t k = 0; k < 4; k+=2) {
+                    int32_t& x = e.data.i32[j + k];
+                    int32_t& y = e.data.i32[j + k + 1];
+                    // Clamp to within active array
+                    x = std::max(0, x);
+                    x = std::min(mActiveWidth - 1, x);
+                    y = std::max(0, y);
+                    y = std::min(mActiveHeight - 1, y);
+                }
+            }
+        }
+        for (auto rect : kResultRectsToCorrect) {
+            e = result->find(rect);
+            res = mapRawRectToCorrected(e.data.i32, e.count / 4);
+            if (res != OK) return res;
+        }
+        for (auto pts : kResultPointsToCorrect) {
+            e = result->find(pts);
+            res = mapRawToCorrected(e.data.i32, e.count / 2);
+            if (res != OK) return res;
+        }
+    }
+
+    return OK;
+}
+
+// Utility methods; not guarded by mutex
+
+status_t DistortionMapper::updateCalibration(const CameraMetadata &result) {
+    camera_metadata_ro_entry_t calib, distortion;
+
+    calib = result.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
+    distortion = result.find(ANDROID_LENS_DISTORTION);
+
+    if (calib.count != 5) return BAD_VALUE;
+    if (distortion.count != 5) return BAD_VALUE;
+
+    // Skip redoing work if no change to calibration fields
+    if (mValidMapping &&
+            mFx == calib.data.f[0] &&
+            mFy == calib.data.f[1] &&
+            mCx == calib.data.f[2] &&
+            mCy == calib.data.f[3] &&
+            mS == calib.data.f[4]) {
+        bool noChange = true;
+        for (size_t i = 0; i < distortion.count; i++) {
+            if (mK[i] != distortion.data.f[i]) {
+                noChange = false;
+                break;
+            }
+        }
+        if (noChange) return OK;
+    }
+
+    mFx = calib.data.f[0];
+    mFy = calib.data.f[1];
+    mCx = calib.data.f[2];
+    mCy = calib.data.f[3];
+    mS = calib.data.f[4];
+
+    mInvFx = 1 / mFx;
+    mInvFy = 1 / mFy;
+
+    for (size_t i = 0; i < distortion.count; i++) {
+        mK[i] = distortion.data.f[i];
+    }
+
+    mValidMapping = true;
+    // Need to recalculate grid
+    mValidGrids = false;
+
+    return OK;
+}
+
+status_t DistortionMapper::mapRawToCorrected(int32_t *coordPairs, int coordCount) {
+    if (!mValidMapping) return INVALID_OPERATION;
+
+    if (!mValidGrids) {
+        status_t res = buildGrids();
+        if (res != OK) return res;
+    }
+
+    for (int i = 0; i < coordCount * 2; i += 2) {
+        const GridQuad *quad = findEnclosingQuad(coordPairs + i, mDistortedGrid);
+        if (quad == nullptr) {
+            ALOGE("Raw to corrected mapping failure: No quad found for (%d, %d)",
+                    *(coordPairs + i), *(coordPairs + i + 1));
+            return INVALID_OPERATION;
+        }
+        ALOGV("src xy: %d, %d, enclosing quad: (%f, %f), (%f, %f), (%f, %f), (%f, %f)",
+                coordPairs[i], coordPairs[i+1],
+                quad->coords[0], quad->coords[1],
+                quad->coords[2], quad->coords[3],
+                quad->coords[4], quad->coords[5],
+                quad->coords[6], quad->coords[7]);
+
+        const GridQuad *corrQuad = quad->src;
+        if (corrQuad == nullptr) {
+            ALOGE("Raw to corrected mapping failure: No src quad found");
+            return INVALID_OPERATION;
+        }
+        ALOGV("              corr quad: (%f, %f), (%f, %f), (%f, %f), (%f, %f)",
+                corrQuad->coords[0], corrQuad->coords[1],
+                corrQuad->coords[2], corrQuad->coords[3],
+                corrQuad->coords[4], corrQuad->coords[5],
+                corrQuad->coords[6], corrQuad->coords[7]);
+
+        float u = calculateUorV(coordPairs + i, *quad, /*calculateU*/ true);
+        float v = calculateUorV(coordPairs + i, *quad, /*calculateU*/ false);
+
+        ALOGV("uv: %f, %f", u, v);
+
+        // Interpolate along the top edge of the corrected quad (which is axis-aligned) for x
+        float corrX = corrQuad->coords[0] + u * (corrQuad->coords[2] - corrQuad->coords[0]);
+        // Interpolate along the left edge of the corrected quad (which is axis-aligned) for y
+        float corrY = corrQuad->coords[1] + v * (corrQuad->coords[7] - corrQuad->coords[1]);
+
+        coordPairs[i] = static_cast<int32_t>(std::round(corrX));
+        coordPairs[i + 1] = static_cast<int32_t>(std::round(corrY));
+    }
+
+    return OK;
+}
+
+status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount) {
+    if (!mValidMapping) return INVALID_OPERATION;
+    for (int i = 0; i < rectCount * 4; i += 4) {
+        // Map from (l, t, width, height) to (l, t, r, b)
+        int32_t coords[4] = {
+            rects[i],
+            rects[i + 1],
+            rects[i] + rects[i + 2],
+            rects[i + 1] + rects[i + 3]
+        };
+
+        mapRawToCorrected(coords, 2);
+
+        // Map back to (l, t, width, height)
+        rects[i] = coords[0];
+        rects[i + 1] = coords[1];
+        rects[i + 2] = coords[2] - coords[0];
+        rects[i + 3] = coords[3] - coords[1];
+    }
+
+    return OK;
+}
+
+template<typename T>
+status_t DistortionMapper::mapCorrectedToRaw(T *coordPairs, int coordCount) const {
+    if (!mValidMapping) return INVALID_OPERATION;
+
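+    // Forward distortion model: normalize with the intrinsics (fx, fy, cx, cy, s),
+    // apply the radial terms mK[0..2] and the tangential terms mK[3], mK[4] (matching
+    // the documented ANDROID_LENS_DISTORTION coefficient order), then map back to
+    // pixel coordinates.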
+    for (int i = 0; i < coordCount * 2; i += 2) {
+        // Move to normalized space
+        float ywi = (coordPairs[i + 1] - mCy) * mInvFy;
+        float xwi = (coordPairs[i] - mCx - mS * ywi) * mInvFx;
+        // Apply distortion model to calculate raw image coordinates
+        float rSq = xwi * xwi + ywi * ywi;
+        float Fr = 1.f + (mK[0] * rSq) + (mK[1] * rSq * rSq) + (mK[2] * rSq * rSq * rSq);
+        float xc = xwi * Fr + (mK[3] * 2 * xwi * ywi) + mK[4] * (rSq + 2 * xwi * xwi);
+        float yc = ywi * Fr + (mK[4] * 2 * xwi * ywi) + mK[3] * (rSq + 2 * ywi * ywi);
+        // Move back to image space
+        float xr = mFx * xc + mS * yc + mCx;
+        float yr = mFy * yc + mCy;
+
+        coordPairs[i] = static_cast<T>(std::round(xr));
+        coordPairs[i + 1] = static_cast<T>(std::round(yr));
+    }
+
+    return OK;
+}
+
+template status_t DistortionMapper::mapCorrectedToRaw(int32_t*, int) const;
+template status_t DistortionMapper::mapCorrectedToRaw(float*, int) const;
+
+status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount) const {
+    if (!mValidMapping) return INVALID_OPERATION;
+
+    for (int i = 0; i < rectCount * 4; i += 4) {
+        // Map from (l, t, width, height) to (l, t, r, b)
+        int32_t coords[4] = {
+            rects[i],
+            rects[i + 1],
+            rects[i] + rects[i + 2],
+            rects[i + 1] + rects[i + 3]
+        };
+
+        mapCorrectedToRaw(coords, 2);
+
+        // Map back to (l, t, width, height)
+        rects[i] = coords[0];
+        rects[i + 1] = coords[1];
+        rects[i + 2] = coords[2] - coords[0];
+        rects[i + 3] = coords[3] - coords[1];
+    }
+
+    return OK;
+}
+
+status_t DistortionMapper::buildGrids() {
+    if (mCorrectedGrid.size() != kGridSize * kGridSize) {
+        mCorrectedGrid.resize(kGridSize * kGridSize);
+        mDistortedGrid.resize(kGridSize * kGridSize);
+    }
+
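+    // Build a regular kGridSize x kGridSize grid over the (slightly expanded)
+    // pre-correction array in corrected space, then push each quad through
+    // mapCorrectedToRaw() to get its distorted counterpart. mapRawToCorrected()
+    // inverts the mapping by locating the enclosing distorted quad and interpolating
+    // within its corrected source quad.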
+    float gridMargin = mArrayWidth * kGridMargin;
+    float gridSpacingX = (mArrayWidth + 2 * gridMargin) / kGridSize;
+    float gridSpacingY = (mArrayHeight + 2 * gridMargin) / kGridSize;
+
+    size_t index = 0;
+    float x = -gridMargin;
+    for (size_t i = 0; i < kGridSize; i++, x += gridSpacingX) {
+        float y = -gridMargin;
+        for (size_t j = 0; j < kGridSize; j++, y += gridSpacingY, index++) {
+            mCorrectedGrid[index].src = nullptr;
+            mCorrectedGrid[index].coords = {
+                x, y,
+                x + gridSpacingX, y,
+                x + gridSpacingX, y + gridSpacingY,
+                x, y + gridSpacingY
+            };
+            mDistortedGrid[index].src = &mCorrectedGrid[index];
+            mDistortedGrid[index].coords = mCorrectedGrid[index].coords;
+            status_t res = mapCorrectedToRaw(mDistortedGrid[index].coords.data(), 4);
+            if (res != OK) return res;
+        }
+    }
+
+    mValidGrids = true;
+    return OK;
+}
+
+const DistortionMapper::GridQuad* DistortionMapper::findEnclosingQuad(
+        const int32_t pt[2], const std::vector<GridQuad>& grid) {
+    const float x = pt[0];
+    const float y = pt[1];
+
+    for (const GridQuad& quad : grid) {
+        const float &x1 = quad.coords[0];
+        const float &y1 = quad.coords[1];
+        const float &x2 = quad.coords[2];
+        const float &y2 = quad.coords[3];
+        const float &x3 = quad.coords[4];
+        const float &y3 = quad.coords[5];
+        const float &x4 = quad.coords[6];
+        const float &y4 = quad.coords[7];
+
+        // Point-in-quad test:
+
+        // Quad has corners P1-P4; if P is within the quad, then it is on the same side of all the
+        // edges (or on top of one of the edges or corners), traversed in a consistent direction.
+        // This means that the cross product of edge En = Pn->P(n+1 mod 4) and line Ep = Pn->P must
+        // have the same sign (or be zero) for all edges.
+        // For clockwise traversal, the sign should be negative or zero for Ep x En, indicating that
+        // En is to the left of Ep, or overlapping.
+        float s1 = (x - x1) * (y2 - y1) - (y - y1) * (x2 - x1);
+        if (s1 > 0) continue;
+        float s2 = (x - x2) * (y3 - y2) - (y - y2) * (x3 - x2);
+        if (s2 > 0) continue;
+        float s3 = (x - x3) * (y4 - y3) - (y - y3) * (x4 - x3);
+        if (s3 > 0) continue;
+        float s4 = (x - x4) * (y1 - y4) - (y - y4) * (x1 - x4);
+        if (s4 > 0) continue;
+
+        return &quad;
+    }
+    return nullptr;
+}
+
+float DistortionMapper::calculateUorV(const int32_t pt[2], const GridQuad& quad, bool calculateU) {
+    const float x = pt[0];
+    const float y = pt[1];
+    const float &x1 = quad.coords[0];
+    const float &y1 = quad.coords[1];
+    const float &x2 = calculateU ? quad.coords[2] : quad.coords[6];
+    const float &y2 = calculateU ? quad.coords[3] : quad.coords[7];
+    const float &x3 = quad.coords[4];
+    const float &y3 = quad.coords[5];
+    const float &x4 = calculateU ? quad.coords[6] : quad.coords[2];
+    const float &y4 = calculateU ? quad.coords[7] : quad.coords[3];
+
+    float a = (x1 - x2) * (y1 - y2 + y3 - y4) - (y1 - y2) * (x1 - x2 + x3 - x4);
+    float b = (x - x1) * (y1 - y2 + y3 - y4) + (x1 - x2) * (y4 - y1) -
+              (y - y1) * (x1 - x2 + x3 - x4) - (y1 - y2) * (x4 - x1);
+    float c = (x - x1) * (y4 - y1) - (y - y1) * (x4 - x1);
+
+    if (a == 0) {
+        // Linear case: only one solution, which happens if the edges are parallel
+        float u0 = -c / b;
+        ALOGV("u0: %.9g, b: %f, c: %f", u0, b, c);
+        return u0;
+    }
+
+    float det = b * b - 4 * a * c;
+    if (det < 0) {
+        // Sanity check - should not happen if pt is within the quad
+        ALOGE("Bad determinant! a: %f, b: %f, c: %f, det: %f", a,b,c,det);
+        return -1;
+    }
+
+    // Select more numerically stable solution
+    float sqdet = b > 0 ? -std::sqrt(det) : std::sqrt(det);
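+    // When b and sqrt(det) share a sign, (-b + sqrt(det)) cancels badly, so flip the
+    // sign of the square root for u1 and recover the other root below via
+    // u1 * u2 = c / a.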
+
+    float u1 = (-b + sqdet) / (2 * a);
+    ALOGV("u1: %.9g", u1);
+    if (0 - kFloatFuzz < u1 && u1 < 1 + kFloatFuzz) return u1;
+
+    float u2 = c / (a * u1);
+    ALOGV("u2: %.9g", u2);
+    if (0 - kFloatFuzz < u2 && u2 < 1 + kFloatFuzz) return u2;
+
+    // Last resort, return the smaller-magnitude solution
+    return fabs(u1) < fabs(u2) ? u1 : u2;
+}
+
+} // namespace camera3
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.h b/services/camera/libcameraservice/device3/DistortionMapper.h
new file mode 100644
index 0000000..00cbd32
--- /dev/null
+++ b/services/camera/libcameraservice/device3/DistortionMapper.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_DISTORTIONMAPPER_H
+#define ANDROID_SERVERS_DISTORTIONMAPPER_H
+
+#include <utils/Errors.h>
+#include <array>
+#include <mutex>
+
+#include "camera/CameraMetadata.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Utilities to transform between raw (distorted) and warped (corrected) coordinate systems
+ * for cameras that support geometric distortion
+ */
+class DistortionMapper {
+  public:
+    DistortionMapper();
+
+    /**
+     * Check whether distortion correction is supported by the camera HAL
+     */
+    static bool isDistortionSupported(const CameraMetadata &deviceInfo);
+
+    /**
+     * Update static lens calibration info from camera characteristics
+     */
+    status_t setupStaticInfo(const CameraMetadata &deviceInfo);
+
+    /**
+     * Return whether distortion correction can be applied currently
+     */
+    bool calibrationValid() const;
+
+    /**
+     * Correct capture request if distortion correction is enabled
+     */
+    status_t correctCaptureRequest(CameraMetadata *request);
+
+    /**
+     * Correct capture result if distortion correction is enabled
+     */
+    status_t correctCaptureResult(CameraMetadata *result);
+
+
+  public: // Visible for testing. Not guarded by mutex; do not use concurrently
+    /**
+     * Update lens calibration from capture results or equivalent
+     */
+    status_t updateCalibration(const CameraMetadata &result);
+
+    /**
+     * Transform from distorted (original) to corrected (warped) coordinates.
+     * Coordinates are transformed in-place
+     *
+     *   coordPairs: A pointer to an array of consecutive (x,y) points
+     *   coordCount: Number of (x,y) pairs to transform
+     */
+    status_t mapRawToCorrected(int32_t *coordPairs, int coordCount);
+
+    /**
+     * Transform from distorted (original) to corrected (warped) coordinates.
+     * Coordinates are transformed in-place
+     *
+     *   rects: A pointer to an array of consecutive (x, y, w, h) rectangles
+     *   rectCount: Number of rectangles to transform
+     */
+    status_t mapRawRectToCorrected(int32_t *rects, int rectCount);
+
+    /**
+     * Transform from corrected (warped) to distorted (original) coordinates.
+     * Coordinates are transformed in-place
+     *
+     *   coordPairs: A pointer to an array of consecutive (x,y) points
+     *   coordCount: Number of (x,y) pairs to transform
+     */
+    template<typename T>
+    status_t mapCorrectedToRaw(T* coordPairs, int coordCount) const;
+
+    /**
+     * Transform from corrected (warped) to distorted (original) coordinates.
+     * Coordinates are transformed in-place
+     *
+     *   rects: A pointer to an array of consecutive (x, y, w, h) rectangles
+     *   rectCount: Number of rectangles to transform
+     */
+    status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount) const;
+
+    struct GridQuad {
+        // Source grid quad, or null
+        const GridQuad *src;
+        // x,y coordinates of corners, in
+        // clockwise order
+        std::array<float, 8> coords;
+    };
+
+    // Find which grid quad encloses the point; returns null if none do
+    static const GridQuad* findEnclosingQuad(
+            const int32_t pt[2], const std::vector<GridQuad>& grid);
+
+    // Calculate 'horizontal' interpolation coordinate for the point and the quad
+    // Assumes the point P is within the quad Q.
+    // Given quad with points P1-P4, and edges E12-E41, and considering the edge segments as
+    // functions of U: E12(u), where E12(0) = P1 and E12(1) = P2, then we want to find a u
+    // such that the edge E12(u) -> E43(u) contains point P.
+    // This can be determined by checking if the cross product of vector [E12(u)-E43(u)] and
+    // vector [E12(u)-P] is zero. Solving the equation
+    // [E12(u)-E43(u)] x [E12(u)-P] = 0 gives a quadratic equation in u; the solution in the range
+    // 0 to 1 is the one chosen.
+    // If calculateU is true, then an interpolation coordinate for edges E12 and E43 is found;
+    // if it is false, then an interpolation coordinate for edges E14 and E23 is found.
+    static float calculateUorV(const int32_t pt[2], const GridQuad& quad, bool calculateU);
+
+  private:
+    mutable std::mutex mMutex;
+
+    // Number of quads in each dimension of the mapping grids
+    constexpr static size_t kGridSize = 15;
+    // Margin to expand the grid by to ensure it doesn't clip the domain
+    constexpr static float kGridMargin = 0.05f;
+    // Fuzziness for float inequality tests
+    constexpr static float kFloatFuzz = 1e-4;
+
+    // Metadata key lists to correct
+
+    // Both capture request and result
+    static const std::array<uint32_t, 3> kMeteringRegionsToCorrect;
+
+    // Only capture request
+    static const std::array<uint32_t, 1> kRequestRectsToCorrect;
+
+    // Only capture result
+    static const std::array<uint32_t, 1> kResultRectsToCorrect;
+
+    // Only for capture results
+    static const std::array<uint32_t, 2> kResultPointsToCorrect;
+
+    // Utility to create reverse mapping grids
+    status_t buildGrids();
+
+
+    bool mValidMapping;
+    bool mValidGrids;
+
+    // intrinsic parameters, in pixels
+    float mFx, mFy, mCx, mCy, mS;
+    // pre-calculated inverses for speed
+    float mInvFx, mInvFy;
+    // radial/tangential distortion parameters
+    float mK[5];
+
+    // pre-correction active array dimensions
+    int mArrayWidth, mArrayHeight;
+    // active array dimensions
+    int mActiveWidth, mActiveHeight;
+
+    std::vector<GridQuad> mCorrectedGrid;
+    std::vector<GridQuad> mDistortedGrid;
+
+}; // class DistortionMapper
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index 37a05c2..f77069c 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -18,6 +18,7 @@
 LOCAL_SRC_FILES:= $(call all-cpp-files-under, .)
 
 LOCAL_SHARED_LIBRARIES := \
+    libbase \
     libcutils \
     libcameraservice \
     libhidlbase \
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index c1d6e85..ef93d9a 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -190,6 +190,7 @@
             hardware::camera::common::V1_0::CameraDeviceStatus) override {}
     void onTorchStatusChanged(const String8 &,
             hardware::camera::common::V1_0::TorchModeStatus) override {}
+    void onNewProviderRegistered() override {}
 };
 
 TEST(CameraProviderManagerTest, InitializeTest) {
diff --git a/services/camera/libcameraservice/tests/DistortionMapperComp.py b/services/camera/libcameraservice/tests/DistortionMapperComp.py
new file mode 100644
index 0000000..dea36a7
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DistortionMapperComp.py
@@ -0,0 +1,47 @@
+# Calculates comparison output values for DistortionMapperTest.cpp:CompareToOpenCV
+#
+# Assumes a python that has numpy and cv2 (OpenCV) available
+
+import numpy as np
+import cv2
+
+Fx = 1000
+Fy = 1000
+Cx = 500
+Cy = 500
+# s = 0 - not supported by OpenCV
+
+K = np.array([[Fx, 0, Cx],[0, Fy, Cy],[0, 0, 1]])
+
+# Order is k1, k2, t1, t2, k3
+dist = np.array([0.1, -0.003, 0.02, 0.01, 0.004])
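+# This matches bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01} in
+# DistortionMapperTest.cpp, which uses the k1, k2, k3, t1, t2 order expected by
+# DistortionMapper.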
+
+np.random.seed(1234)
+
+activeArray = np.array([[1000, 750]])
+
+rawCoords = np.floor(np.random.rand(1000,2) * activeArray)
+
+# OpenCV needs either row count or col count = 1 for some reason
+rawCoords2 = rawCoords.reshape(-1, 1, 2)
+
+# P is the output camera matrix, K is the input; use the same for both
+expCoords = cv2.undistortPoints(rawCoords2, K, dist, P = K)
+
+with open('DistortionMapperTest_OpenCvData.h','w') as f:
+  f.write('// Generated by DistortionMapperComp.py\n')
+  f.write('// for use by DistortionMapperTest.cpp\n\n')
+
+  f.write('namespace openCvData {\n')
+  f.write('std::array<int32_t, %d> rawCoords = {\n' % (rawCoords.shape[0] * rawCoords.shape[1]))
+  for i in range(rawCoords.shape[0]):
+    f.write('  %d, %d,\n' % (rawCoords[i][0], rawCoords[i][1]))
+  f.write('};\n')
+
+  f.write('std::array<int32_t, %d> expCoords = {\n' % (expCoords.shape[0] * expCoords.shape[2]))
+  for i in range(expCoords.shape[0]):
+    f.write('  %d, %d,\n' % (expCoords[i][0][0], expCoords[i][0][1]))
+  f.write('};\n')
+  f.write('} // namespace openCvData\n')
+
+print "DistortionMapperTest_OpenCvData.h generated"
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
new file mode 100644
index 0000000..b489931
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "DistortionMapperTest"
+
+#include <random>
+
+#include <gtest/gtest.h>
+#include <android-base/stringprintf.h>
+#include <android-base/chrono_utils.h>
+
+#include "../device3/DistortionMapper.h"
+
+using namespace android;
+using namespace android::camera3;
+
+
+int32_t testActiveArray[] = {100, 100, 1000, 750};
+
+float testICal[] = { 1000.f, 1000.f, 500.f, 500.f, 0.f };
+
+float identityDistortion[] = { 0.f, 0.f, 0.f, 0.f, 0.f};
+
+std::array<int32_t, 12> basicCoords = {
+    0, 0,
+    testActiveArray[2] - 1, 0,
+    testActiveArray[2] - 1,  testActiveArray[3] - 1,
+    0, testActiveArray[3] - 1,
+    testActiveArray[2] / 2, testActiveArray[3] / 2,
+    251, 403  // A particularly bad coordinate for current grid count/array size
+};
+
+
+void setupTestMapper(DistortionMapper *m, float distortion[5]) {
+    CameraMetadata deviceInfo;
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+            testActiveArray, 4);
+
+    deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
+            testICal, 5);
+
+    deviceInfo.update(ANDROID_LENS_DISTORTION,
+            distortion, 5);
+
+    m->setupStaticInfo(deviceInfo);
+}
+
+TEST(DistortionMapperTest, Initialization) {
+    CameraMetadata deviceInfo;
+
+    ASSERT_FALSE(DistortionMapper::isDistortionSupported(deviceInfo));
+
+    uint8_t distortionModes[] =
+            {ANDROID_DISTORTION_CORRECTION_MODE_OFF,
+             ANDROID_DISTORTION_CORRECTION_MODE_FAST,
+             ANDROID_DISTORTION_CORRECTION_MODE_HIGH_QUALITY};
+
+    deviceInfo.update(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES,
+            distortionModes, 1);
+
+    ASSERT_FALSE(DistortionMapper::isDistortionSupported(deviceInfo));
+
+    deviceInfo.update(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES,
+            distortionModes, 3);
+
+    ASSERT_TRUE(DistortionMapper::isDistortionSupported(deviceInfo));
+
+    DistortionMapper m;
+
+    ASSERT_FALSE(m.calibrationValid());
+
+    ASSERT_NE(m.setupStaticInfo(deviceInfo), OK);
+
+    ASSERT_FALSE(m.calibrationValid());
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+            testActiveArray, 4);
+
+    deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
+            testICal, 5);
+
+    deviceInfo.update(ANDROID_LENS_DISTORTION,
+            identityDistortion, 5);
+
+    ASSERT_EQ(m.setupStaticInfo(deviceInfo), OK);
+
+    ASSERT_TRUE(m.calibrationValid());
+
+    CameraMetadata captureResult;
+
+    ASSERT_NE(m.updateCalibration(captureResult), OK);
+
+    captureResult.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
+            testICal, 5);
+    captureResult.update(ANDROID_LENS_DISTORTION,
+            identityDistortion, 5);
+
+    ASSERT_EQ(m.updateCalibration(captureResult), OK);
+
+}
+
+TEST(DistortionMapperTest, IdentityTransform) {
+    status_t res;
+
+    DistortionMapper m;
+    setupTestMapper(&m, identityDistortion);
+
+    auto coords = basicCoords;
+    res = m.mapCorrectedToRaw(coords.data(), 5);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < coords.size(); i++) {
+        EXPECT_EQ(coords[i], basicCoords[i]);
+    }
+
+    res = m.mapRawToCorrected(coords.data(), 5);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < coords.size(); i++) {
+        EXPECT_EQ(coords[i], basicCoords[i]);
+    }
+
+    std::array<int32_t, 8> rects = {
+        0, 0, 100, 100,
+        testActiveArray[2] - 100, testActiveArray[3]-100, 100, 100
+    };
+
+    auto rectsOrig = rects;
+    res = m.mapCorrectedRectToRaw(rects.data(), 2);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < rects.size(); i++) {
+        EXPECT_EQ(rects[i], rectsOrig[i]);
+    }
+
+    res = m.mapRawRectToCorrected(rects.data(), 2);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < rects.size(); i++) {
+        EXPECT_EQ(rects[i], rectsOrig[i]);
+    }
+}
+
+TEST(DistortionMapperTest, LargeTransform) {
+    status_t res;
+    constexpr int maxAllowedPixelError = 2; // Maximum per-pixel error allowed
+    constexpr int bucketsPerPixel = 3; // Histogram granularity
+
+    unsigned int seed = 1234; // Ensure repeatability for debugging
+    const size_t coordCount = 1e6; // Number of random test points
+
+    float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
+
+    DistortionMapper m;
+    setupTestMapper(&m, bigDistortion);
+
+    std::default_random_engine gen(seed);
+
+    std::uniform_int_distribution<int> x_dist(0, testActiveArray[2] - 1);
+    std::uniform_int_distribution<int> y_dist(0, testActiveArray[3] - 1);
+
+    std::vector<int32_t> randCoords(coordCount * 2);
+
+    for (size_t i = 0; i < randCoords.size(); i += 2) {
+        randCoords[i] = x_dist(gen);
+        randCoords[i + 1] = y_dist(gen);
+    }
+
+    randCoords.insert(randCoords.end(), basicCoords.begin(), basicCoords.end());
+
+    auto origCoords = randCoords;
+
+    base::Timer correctedToRawTimer;
+    res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2);
+    auto correctedToRawDurationMs = correctedToRawTimer.duration();
+    EXPECT_EQ(res, OK);
+
+    base::Timer rawToCorrectedTimer;
+    res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2);
+    auto rawToCorrectedDurationMs = rawToCorrectedTimer.duration();
+    EXPECT_EQ(res, OK);
+
+    float correctedToRawDurationPerCoordUs =
+            (std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(
+                correctedToRawDurationMs) / (randCoords.size() / 2) ).count();
+    float rawToCorrectedDurationPerCoordUs =
+            (std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(
+                rawToCorrectedDurationMs) / (randCoords.size() / 2) ).count();
+
+    RecordProperty("CorrectedToRawDurationPerCoordUs",
+            base::StringPrintf("%f", correctedToRawDurationPerCoordUs));
+    RecordProperty("RawToCorrectedDurationPerCoordUs",
+            base::StringPrintf("%f", rawToCorrectedDurationPerCoordUs));
+
+    // Calculate mapping errors after round trip
+    float totalErrorSq = 0;
+    // Basic histogram; buckets go from [N to N+1)
+    std::array<int, maxAllowedPixelError * bucketsPerPixel> histogram = {0};
+    int outOfHistogram = 0;
+
+    for (size_t i = 0; i < randCoords.size(); i += 2) {
+        int xOrig = origCoords[i];
+        int yOrig = origCoords[i + 1];
+        int xMapped = randCoords[i];
+        int yMapped = randCoords[i + 1];
+
+        float errorSq = (xMapped - xOrig) * (xMapped - xOrig) +
+                (yMapped - yOrig) * (yMapped - yOrig);
+
+        EXPECT_LE(errorSq, maxAllowedPixelError * maxAllowedPixelError) << "( " <<
+                xOrig << "," << yOrig << ") -> (" << xMapped << "," << yMapped << ")";
+
+        // Note: Integer coordinates, so histogram will be clumpy; error distances can only be of
+        // form sqrt(X^2+Y^2) where X, Y are integers, so:
+        //    0, 1, sqrt(2), 2, sqrt(5), sqrt(8), 3, sqrt(10), sqrt(13), 4 ...
+        totalErrorSq += errorSq;
+        float errorDist = std::sqrt(errorSq);
+        if (errorDist < maxAllowedPixelError) {
+            int histBucket = static_cast<int>(errorDist * bucketsPerPixel); // rounds down
+            histogram[histBucket]++;
+        } else {
+            outOfHistogram++;
+        }
+    }
+
+    float rmsError = std::sqrt(totalErrorSq / randCoords.size());
+    RecordProperty("RmsError", base::StringPrintf("%f", rmsError));
+    for (size_t i = 0; i < histogram.size(); i++) {
+        std::string label = base::StringPrintf("HistogramBin[%f,%f)",
+                (float)i/bucketsPerPixel, (float)(i + 1)/bucketsPerPixel);
+        RecordProperty(label, histogram[i]);
+    }
+    RecordProperty("HistogramOutOfRange", outOfHistogram);
+}
+
+// Compare against values calculated by OpenCV
+// undistortPoints() method, which is the same as mapRawToCorrected
+// See script DistortionMapperComp.py
+#include "DistortionMapperTest_OpenCvData.h"
+
+TEST(DistortionMapperTest, CompareToOpenCV) {
+    status_t res;
+
+    float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
+
+    // Expect to match within sqrt(2) radius pixels
+    const int32_t maxSqError = 2;
+
+    DistortionMapper m;
+    setupTestMapper(&m, bigDistortion);
+
+    using namespace openCvData;
+
+    res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2);
+    EXPECT_EQ(res, OK);
+
+    for (size_t i = 0; i < rawCoords.size(); i+=2) {
+        int32_t dist = (rawCoords[i] - expCoords[i]) * (rawCoords[i] - expCoords[i]) +
+               (rawCoords[i + 1] - expCoords[i + 1]) * (rawCoords[i + 1] - expCoords[i + 1]);
+        EXPECT_LE(dist, maxSqError)
+                << "(" << rawCoords[i] << ", " << rawCoords[i + 1] << ") != ("
+                << expCoords[i] << ", " << expCoords[i + 1] << ")";
+    }
+}
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest_OpenCvData.h b/services/camera/libcameraservice/tests/DistortionMapperTest_OpenCvData.h
new file mode 100644
index 0000000..f996bd5
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest_OpenCvData.h
@@ -0,0 +1,2009 @@
+// Generated by DistortionMapperComp.py
+// for use by DistortionMapperTest.cpp
+
+namespace openCvData {
+std::array<int32_t, 2000> rawCoords = {
+  191, 466,
+  437, 589,
+  779, 204,
+  276, 601,
+  958, 656,
+  357, 375,
+  683, 534,
+  370, 420,
+  503, 10,
+  772, 661,
+  364, 461,
+  75, 276,
+  933, 488,
+  397, 591,
+  316, 426,
+  869, 327,
+  802, 107,
+  704, 528,
+  218, 693,
+  442, 681,
+  59, 138,
+  47, 506,
+  594, 399,
+  43, 421,
+  329, 377,
+  111, 455,
+  565, 5,
+  617, 684,
+  790, 744,
+  958, 593,
+  285, 468,
+  478, 146,
+  382, 40,
+  451, 736,
+  123, 89,
+  738, 440,
+  471, 80,
+  229, 674,
+  416, 401,
+  6, 225,
+  436, 459,
+  918, 469,
+  705, 112,
+  746, 623,
+  633, 328,
+  152, 426,
+  528, 713,
+  480, 376,
+  536, 614,
+  57, 502,
+  767, 531,
+  796, 418,
+  965, 110,
+  29, 445,
+  114, 713,
+  325, 145,
+  457, 690,
+  879, 189,
+  348, 136,
+  901, 529,
+  726, 675,
+  779, 449,
+  291, 113,
+  335, 493,
+  73, 41,
+  323, 442,
+  853, 215,
+  173, 100,
+  994, 134,
+  317, 426,
+  9, 675,
+  977, 417,
+  84, 249,
+  728, 106,
+  552, 204,
+  974, 500,
+  255, 81,
+  776, 586,
+  761, 685,
+  658, 426,
+  201, 523,
+  952, 667,
+  993, 614,
+  545, 338,
+  890, 729,
+  593, 274,
+  323, 653,
+  215, 551,
+  365, 601,
+  782, 526,
+  622, 370,
+  840, 534,
+  443, 23,
+  363, 548,
+  475, 258,
+  640, 94,
+  171, 552,
+  127, 277,
+  604, 77,
+  802, 709,
+  979, 660,
+  627, 697,
+  724, 537,
+  41, 329,
+  282, 251,
+  83, 570,
+  509, 495,
+  630, 278,
+  446, 311,
+  480, 737,
+  373, 9,
+  921, 654,
+  351, 472,
+  357, 159,
+  223, 314,
+  72, 488,
+  755, 699,
+  376, 222,
+  371, 620,
+  901, 320,
+  2, 31,
+  141, 448,
+  106, 222,
+  58, 465,
+  22, 641,
+  306, 569,
+  563, 28,
+  565, 494,
+  183, 598,
+  612, 416,
+  629, 514,
+  240, 590,
+  856, 545,
+  692, 354,
+  856, 498,
+  333, 388,
+  394, 23,
+  576, 637,
+  951, 512,
+  76, 307,
+  41, 130,
+  565, 129,
+  566, 385,
+  868, 540,
+  903, 208,
+  256, 524,
+  901, 737,
+  640, 247,
+  606, 616,
+  627, 88,
+  285, 740,
+  431, 430,
+  527, 148,
+  165, 375,
+  990, 295,
+  714, 131,
+  70, 118,
+  161, 212,
+  536, 579,
+  880, 478,
+  852, 448,
+  693, 402,
+  889, 39,
+  783, 108,
+  58, 44,
+  51, 386,
+  404, 749,
+  108, 245,
+  997, 300,
+  883, 426,
+  953, 664,
+  779, 23,
+  962, 389,
+  207, 656,
+  224, 103,
+  725, 730,
+  535, 333,
+  22, 454,
+  964, 725,
+  930, 138,
+  622, 309,
+  363, 27,
+  868, 504,
+  87, 665,
+  782, 237,
+  818, 380,
+  21, 325,
+  446, 179,
+  830, 558,
+  586, 369,
+  487, 200,
+  605, 565,
+  270, 391,
+  98, 535,
+  884, 425,
+  994, 134,
+  12, 342,
+  931, 634,
+  473, 676,
+  225, 228,
+  714, 543,
+  18, 214,
+  580, 698,
+  338, 90,
+  516, 524,
+  298, 646,
+  905, 576,
+  261, 703,
+  938, 558,
+  910, 177,
+  494, 607,
+  954, 478,
+  910, 519,
+  42, 625,
+  369, 702,
+  483, 93,
+  964, 12,
+  676, 105,
+  155, 487,
+  981, 521,
+  761, 318,
+  138, 162,
+  764, 40,
+  490, 135,
+  630, 413,
+  567, 613,
+  938, 144,
+  712, 523,
+  258, 686,
+  532, 418,
+  322, 253,
+  332, 734,
+  203, 500,
+  574, 38,
+  542, 155,
+  91, 652,
+  27, 726,
+  327, 307,
+  135, 95,
+  413, 463,
+  132, 730,
+  180, 570,
+  482, 576,
+  302, 11,
+  463, 527,
+  66, 501,
+  345, 443,
+  229, 200,
+  932, 619,
+  145, 485,
+  883, 556,
+  515, 101,
+  39, 625,
+  117, 392,
+  873, 29,
+  692, 357,
+  169, 47,
+  32, 181,
+  112, 303,
+  42, 694,
+  935, 607,
+  188, 440,
+  903, 725,
+  67, 238,
+  696, 480,
+  632, 621,
+  713, 251,
+  167, 573,
+  359, 66,
+  729, 660,
+  41, 131,
+  420, 255,
+  44, 84,
+  438, 53,
+  816, 649,
+  131, 144,
+  437, 728,
+  773, 98,
+  927, 620,
+  392, 105,
+  52, 69,
+  886, 126,
+  362, 490,
+  604, 296,
+  374, 47,
+  428, 539,
+  768, 145,
+  862, 21,
+  902, 177,
+  128, 238,
+  848, 624,
+  345, 179,
+  535, 203,
+  34, 470,
+  520, 31,
+  757, 741,
+  801, 257,
+  335, 263,
+  442, 434,
+  617, 132,
+  864, 532,
+  170, 641,
+  19, 481,
+  162, 193,
+  342, 4,
+  438, 597,
+  675, 408,
+  564, 10,
+  608, 741,
+  224, 440,
+  835, 594,
+  329, 267,
+  960, 167,
+  211, 115,
+  495, 195,
+  555, 54,
+  378, 345,
+  778, 540,
+  231, 18,
+  257, 307,
+  8, 353,
+  852, 692,
+  231, 743,
+  5, 251,
+  789, 73,
+  442, 285,
+  832, 533,
+  355, 18,
+  693, 315,
+  858, 431,
+  940, 660,
+  458, 12,
+  68, 240,
+  592, 457,
+  512, 108,
+  876, 553,
+  373, 621,
+  90, 48,
+  505, 700,
+  148, 427,
+  59, 126,
+  69, 679,
+  447, 79,
+  665, 376,
+  409, 545,
+  172, 288,
+  267, 231,
+  177, 361,
+  629, 44,
+  196, 209,
+  707, 245,
+  425, 528,
+  159, 329,
+  629, 693,
+  356, 614,
+  640, 536,
+  738, 728,
+  246, 31,
+  247, 33,
+  705, 626,
+  934, 353,
+  512, 197,
+  98, 599,
+  142, 604,
+  879, 374,
+  914, 309,
+  200, 482,
+  399, 460,
+  437, 444,
+  951, 414,
+  903, 103,
+  1, 459,
+  541, 585,
+  821, 715,
+  609, 217,
+  826, 282,
+  609, 465,
+  441, 149,
+  443, 693,
+  671, 61,
+  572, 90,
+  999, 748,
+  694, 280,
+  401, 693,
+  244, 498,
+  481, 26,
+  110, 671,
+  892, 686,
+  307, 223,
+  659, 446,
+  984, 461,
+  154, 623,
+  815, 69,
+  887, 12,
+  863, 674,
+  850, 489,
+  328, 409,
+  237, 653,
+  176, 277,
+  759, 229,
+  616, 164,
+  872, 485,
+  473, 175,
+  833, 73,
+  205, 176,
+  596, 471,
+  982, 132,
+  799, 116,
+  360, 716,
+  130, 204,
+  809, 724,
+  92, 437,
+  405, 674,
+  84, 135,
+  50, 225,
+  190, 6,
+  127, 84,
+  730, 179,
+  901, 246,
+  874, 177,
+  378, 406,
+  515, 310,
+  388, 255,
+  402, 342,
+  382, 493,
+  881, 429,
+  428, 193,
+  865, 129,
+  579, 545,
+  730, 302,
+  117, 572,
+  533, 541,
+  597, 317,
+  933, 745,
+  547, 547,
+  27, 647,
+  998, 243,
+  850, 458,
+  441, 395,
+  779, 188,
+  462, 635,
+  678, 275,
+  137, 302,
+  931, 504,
+  419, 426,
+  41, 746,
+  619, 584,
+  931, 256,
+  811, 654,
+  665, 441,
+  893, 336,
+  244, 610,
+  697, 307,
+  5, 715,
+  605, 4,
+  498, 448,
+  300, 346,
+  495, 439,
+  869, 624,
+  778, 411,
+  613, 550,
+  566, 581,
+  986, 591,
+  614, 118,
+  476, 212,
+  153, 582,
+  58, 59,
+  664, 392,
+  446, 230,
+  17, 220,
+  267, 27,
+  804, 250,
+  891, 607,
+  964, 718,
+  591, 233,
+  918, 37,
+  225, 272,
+  412, 708,
+  579, 140,
+  596, 700,
+  134, 736,
+  153, 615,
+  677, 303,
+  217, 580,
+  847, 397,
+  409, 13,
+  148, 603,
+  419, 254,
+  297, 538,
+  997, 413,
+  889, 126,
+  883, 527,
+  422, 647,
+  235, 422,
+  26, 285,
+  361, 68,
+  45, 256,
+  355, 746,
+  944, 98,
+  518, 357,
+  401, 697,
+  515, 607,
+  881, 572,
+  464, 55,
+  470, 150,
+  208, 133,
+  354, 683,
+  433, 133,
+  752, 37,
+  82, 28,
+  465, 452,
+  181, 389,
+  710, 693,
+  529, 728,
+  547, 4,
+  472, 391,
+  152, 490,
+  130, 340,
+  982, 99,
+  60, 50,
+  96, 614,
+  629, 587,
+  77, 728,
+  882, 472,
+  929, 298,
+  488, 514,
+  281, 507,
+  495, 593,
+  218, 559,
+  454, 306,
+  922, 113,
+  130, 286,
+  541, 708,
+  323, 73,
+  947, 642,
+  26, 88,
+  829, 103,
+  569, 358,
+  306, 42,
+  936, 678,
+  722, 490,
+  392, 730,
+  711, 369,
+  326, 86,
+  972, 205,
+  187, 161,
+  760, 708,
+  501, 496,
+  347, 96,
+  681, 293,
+  26, 375,
+  528, 167,
+  1, 334,
+  505, 60,
+  822, 180,
+  9, 168,
+  84, 619,
+  714, 183,
+  63, 320,
+  706, 538,
+  193, 555,
+  956, 386,
+  430, 17,
+  507, 514,
+  138, 504,
+  39, 323,
+  854, 316,
+  88, 42,
+  103, 363,
+  674, 68,
+  832, 582,
+  61, 241,
+  377, 376,
+  449, 350,
+  104, 280,
+  21, 336,
+  893, 581,
+  249, 548,
+  315, 372,
+  50, 436,
+  282, 220,
+  126, 669,
+  451, 488,
+  809, 212,
+  273, 289,
+  421, 699,
+  867, 333,
+  29, 80,
+  196, 178,
+  824, 672,
+  27, 429,
+  805, 315,
+  525, 214,
+  658, 67,
+  822, 605,
+  191, 478,
+  832, 352,
+  580, 81,
+  462, 664,
+  464, 349,
+  196, 29,
+  615, 423,
+  108, 556,
+  183, 261,
+  480, 482,
+  621, 570,
+  286, 369,
+  681, 382,
+  768, 224,
+  546, 183,
+  443, 607,
+  103, 172,
+  791, 424,
+  827, 731,
+  965, 712,
+  551, 69,
+  740, 423,
+  745, 341,
+  155, 746,
+  889, 602,
+  411, 159,
+  294, 467,
+  248, 599,
+  18, 360,
+  734, 512,
+  421, 519,
+  367, 174,
+  785, 545,
+  706, 23,
+  239, 278,
+  581, 65,
+  232, 609,
+  752, 603,
+  294, 585,
+  224, 217,
+  848, 558,
+  332, 425,
+  699, 68,
+  53, 647,
+  629, 652,
+  87, 649,
+  41, 718,
+  227, 563,
+  400, 302,
+  253, 380,
+  184, 42,
+  366, 539,
+  474, 691,
+  170, 538,
+  869, 96,
+  974, 565,
+  916, 28,
+  285, 617,
+  274, 38,
+  147, 12,
+  782, 261,
+  749, 41,
+  78, 592,
+  370, 83,
+  405, 488,
+  436, 151,
+  443, 556,
+  96, 383,
+  843, 745,
+  630, 214,
+  126, 10,
+  338, 363,
+  546, 27,
+  61, 17,
+  507, 199,
+  445, 730,
+  797, 213,
+  555, 148,
+  790, 65,
+  837, 180,
+  434, 320,
+  102, 681,
+  149, 680,
+  10, 130,
+  839, 232,
+  848, 683,
+  899, 650,
+  837, 190,
+  843, 463,
+  984, 457,
+  651, 490,
+  552, 139,
+  980, 71,
+  748, 393,
+  290, 171,
+  503, 698,
+  574, 742,
+  429, 312,
+  627, 680,
+  69, 412,
+  154, 538,
+  135, 3,
+  537, 12,
+  535, 34,
+  153, 632,
+  797, 227,
+  398, 336,
+  20, 463,
+  804, 175,
+  400, 369,
+  501, 250,
+  105, 480,
+  151, 146,
+  57, 686,
+  830, 119,
+  867, 380,
+  128, 84,
+  222, 667,
+  450, 522,
+  390, 466,
+  716, 375,
+  760, 624,
+  559, 407,
+  587, 18,
+  989, 53,
+  817, 102,
+  153, 269,
+  253, 164,
+  563, 360,
+  93, 385,
+  197, 360,
+  277, 7,
+  887, 280,
+  416, 658,
+  760, 411,
+  902, 690,
+  465, 424,
+  28, 105,
+  399, 620,
+  455, 520,
+  637, 491,
+  769, 0,
+  300, 521,
+  90, 392,
+  894, 722,
+  705, 573,
+  344, 188,
+  667, 111,
+  470, 16,
+  759, 154,
+  840, 581,
+  176, 663,
+  93, 151,
+  372, 130,
+  345, 425,
+  156, 581,
+  33, 8,
+  320, 395,
+  629, 661,
+  641, 17,
+  695, 663,
+  751, 197,
+  507, 93,
+  608, 519,
+  77, 303,
+  513, 605,
+  98, 354,
+  567, 401,
+  184, 440,
+  785, 748,
+  52, 32,
+  528, 452,
+  82, 532,
+  116, 147,
+  779, 341,
+  308, 275,
+  763, 135,
+  137, 375,
+  14, 260,
+  337, 378,
+  492, 262,
+  202, 119,
+  561, 334,
+  855, 683,
+  876, 724,
+  202, 544,
+  571, 437,
+  456, 436,
+  67, 4,
+  468, 592,
+  922, 540,
+  125, 539,
+  615, 290,
+  785, 76,
+  402, 556,
+  12, 696,
+  460, 52,
+  909, 92,
+  894, 153,
+  931, 373,
+  360, 120,
+  726, 626,
+  318, 733,
+  472, 424,
+  146, 74,
+  86, 564,
+  742, 236,
+  845, 400,
+  832, 139,
+  275, 437,
+  929, 42,
+  818, 123,
+  439, 274,
+  65, 590,
+  512, 132,
+  520, 443,
+  444, 107,
+  961, 313,
+  130, 488,
+  587, 191,
+  287, 603,
+  56, 208,
+  936, 628,
+  908, 445,
+  773, 258,
+  383, 283,
+  425, 530,
+  244, 133,
+  216, 543,
+  631, 595,
+  785, 108,
+  87, 192,
+  640, 427,
+  889, 688,
+  152, 89,
+  10, 209,
+  122, 343,
+  188, 5,
+  896, 748,
+  806, 22,
+  535, 457,
+  851, 307,
+  261, 566,
+  791, 590,
+  947, 300,
+  658, 394,
+  418, 305,
+  371, 632,
+  470, 438,
+  165, 410,
+  538, 380,
+  643, 408,
+  318, 591,
+  564, 311,
+  327, 690,
+  930, 8,
+  93, 100,
+  627, 196,
+  582, 416,
+  200, 492,
+  943, 267,
+  31, 355,
+  67, 374,
+  692, 57,
+  229, 373,
+  542, 371,
+  801, 230,
+  114, 420,
+  769, 326,
+  83, 448,
+  846, 137,
+  912, 77,
+  126, 3,
+  784, 420,
+  660, 391,
+  795, 188,
+  530, 42,
+  137, 106,
+  663, 80,
+  757, 340,
+  694, 267,
+  768, 612,
+  926, 155,
+  600, 25,
+  292, 31,
+  97, 225,
+  60, 437,
+  724, 563,
+  698, 85,
+  286, 196,
+  66, 1,
+  269, 25,
+  467, 405,
+  204, 171,
+  653, 14,
+  299, 360,
+  521, 719,
+  760, 602,
+  329, 282,
+  687, 530,
+  110, 200,
+  30, 300,
+  6, 501,
+  868, 281,
+  281, 76,
+  805, 363,
+  876, 114,
+  219, 549,
+  65, 611,
+  859, 23,
+  66, 354,
+  205, 169,
+  434, 174,
+  828, 668,
+  814, 720,
+  663, 34,
+  875, 707,
+  969, 561,
+  932, 66,
+  834, 548,
+  961, 86,
+  263, 148,
+  145, 202,
+  83, 146,
+  947, 727,
+  3, 138,
+  927, 514,
+  814, 742,
+  80, 430,
+  866, 184,
+  593, 731,
+  193, 219,
+  496, 490,
+  606, 530,
+  314, 334,
+  301, 327,
+  50, 715,
+  178, 57,
+  936, 626,
+  972, 617,
+  33, 427,
+  147, 435,
+  83, 341,
+  859, 244,
+  337, 688,
+  637, 124,
+  874, 71,
+  590, 474,
+  332, 120,
+  640, 290,
+  816, 171,
+  665, 431,
+  79, 31,
+  857, 110,
+  103, 79,
+  293, 397,
+  866, 651,
+  356, 73,
+  438, 710,
+  41, 233,
+  782, 596,
+  852, 407,
+  590, 104,
+  34, 116,
+  756, 276,
+  282, 181,
+  871, 275,
+  888, 712,
+  872, 279,
+  645, 324,
+  730, 524,
+  430, 302,
+  601, 486,
+  114, 529,
+  359, 317,
+  313, 426,
+  33, 732,
+  970, 211,
+  657, 582,
+  945, 501,
+  450, 630,
+  822, 697,
+  702, 600,
+  958, 289,
+  732, 96,
+  205, 662,
+  695, 533,
+  369, 433,
+  83, 445,
+  176, 315,
+  239, 95,
+  895, 682,
+  628, 118,
+  730, 741,
+  779, 734,
+  804, 314,
+  465, 567,
+  810, 106,
+  81, 268,
+  968, 518,
+  22, 159,
+  726, 504,
+  38, 269,
+  751, 649,
+  954, 659,
+};
+std::array<int32_t, 2000> expCoords = {
+  190, 464,
+  437, 588,
+  774, 203,
+  276, 599,
+  939, 646,
+  356, 373,
+  681, 533,
+  369, 419,
+  500, 7,
+  765, 655,
+  363, 460,
+  75, 272,
+  920, 484,
+  397, 590,
+  315, 424,
+  861, 326,
+  795, 107,
+  701, 526,
+  220, 688,
+  442, 678,
+  59, 134,
+  50, 501,
+  593, 398,
+  44, 417,
+  327, 375,
+  111, 452,
+  562, 3,
+  614, 680,
+  780, 734,
+  941, 586,
+  284, 467,
+  476, 142,
+  379, 36,
+  451, 731,
+  122, 85,
+  735, 439,
+  469, 76,
+  231, 670,
+  415, 400,
+  8, 221,
+  435, 458,
+  906, 466,
+  701, 111,
+  741, 619,
+  632, 327,
+  151, 423,
+  527, 709,
+  479, 375,
+  535, 612,
+  59, 498,
+  762, 529,
+  791, 417,
+  948, 113,
+  31, 441,
+  119, 705,
+  323, 141,
+  457, 687,
+  869, 189,
+  346, 132,
+  890, 525,
+  721, 670,
+  775, 448,
+  288, 108,
+  334, 492,
+  74, 38,
+  322, 441,
+  845, 215,
+  171, 96,
+  975, 137,
+  316, 425,
+  17, 665,
+  961, 414,
+  83, 245,
+  723, 105,
+  551, 201,
+  958, 495,
+  253, 77,
+  770, 583,
+  754, 679,
+  657, 425,
+  201, 521,
+  934, 657,
+  973, 605,
+  544, 336,
+  875, 717,
+  592, 272,
+  323, 650,
+  215, 549,
+  365, 600,
+  777, 524,
+  621, 369,
+  832, 531,
+  440, 19,
+  362, 547,
+  474, 255,
+  637, 92,
+  171, 549,
+  126, 273,
+  601, 74,
+  792, 701,
+  959, 649,
+  624, 692,
+  721, 535,
+  42, 325,
+  280, 247,
+  86, 565,
+  508, 494,
+  629, 276,
+  445, 309,
+  479, 732,
+  370, 5,
+  905, 645,
+  350, 471,
+  355, 155,
+  221, 311,
+  73, 484,
+  748, 692,
+  374, 218,
+  371, 618,
+  891, 319,
+  6, 29,
+  141, 445,
+  105, 218,
+  60, 461,
+  28, 633,
+  306, 567,
+  560, 25,
+  564, 493,
+  184, 595,
+  611, 415,
+  628, 513,
+  240, 587,
+  847, 541,
+  690, 353,
+  848, 495,
+  331, 386,
+  391, 19,
+  575, 635,
+  936, 507,
+  76, 303,
+  42, 126,
+  563, 126,
+  565, 384,
+  858, 536,
+  892, 209,
+  255, 522,
+  884, 724,
+  639, 245,
+  604, 614,
+  624, 86,
+  287, 734,
+  430, 429,
+  525, 145,
+  164, 372,
+  974, 295,
+  710, 130,
+  70, 114,
+  159, 208,
+  535, 578,
+  871, 475,
+  844, 446,
+  691, 401,
+  876, 43,
+  777, 108,
+  59, 41,
+  52, 382,
+  404, 744,
+  107, 241,
+  980, 300,
+  874, 424,
+  935, 654,
+  771, 24,
+  948, 387,
+  209, 652,
+  222, 99,
+  718, 722,
+  534, 331,
+  25, 449,
+  943, 711,
+  916, 140,
+  621, 307,
+  360, 23,
+  859, 501,
+  92, 658,
+  777, 236,
+  812, 379,
+  22, 321,
+  444, 175,
+  822, 554,
+  585, 368,
+  486, 197,
+  604, 564,
+  268, 389,
+  100, 531,
+  875, 423,
+  975, 137,
+  14, 338,
+  915, 626,
+  472, 673,
+  223, 224,
+  711, 541,
+  19, 210,
+  578, 694,
+  336, 85,
+  515, 523,
+  298, 643,
+  892, 570,
+  263, 698,
+  923, 552,
+  898, 178,
+  493, 606,
+  940, 474,
+  898, 515,
+  47, 618,
+  369, 698,
+  481, 89,
+  945, 18,
+  673, 103,
+  155, 484,
+  964, 516,
+  757, 317,
+  136, 158,
+  757, 41,
+  488, 131,
+  629, 412,
+  566, 611,
+  924, 146,
+  709, 521,
+  259, 682,
+  531, 417,
+  320, 250,
+  333, 729,
+  202, 498,
+  571, 35,
+  540, 152,
+  95, 645,
+  36, 715,
+  325, 304,
+  134, 91,
+  412, 462,
+  137, 722,
+  181, 567,
+  481, 575,
+  300, 7,
+  462, 526,
+  68, 497,
+  344, 442,
+  227, 196,
+  917, 611,
+  145, 482,
+  872, 551,
+  513, 97,
+  44, 618,
+  116, 389,
+  861, 32,
+  690, 356,
+  168, 43,
+  33, 177,
+  111, 299,
+  49, 685,
+  920, 600,
+  187, 438,
+  887, 713,
+  67, 234,
+  694, 479,
+  630, 619,
+  710, 250,
+  168, 570,
+  356, 62,
+  724, 655,
+  42, 127,
+  418, 252,
+  45, 81,
+  436, 49,
+  807, 643,
+  130, 140,
+  437, 723,
+  767, 98,
+  912, 612,
+  390, 101,
+  53, 66,
+  875, 128,
+  361, 489,
+  603, 294,
+  371, 43,
+  427, 538,
+  763, 144,
+  850, 24,
+  891, 178,
+  126, 234,
+  838, 618,
+  343, 175,
+  534, 200,
+  36, 466,
+  517, 28,
+  749, 732,
+  796, 256,
+  333, 260,
+  441, 433,
+  615, 129,
+  855, 528,
+  172, 636,
+  22, 476,
+  160, 189,
+  339, 0,
+  438, 596,
+  673, 407,
+  561, 8,
+  605, 735,
+  223, 438,
+  826, 589,
+  327, 264,
+  945, 169,
+  209, 111,
+  494, 192,
+  552, 51,
+  377, 343,
+  773, 537,
+  229, 14,
+  255, 304,
+  10, 349,
+  840, 683,
+  234, 736,
+  7, 247,
+  782, 73,
+  441, 282,
+  824, 530,
+  352, 14,
+  691, 314,
+  850, 429,
+  923, 650,
+  455, 8,
+  68, 236,
+  591, 456,
+  510, 104,
+  866, 549,
+  373, 619,
+  90, 45,
+  504, 696,
+  147, 424,
+  59, 122,
+  75, 671,
+  445, 75,
+  664, 375,
+  408, 544,
+  170, 284,
+  265, 227,
+  175, 358,
+  625, 42,
+  194, 205,
+  704, 244,
+  424, 527,
+  157, 326,
+  626, 689,
+  356, 612,
+  638, 535,
+  731, 720,
+  244, 27,
+  245, 29,
+  701, 623,
+  922, 352,
+  511, 194,
+  101, 594,
+  144, 600,
+  870, 373,
+  903, 308,
+  199, 480,
+  398, 459,
+  436, 443,
+  938, 412,
+  891, 105,
+  4, 454,
+  540, 584,
+  810, 706,
+  608, 215,
+  820, 281,
+  608, 464,
+  439, 145,
+  443, 690,
+  667, 60,
+  570, 87,
+  974, 731,
+  692, 279,
+  401, 690,
+  243, 496,
+  478, 22,
+  114, 664,
+  878, 676,
+  305, 219,
+  658, 445,
+  967, 457,
+  156, 619,
+  807, 70,
+  873, 16,
+  851, 666,
+  842, 486,
+  327, 407,
+  238, 649,
+  174, 273,
+  755, 228,
+  614, 161,
+  863, 482,
+  471, 171,
+  824, 74,
+  203, 172,
+  595, 470,
+  964, 135,
+  792, 116,
+  361, 712,
+  128, 200,
+  799, 715,
+  92, 434,
+  405, 671,
+  84, 131,
+  50, 221,
+  189, 3,
+  126, 80,
+  726, 178,
+  891, 246,
+  865, 178,
+  377, 405,
+  514, 308,
+  386, 252,
+  401, 340,
+  381, 492,
+  872, 427,
+  426, 189,
+  855, 130,
+  578, 544,
+  727, 301,
+  119, 568,
+  532, 540,
+  596, 315,
+  914, 731,
+  546, 546,
+  33, 639,
+  981, 244,
+  842, 456,
+  440, 394,
+  774, 187,
+  462, 633,
+  676, 273,
+  135, 298,
+  918, 500,
+  418, 425,
+  50, 734,
+  617, 582,
+  919, 256,
+  802, 648,
+  664, 440,
+  884, 335,
+  244, 607,
+  695, 306,
+  14, 704,
+  601, 2,
+  497, 447,
+  298, 344,
+  494, 438,
+  858, 618,
+  774, 410,
+  612, 549,
+  565, 580,
+  967, 583,
+  612, 115,
+  475, 209,
+  154, 578,
+  59, 56,
+  663, 391,
+  444, 227,
+  18, 216,
+  265, 23,
+  799, 249,
+  879, 601,
+  943, 704,
+  590, 231,
+  903, 41,
+  223, 268,
+  412, 704,
+  577, 137,
+  594, 696,
+  139, 727,
+  155, 611,
+  675, 302,
+  217, 577,
+  840, 396,
+  406, 9,
+  150, 599,
+  417, 251,
+  296, 537,
+  980, 410,
+  878, 128,
+  873, 523,
+  422, 645,
+  234, 420,
+  27, 281,
+  358, 64,
+  45, 252,
+  356, 740,
+  929, 101,
+  517, 356,
+  401, 694,
+  514, 606,
+  870, 567,
+  462, 51,
+  468, 146,
+  206, 129,
+  354, 680,
+  431, 129,
+  745, 37,
+  83, 25,
+  464, 451,
+  180, 386,
+  705, 687,
+  528, 723,
+  544, 1,
+  471, 390,
+  152, 487,
+  129, 337,
+  964, 103,
+  61, 47,
+  99, 609,
+  627, 585,
+  84, 718,
+  873, 469,
+  917, 298,
+  488, 513,
+  280, 506,
+  494, 592,
+  218, 557,
+  453, 304,
+  908, 115,
+  129, 282,
+  540, 704,
+  320, 69,
+  930, 633,
+  28, 85,
+  821, 104,
+  568, 357,
+  304, 38,
+  919, 667,
+  719, 489,
+  392, 725,
+  709, 368,
+  323, 81,
+  956, 206,
+  185, 157,
+  753, 701,
+  500, 495,
+  345, 92,
+  679, 292,
+  27, 371,
+  526, 164,
+  3, 330,
+  503, 56,
+  815, 180,
+  11, 164,
+  88, 613,
+  711, 182,
+  63, 316,
+  703, 536,
+  193, 552,
+  942, 384,
+  427, 13,
+  506, 513,
+  138, 501,
+  40, 319,
+  847, 315,
+  88, 39,
+  102, 360,
+  670, 66,
+  824, 578,
+  61, 237,
+  376, 374,
+  448, 348,
+  103, 276,
+  22, 332,
+  881, 576,
+  249, 546,
+  313, 370,
+  51, 432,
+  280, 216,
+  130, 663,
+  450, 487,
+  803, 211,
+  271, 286,
+  421, 696,
+  859, 332,
+  31, 77,
+  194, 174,
+  814, 665,
+  29, 425,
+  800, 314,
+  524, 211,
+  654, 65,
+  814, 600,
+  190, 476,
+  826, 351,
+  577, 78,
+  462, 662,
+  463, 347,
+  194, 25,
+  614, 422,
+  110, 552,
+  181, 257,
+  479, 481,
+  619, 569,
+  284, 367,
+  679, 381,
+  764, 223,
+  545, 180,
+  443, 606,
+  102, 168,
+  786, 423,
+  816, 721,
+  944, 699,
+  548, 66,
+  737, 422,
+  742, 340,
+  160, 737,
+  877, 596,
+  409, 155,
+  293, 466,
+  248, 596,
+  20, 356,
+  731, 510,
+  420, 518,
+  365, 170,
+  779, 542,
+  700, 23,
+  237, 275,
+  578, 62,
+  233, 606,
+  747, 599,
+  294, 583,
+  222, 213,
+  839, 554,
+  331, 424,
+  694, 67,
+  58, 640,
+  627, 649,
+  91, 642,
+  49, 708,
+  227, 561,
+  398, 299,
+  251, 378,
+  182, 38,
+  365, 538,
+  473, 688,
+  170, 535,
+  858, 98,
+  956, 558,
+  901, 33,
+  285, 615,
+  272, 34,
+  146, 9,
+  778, 260,
+  743, 41,
+  81, 587,
+  368, 79,
+  404, 487,
+  434, 147,
+  443, 555,
+  96, 380,
+  830, 734,
+  628, 212,
+  126, 7,
+  336, 361,
+  543, 24,
+  62, 15,
+  506, 196,
+  445, 725,
+  792, 212,
+  553, 145,
+  783, 66,
+  829, 180,
+  433, 318,
+  107, 674,
+  152, 674,
+  12, 127,
+  832, 232,
+  837, 675,
+  885, 642,
+  830, 190,
+  836, 461,
+  967, 453,
+  650, 489,
+  550, 136,
+  961, 76,
+  745, 392,
+  288, 167,
+  502, 694,
+  572, 736,
+  428, 310,
+  624, 676,
+  70, 408,
+  155, 535,
+  135, 0,
+  534, 9,
+  532, 31,
+  155, 627,
+  792, 226,
+  397, 334,
+  23, 458,
+  798, 175,
+  399, 367,
+  500, 247,
+  106, 477,
+  149, 142,
+  63, 677,
+  822, 120,
+  859, 379,
+  127, 80,
+  224, 663,
+  449, 521,
+  389, 465,
+  714, 374,
+  754, 620,
+  558, 406,
+  584, 16,
+  969, 59,
+  809, 103,
+  151, 265,
+  251, 160,
+  562, 359,
+  93, 382,
+  195, 357,
+  275, 3,
+  878, 280,
+  416, 656,
+  756, 410,
+  887, 680,
+  464, 423,
+  30, 102,
+  399, 618,
+  454, 519,
+  636, 490,
+  761, 2,
+  299, 520,
+  90, 388,
+  878, 710,
+  702, 571,
+  342, 184,
+  664, 109,
+  467, 12,
+  754, 153,
+  831, 577,
+  178, 658,
+  92, 147,
+  370, 126,
+  344, 424,
+  157, 577,
+  36, 7,
+  318, 393,
+  626, 658,
+  637, 16,
+  691, 659,
+  747, 196,
+  505, 89,
+  607, 518,
+  77, 299,
+  512, 604,
+  97, 350,
+  566, 400,
+  183, 438,
+  775, 738,
+  54, 30,
+  527, 451,
+  84, 528,
+  115, 143,
+  775, 340,
+  306, 272,
+  758, 134,
+  136, 372,
+  15, 256,
+  335, 376,
+  491, 259,
+  200, 115,
+  560, 332,
+  843, 675,
+  862, 713,
+  202, 541,
+  570, 436,
+  455, 435,
+  68, 2,
+  468, 591,
+  909, 535,
+  126, 535,
+  614, 288,
+  778, 76,
+  402, 555,
+  20, 686,
+  458, 48,
+  896, 95,
+  883, 154,
+  919, 372,
+  358, 116,
+  721, 622,
+  319, 728,
+  471, 423,
+  145, 70,
+  88, 559,
+  739, 235,
+  838, 399,
+  824, 139,
+  274, 435,
+  913, 46,
+  810, 123,
+  438, 271,
+  69, 584,
+  510, 128,
+  519, 442,
+  442, 103,
+  947, 312,
+  130, 485,
+  585, 188,
+  287, 601,
+  56, 204,
+  920, 620,
+  897, 443,
+  769, 257,
+  381, 280,
+  424, 529,
+  242, 129,
+  216, 541,
+  629, 593,
+  779, 108,
+  86, 188,
+  639, 426,
+  875, 678,
+  151, 85,
+  12, 205,
+  121, 340,
+  187, 2,
+  879, 735,
+  797, 24,
+  534, 456,
+  844, 306,
+  261, 564,
+  784, 586,
+  934, 300,
+  657, 393,
+  417, 303,
+  371, 630,
+  469, 437,
+  164, 407,
+  537, 379,
+  642, 407,
+  318, 589,
+  563, 309,
+  328, 686,
+  913, 14,
+  93, 96,
+  625, 194,
+  581, 415,
+  199, 490,
+  930, 267,
+  32, 351,
+  67, 370,
+  687, 56,
+  227, 370,
+  541, 370,
+  796, 229,
+  114, 417,
+  765, 325,
+  84, 444,
+  837, 138,
+  898, 80,
+  126, 0,
+  780, 419,
+  659, 390,
+  789, 187,
+  527, 39,
+  136, 102,
+  659, 78,
+  754, 339,
+  692, 266,
+  762, 608,
+  913, 157,
+  597, 23,
+  290, 27,
+  96, 221,
+  61, 433,
+  720, 561,
+  694, 84,
+  284, 192,
+  68, 0,
+  267, 21,
+  466, 404,
+  202, 167,
+  649, 13,
+  297, 358,
+  520, 715,
+  755, 598,
+  327, 279,
+  685, 529,
+  109, 196,
+  31, 296,
+  10, 496,
+  860, 280,
+  279, 72,
+  800, 362,
+  865, 116,
+  219, 547,
+  69, 605,
+  847, 26,
+  66, 350,
+  203, 165,
+  432, 170,
+  818, 661,
+  804, 711,
+  659, 33,
+  861, 697,
+  952, 555,
+  917, 70,
+  826, 544,
+  944, 90,
+  260, 144,
+  143, 198,
+  83, 142,
+  927, 713,
+  5, 135,
+  914, 510,
+  803, 732,
+  81, 426,
+  857, 184,
+  590, 726,
+  191, 215,
+  495, 489,
+  605, 529,
+  312, 332,
+  299, 324,
+  57, 705,
+  176, 53,
+  920, 618,
+  953, 608,
+  35, 423,
+  146, 432,
+  83, 337,
+  851, 244,
+  338, 684,
+  634, 122,
+  863, 73,
+  589, 473,
+  330, 116,
+  639, 288,
+  809, 171,
+  664, 430,
+  80, 28,
+  847, 111,
+  103, 75,
+  291, 395,
+  854, 644,
+  353, 69,
+  438, 706,
+  41, 229,
+  776, 592,
+  845, 405,
+  588, 101,
+  35, 113,
+  752, 275,
+  280, 177,
+  863, 275,
+  873, 701,
+  864, 279,
+  644, 323,
+  727, 522,
+  429, 300,
+  600, 485,
+  115, 525,
+  357, 315,
+  312, 424,
+  42, 721,
+  955, 212,
+  655, 580,
+  931, 497,
+  450, 628,
+  812, 689,
+  698, 597,
+  944, 289,
+  727, 95,
+  207, 657,
+  692, 531,
+  368, 432,
+  84, 441,
+  174, 312,
+  237, 91,
+  881, 672,
+  625, 116,
+  723, 733,
+  770, 725,
+  799, 313,
+  465, 566,
+  803, 106,
+  80, 264,
+  952, 513,
+  23, 155,
+  723, 502,
+  39, 265,
+  745, 644,
+  936, 649,
+};
+} // namespace openCvData
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 6d84a42..4b05395 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -481,6 +481,7 @@
 
 static std::string allowedKeys[] =
 {
+    "audiopolicy",
     "audiorecord",
     "audiotrack",
     "codec",
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 8631c39..db5f0ff 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -27,9 +27,8 @@
 include $(CLEAR_VARS)
 # seccomp is not required for coverage build.
 ifneq ($(NATIVE_COVERAGE),true)
-LOCAL_REQUIRED_MODULES := crash_dump.policy
-LOCAL_REQUIRED_MODULES_arm := mediacodec.policy
-LOCAL_REQUIRED_MODULES_x86 := mediacodec.policy
+LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediacodec.policy
+LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediacodec.policy
 endif
 LOCAL_SRC_FILES := main_codecservice.cpp
 LOCAL_SHARED_LIBRARIES := \
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 701ca6e..51619f6 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -25,6 +25,9 @@
 #include <media/stagefright/omx/1.0/Omx.h>
 #include <media/stagefright/omx/1.0/OmxStore.h>
 
+#include <media/CodecServiceRegistrant.h>
+#include <dlfcn.h>
+
 using namespace android;
 
 // Must match location in Android.mk.
@@ -45,20 +48,37 @@
 
     ::android::hardware::configureRpcThreadpool(64, false);
 
-    using namespace ::android::hardware::media::omx::V1_0;
-    sp<IOmxStore> omxStore = new implementation::OmxStore();
-    if (omxStore == nullptr) {
-        LOG(ERROR) << "Cannot create IOmxStore HAL service.";
-    } else if (omxStore->registerAsService() != OK) {
-        LOG(ERROR) << "Cannot register IOmxStore HAL service.";
-    }
-    sp<IOmx> omx = new implementation::Omx();
-    if (omx == nullptr) {
-        LOG(ERROR) << "Cannot create IOmx HAL service.";
-    } else if (omx->registerAsService() != OK) {
-        LOG(ERROR) << "Cannot register IOmx HAL service.";
+    // Registration of customized codec services
+    void *registrantLib = dlopen(
+            "libmedia_codecserviceregistrant.so",
+            RTLD_NOW | RTLD_LOCAL);
+    if (registrantLib) {
+        RegisterCodecServicesFunc registerCodecServices =
+                reinterpret_cast<RegisterCodecServicesFunc>(
+                dlsym(registrantLib, "RegisterCodecServices"));
+        if (registerCodecServices) {
+            registerCodecServices();
+        } else {
+            LOG(WARNING) << "Cannot register additional services "
+                    "-- corrupted library.";
+        }
     } else {
-        LOG(INFO) << "IOmx HAL service created.";
+        // Default codec services
+        using namespace ::android::hardware::media::omx::V1_0;
+        sp<IOmxStore> omxStore = new implementation::OmxStore();
+        if (omxStore == nullptr) {
+            LOG(ERROR) << "Cannot create IOmxStore HAL service.";
+        } else if (omxStore->registerAsService() != OK) {
+            LOG(ERROR) << "Cannot register IOmxStore HAL service.";
+        }
+        sp<IOmx> omx = new implementation::Omx();
+        if (omx == nullptr) {
+            LOG(ERROR) << "Cannot create IOmx HAL service.";
+        } else if (omx->registerAsService() != OK) {
+            LOG(ERROR) << "Cannot register IOmx HAL service.";
+        } else {
+            LOG(INFO) << "IOmx HAL service created.";
+        }
     }
 
     ::android::hardware::joinRpcThreadpool();
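The hunk above swaps the hard-coded OMX registration for an optional, dynamically loaded registrant: main_codecservice.cpp dlopen()s libmedia_codecserviceregistrant.so, resolves a single entry point, and only falls back to the default IOmx/IOmxStore services when the library is absent. A minimal standalone sketch of that dlopen/dlsym pattern is below; the function type is assumed to be a plain no-argument call (the real RegisterCodecServicesFunc typedef lives in CodecServiceRegistrant.h, which is not part of this diff), and the fallback branch is a stand-in.

    #include <dlfcn.h>
    #include <cstdio>

    // Assumed shape of the registrant entry point: no arguments, no return value.
    using RegisterFunc = void (*)();

    int main() {
        // Prefer the optional registrant library; RTLD_LOCAL keeps its symbols private.
        void *lib = dlopen("libmedia_codecserviceregistrant.so", RTLD_NOW | RTLD_LOCAL);
        if (lib != nullptr) {
            auto registerServices =
                    reinterpret_cast<RegisterFunc>(dlsym(lib, "RegisterCodecServices"));
            if (registerServices != nullptr) {
                registerServices();                      // customized codec services
            } else {
                std::fprintf(stderr, "RegisterCodecServices symbol not found\n");
            }
        } else {
            std::fprintf(stderr, "registrant not found, would fall back to OMX defaults\n");
        }
        return 0;
    }

Keeping the default registration in the else branch mirrors the patch: the built-in OMX services are only registered when no registrant library is present.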
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 5b7571c..73c9535 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -15,10 +15,10 @@
 # service executable
 include $(CLEAR_VARS)
 # seccomp filters are defined for the following architectures:
-LOCAL_REQUIRED_MODULES := crash_dump.policy
-LOCAL_REQUIRED_MODULES_arm := mediaextractor.policy
-LOCAL_REQUIRED_MODULES_arm64 := mediaextractor.policy
-LOCAL_REQUIRED_MODULES_x86 := mediaextractor.policy
+LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaextractor.policy
+LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaextractor.policy
+LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaextractor.policy
+LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaextractor.policy
 
 # extractor libraries
 LOCAL_REQUIRED_MODULES += \
@@ -32,7 +32,6 @@
     libmpeg2extractor \
     liboggextractor \
     libwavextractor \
-    MediaComponents \
 
 LOCAL_SRC_FILES := main_extractorservice.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
@@ -45,7 +44,7 @@
 include $(BUILD_EXECUTABLE)
 
 # service seccomp filter
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86))
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86 x86_64))
 include $(CLEAR_VARS)
 LOCAL_MODULE := mediaextractor.policy
 LOCAL_MODULE_CLASS := ETC
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
new file mode 100644
index 0000000..6d9ed6f
--- /dev/null
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
@@ -0,0 +1,51 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+munmap: 1
+mmap: 1
+madvise: 1
+openat: 1
+clock_gettime: 1
+writev: 1
+brk: 1
+mprotect: 1
+read: 1
+lseek: 1
+clone: 1
+getuid: 1
+setpriority: 1
+sigaltstack: 1
+fstatfs: 1
+newfstatat: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+faccessat: 1
+sched_setscheduler: 1
+getrlimit: 1
+nanosleep: 1
+getrandom: 1
+
+# for dynamically loading extractors
+getdents64: 1
+readlinkat: 1
+pread64: 1
+mremap: 1
+
+# for FileSource
+readlinkat: 1
+
+# Required by AddressSanitizer
+gettid: 1
+sched_yield: 1
+getpid: 1
+gettid: 1
+
+@include /system/etc/seccomp_policy/crash_dump.x86_64.policy
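The new x86_64 policy is a plain syscall allowlist that minijail compiles into a seccomp-bpf filter for the extractor process; ordering entries by call frequency only affects filter evaluation cost, not behavior. As a rough illustration of what such an allowlist means at the seccomp level, the sketch below builds a tiny filter with libseccomp (an assumption for illustration only; the media services rely on minijail to parse the .policy files, not libseccomp).

    #include <seccomp.h>   // libseccomp; link with -lseccomp

    // Allow a handful of syscalls and kill the process on anything else,
    // roughly the semantics of the allowlist above.
    static int install_allowlist() {
        scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_KILL);
        if (ctx == nullptr) return -1;
        int rc = 0;
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(ioctl), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(futex), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(read),  0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(write), 0);
        if (rc == 0) rc = seccomp_load(ctx);   // install the filter for this process
        seccomp_release(ctx);
        return rc;
    }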
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index 29e6dfc..ca96f62 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -9,7 +9,9 @@
     shared_libs: [
         "libaudioutils",
         "libbinder",
+        "libcutils",
         "liblog",
+        "libmediautils",
         "libnbaio",
         "libnblog",
         "libutils",
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index 1be5544..e58dff7 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -21,7 +21,7 @@
 #include <utils/Log.h>
 #include <binder/PermissionCache.h>
 #include <media/nblog/NBLog.h>
-#include <private/android_filesystem_config.h>
+#include <mediautils/ServiceUtilities.h>
 #include "MediaLogService.h"
 
 namespace android {
@@ -53,7 +53,7 @@
 
 void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name)
 {
-    if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0 ||
+    if (!isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid()) || shared == 0 ||
             size < kMinSize || size > kMaxSize || name == NULL ||
             shared->size() < NBLog::Timeline::sharedSize(size)) {
         return;
@@ -67,7 +67,7 @@
 
 void MediaLogService::unregisterWriter(const sp<IMemory>& shared)
 {
-    if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0) {
+    if (!isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid()) || shared == 0) {
         return;
     }
     Mutex::Autolock _l(mLock);
@@ -95,10 +95,8 @@
 
 status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused)
 {
-    // FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp
-    static const String16 sDump("android.permission.DUMP");
-    if (!(IPCThreadState::self()->getCallingUid() == AID_AUDIOSERVER ||
-            PermissionCache::checkCallingPermission(sDump))) {
+    if (!(isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid())
+            || dumpAllowed())) {
         dprintf(fd, "Permission Denial: can't dump media.log from pid=%d, uid=%d\n",
                 IPCThreadState::self()->getCallingPid(),
                 IPCThreadState::self()->getCallingUid());
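The MediaLogService checks above replace a direct AID_AUDIOSERVER comparison with the shared isAudioServerOrMediaServerUid() helper from libmediautils, so mediaserver-owned callers are accepted as well. The helper's implementation is not part of this diff; a plausible sketch, using the uid constants from android_filesystem_config.h, would be:

    #include <private/android_filesystem_config.h>  // AID_AUDIOSERVER, AID_MEDIA
    #include <sys/types.h>

    // Sketch only; the real helper is declared in mediautils/ServiceUtilities.h.
    static inline bool isAudioServerOrMediaServerUidSketch(uid_t uid) {
        switch (uid) {
        case AID_AUDIOSERVER:   // audioserver
        case AID_MEDIA:         // mediaserver
            return true;
        default:
            return false;
        }
    }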
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 78bb587..28bfd3f 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -31,7 +31,8 @@
 
 #include "ResourceManagerService.h"
 #include "ServiceLog.h"
-
+#include "mediautils/SchedulingPolicyService.h"
+#include <cutils/sched_policy.h>
 namespace android {
 
 namespace {
@@ -111,6 +112,7 @@
     ResourceInfo info;
     info.clientId = clientId;
     info.client = client;
+    info.cpuBoost = false;
     infos.push_back(info);
     return infos.editItemAt(infos.size() - 1);
 }
@@ -201,7 +203,8 @@
     : mProcessInfo(processInfo),
       mServiceLog(new ServiceLog()),
       mSupportsMultipleSecureCodecs(true),
-      mSupportsSecureWithNonSecureCodec(true) {}
+      mSupportsSecureWithNonSecureCodec(true),
+      mCpuBoostCount(0) {}
 
 ResourceManagerService::~ResourceManagerService() {}
 
@@ -239,6 +242,19 @@
     ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
     // TODO: do the merge instead of append.
     info.resources.appendVector(resources);
+
+    for (size_t i = 0; i < resources.size(); ++i) {
+        if (resources[i].mType == MediaResource::kCpuBoost && !info.cpuBoost) {
+            info.cpuBoost = true;
+            // Request the boost on every new instance of kCpuBoost: media.codec
+            // could have died, and if we only requested it the first time,
+            // subsequent instances would never get the boost.
+            if (requestCpusetBoost(true, this) != OK) {
+                ALOGW("couldn't request cpuset boost");
+            }
+            mCpuBoostCount++;
+        }
+    }
     if (info.deathNotifier == nullptr) {
         info.deathNotifier = new DeathNotifier(this, pid, clientId);
         IInterface::asBinder(client)->linkToDeath(info.deathNotifier);
@@ -270,6 +286,11 @@
     ResourceInfos &infos = mMap.editValueAt(index);
     for (size_t j = 0; j < infos.size(); ++j) {
         if (infos[j].clientId == clientId) {
+            if (infos[j].cpuBoost && mCpuBoostCount > 0) {
+                if (--mCpuBoostCount == 0) {
+                    requestCpusetBoost(false, this);
+                }
+            }
             IInterface::asBinder(infos[j].client)->unlinkToDeath(infos[j].deathNotifier);
             j = infos.removeAt(j);
             found = true;
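The cpuset-boost bookkeeping above is a counter: every client entry that carries a kCpuBoost resource re-requests the boost (in case media.codec died and lost it) and bumps mCpuBoostCount, and the boost is released only when the last such client is removed. A condensed sketch of that acquire/release pattern, with requestCpusetBoost() replaced by a stand-in, is:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for requestCpusetBoost(); the real call goes through the
    // scheduling policy service.
    static void setBoost(bool enable) {
        std::printf("cpuset boost %s\n", enable ? "on" : "off");
    }

    class BoostCounter {
    public:
        // Called when a client that asked for kCpuBoost is added.
        void acquire() {
            setBoost(true);   // re-request every time; a previous holder may have died
            ++mCount;
        }
        // Called when such a client is removed.
        void release() {
            if (mCount > 0 && --mCount == 0) {
                setBoost(false);   // last holder gone, drop the boost
            }
        }
    private:
        int32_t mCount = 0;
    };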
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 9e97ac0..82d2a0b 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -38,6 +38,7 @@
     sp<IResourceManagerClient> client;
     sp<IBinder::DeathRecipient> deathNotifier;
     Vector<MediaResource> resources;
+    bool cpuBoost;
 };
 
 typedef Vector<ResourceInfo> ResourceInfos;
@@ -112,6 +113,7 @@
     PidResourceInfosMap mMap;
     bool mSupportsMultipleSecureCodecs;
     bool mSupportsSecureWithNonSecureCodec;
+    int32_t mCpuBoostCount;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/services/oboeservice/AAudioClientTracker.cpp b/services/oboeservice/AAudioClientTracker.cpp
index 549a4e9..7264a9b 100644
--- a/services/oboeservice/AAudioClientTracker.cpp
+++ b/services/oboeservice/AAudioClientTracker.cpp
@@ -21,6 +21,8 @@
 
 #include <assert.h>
 #include <binder/IPCThreadState.h>
+#include <iomanip>
+#include <iostream>
 #include <map>
 #include <mutex>
 #include <utils/Singleton.h>
@@ -39,7 +41,6 @@
         : Singleton<AAudioClientTracker>() {
 }
 
-
 std::string AAudioClientTracker::dump() const {
     std::stringstream result;
     const bool isLocked = AAudio_tryUntilTrue(
@@ -198,7 +199,9 @@
 
     result << "  client: pid = " << mProcessId << " has " << mStreams.size() << " streams\n";
     for (const auto& serviceStream : mStreams) {
-        result << "     stream: 0x" << std::hex << serviceStream->getHandle() << std::dec << "\n";
+        result << "     stream: 0x" << std::setfill('0') << std::setw(8) << std::hex
+               << serviceStream->getHandle()
+               << std::dec << std::setfill(' ') << "\n";
     }
 
     if (isLocked) {
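The dump() tweak above pads the stream handle to a fixed-width hex field and then restores the stream's fill character and numeric base so later fields are unaffected. A tiny standalone example of that iomanip sequence:

    #include <cstdint>
    #include <iomanip>
    #include <iostream>
    #include <sstream>

    int main() {
        std::stringstream result;
        int32_t handle = 0x1A2B;   // arbitrary example handle
        result << "stream: 0x" << std::setfill('0') << std::setw(8) << std::hex
               << handle
               << std::dec << std::setfill(' ') << "\n";
        std::cout << result.str();  // prints "stream: 0x00001a2b"
        return 0;
    }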
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 11fd9f6..04fee13 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -67,11 +67,17 @@
 
         result << "Exclusive MMAP Endpoints: " << mExclusiveStreams.size() << "\n";
         index = 0;
-        for (const auto &output : mExclusiveStreams) {
+        for (const auto &stream : mExclusiveStreams) {
             result << "  #" << index++ << ":";
-            result << output->dump() << "\n";
+            result << stream->dump() << "\n";
         }
 
+        result << "  ExclusiveSearchCount:  " << mExclusiveSearchCount << "\n";
+        result << "  ExclusiveFoundCount:   " << mExclusiveFoundCount << "\n";
+        result << "  ExclusiveOpenCount:    " << mExclusiveOpenCount << "\n";
+        result << "  ExclusiveCloseCount:   " << mExclusiveCloseCount << "\n";
+        result << "\n";
+
         if (isExclusiveLocked) {
             mExclusiveLock.unlock();
         }
@@ -79,11 +85,17 @@
 
     result << "Shared Endpoints: " << mSharedStreams.size() << "\n";
     index = 0;
-    for (const auto &input : mSharedStreams) {
+    for (const auto &stream : mSharedStreams) {
         result << "  #" << index++ << ":";
-        result << input->dump() << "\n";
+        result << stream->dump() << "\n";
     }
 
+    result << "  SharedSearchCount:     " << mSharedSearchCount << "\n";
+    result << "  SharedFoundCount:      " << mSharedFoundCount << "\n";
+    result << "  SharedOpenCount:       " << mSharedOpenCount << "\n";
+    result << "  SharedCloseCount:      " << mSharedCloseCount << "\n";
+    result << "\n";
+
     if (isSharedLocked) {
         mSharedLock.unlock();
     }
@@ -95,8 +107,10 @@
 sp<AAudioServiceEndpoint> AAudioEndpointManager::findExclusiveEndpoint_l(
         const AAudioStreamConfiguration &configuration) {
     sp<AAudioServiceEndpoint> endpoint;
+    mExclusiveSearchCount++;
     for (const auto ep : mExclusiveStreams) {
         if (ep->matches(configuration)) {
+            mExclusiveFoundCount++;
             endpoint = ep;
             break;
         }
@@ -111,8 +125,10 @@
 sp<AAudioServiceEndpointShared> AAudioEndpointManager::findSharedEndpoint_l(
         const AAudioStreamConfiguration &configuration) {
     sp<AAudioServiceEndpointShared> endpoint;
+    mSharedSearchCount++;
     for (const auto ep  : mSharedStreams) {
         if (ep->matches(configuration)) {
+            mSharedFoundCount++;
             endpoint = ep;
             break;
         }
@@ -134,7 +150,7 @@
 }
 
 sp<AAudioServiceEndpoint> AAudioEndpointManager::openExclusiveEndpoint(
-        AAudioService &aaudioService __unused,
+        AAudioService &aaudioService,
         const aaudio::AAudioStreamRequest &request) {
 
     std::lock_guard<std::mutex> lock(mExclusiveLock);
@@ -146,12 +162,12 @@
 
     // If we find an existing one then this one cannot be exclusive.
     if (endpoint.get() != nullptr) {
-        ALOGE("openExclusiveEndpoint() already in use");
+        ALOGW("openExclusiveEndpoint() already in use");
         // Already open so do not allow a second stream.
         return nullptr;
     } else {
-        sp<AAudioServiceEndpointMMAP> endpointMMap = new AAudioServiceEndpointMMAP();
-        ALOGD("openExclusiveEndpoint(), no match so try to open MMAP %p for dev %d",
+        sp<AAudioServiceEndpointMMAP> endpointMMap = new AAudioServiceEndpointMMAP(aaudioService);
+        ALOGV("openExclusiveEndpoint(), no match so try to open MMAP %p for dev %d",
               endpointMMap.get(), configuration.getDeviceId());
         endpoint = endpointMMap;
 
@@ -161,6 +177,7 @@
             endpoint.clear();
         } else {
             mExclusiveStreams.push_back(endpointMMap);
+            mExclusiveOpenCount++;
         }
     }
 
@@ -201,13 +218,13 @@
         if (endpoint.get() != nullptr) {
             aaudio_result_t result = endpoint->open(request);
             if (result != AAUDIO_OK) {
-                ALOGE("%s(), open failed", __func__);
                 endpoint.clear();
             } else {
                 mSharedStreams.push_back(endpoint);
+                mSharedOpenCount++;
             }
         }
-        ALOGD("%s(), created endpoint %p, requested device = %d, dir = %d",
+        ALOGV("%s(), created endpoint %p, requested device = %d, dir = %d",
               __func__, endpoint.get(), configuration.getDeviceId(), (int)direction);
         IPCThreadState::self()->restoreCallingIdentity(token);
     }
@@ -244,7 +261,8 @@
                 mExclusiveStreams.end());
 
         serviceEndpoint->close();
-        ALOGD("%s() %p for device %d",
+        mExclusiveCloseCount++;
+        ALOGV("%s() %p for device %d",
               __func__, serviceEndpoint.get(), serviceEndpoint->getDeviceId());
     }
 }
@@ -266,7 +284,8 @@
                 mSharedStreams.end());
 
         serviceEndpoint->close();
-        ALOGD("%s() %p for device %d",
+        mSharedCloseCount++;
+        ALOGV("%s() %p for device %d",
               __func__, serviceEndpoint.get(), serviceEndpoint->getDeviceId());
     }
 }
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index f6aeb5a..193bdee 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -87,8 +87,17 @@
     mutable std::mutex                                     mExclusiveLock;
     std::vector<android::sp<AAudioServiceEndpointMMAP>>    mExclusiveStreams;
 
+    // Modified under a lock.
+    int32_t mExclusiveSearchCount = 0; // number of times we SEARCHED for an exclusive endpoint
+    int32_t mExclusiveFoundCount  = 0; // number of times we FOUND an exclusive endpoint
+    int32_t mExclusiveOpenCount   = 0; // number of times we OPENED an exclusive endpoint
+    int32_t mExclusiveCloseCount  = 0; // number of times we CLOSED an exclusive endpoint
+    // Same as above but for SHARED endpoints.
+    int32_t mSharedSearchCount    = 0;
+    int32_t mSharedFoundCount     = 0;
+    int32_t mSharedOpenCount      = 0;
+    int32_t mSharedCloseCount     = 0;
 };
-
 } /* namespace aaudio */
 
 #endif //AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index ad5bb3a..94440b1 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -24,6 +24,7 @@
 
 #include <aaudio/AAudio.h>
 #include <mediautils/SchedulingPolicyService.h>
+#include <mediautils/ServiceUtilities.h>
 #include <utils/String16.h>
 
 #include "binding/AAudioServiceMessage.h"
@@ -33,7 +34,6 @@
 #include "AAudioServiceStreamMMAP.h"
 #include "AAudioServiceStreamShared.h"
 #include "binding/IAAudioService.h"
-#include "ServiceUtilities.h"
 
 using namespace android;
 using namespace aaudio;
@@ -144,15 +144,14 @@
 // If a close request is pending then close the stream
 bool AAudioService::releaseStream(const sp<AAudioServiceStreamBase> &serviceStream) {
     bool closed = false;
-    if ((serviceStream->decrementServiceReferenceCount() == 0) && serviceStream->isCloseNeeded()) {
-        // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
-        // then only one will get the pointer and do the close.
-        sp<AAudioServiceStreamBase> foundStream = mStreamTracker.removeStreamByHandle(serviceStream->getHandle());
-        if (foundStream.get() != nullptr) {
-            foundStream->close();
-            pid_t pid = foundStream->getOwnerProcessId();
-            AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
-        }
+    // decrementAndRemoveStreamByHandle() uses a lock so that if there are two simultaneous closes
+    // then only one will get the pointer and do the close.
+    sp<AAudioServiceStreamBase> foundStream = mStreamTracker.decrementAndRemoveStreamByHandle(
+            serviceStream->getHandle());
+    if (foundStream.get() != nullptr) {
+        foundStream->close();
+        pid_t pid = foundStream->getOwnerProcessId();
+        AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
         closed = true;
     }
     return closed;
@@ -175,14 +174,15 @@
     pid_t pid = serviceStream->getOwnerProcessId();
     AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
 
-    serviceStream->setCloseNeeded(true);
+    serviceStream->markCloseNeeded();
     (void) releaseStream(serviceStream);
     return AAUDIO_OK;
 }
 
 sp<AAudioServiceStreamBase> AAudioService::convertHandleToServiceStream(
         aaudio_handle_t streamHandle) {
-    sp<AAudioServiceStreamBase> serviceStream = mStreamTracker.getStreamByHandle(streamHandle);
+    sp<AAudioServiceStreamBase> serviceStream = mStreamTracker.getStreamByHandleAndIncrement(
+            streamHandle);
     if (serviceStream.get() != nullptr) {
         // Only allow owner or the aaudio service to access the stream.
         const uid_t callingUserId = IPCThreadState::self()->getCallingUid();
@@ -194,9 +194,9 @@
         if (!allowed) {
             ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
                   callingUserId, streamHandle, ownerUserId);
+            // We incremented the reference count so we must check if it needs to be closed.
+            checkForPendingClose(serviceStream, AAUDIO_OK);
             serviceStream.clear();
-        } else {
-            serviceStream->incrementServiceReferenceCount();
         }
     }
     return serviceStream;
@@ -288,11 +288,11 @@
     aaudio_result_t result = AAUDIO_OK;
     sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
     if (serviceStream.get() == nullptr) {
-        ALOGE("unregisterAudioThread(), illegal stream handle = 0x%0x", streamHandle);
+        ALOGE("%s(), illegal stream handle = 0x%0x", __func__, streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     if (serviceStream->getRegisteredThread() != clientThreadId) {
-        ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+        ALOGE("%s(), wrong thread", __func__);
         result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     } else {
         serviceStream->setRegisteredThread(0);
@@ -305,7 +305,7 @@
                                   audio_port_handle_t *clientHandle) {
     sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
     if (serviceStream.get() == nullptr) {
-        ALOGE("startClient(), illegal stream handle = 0x%0x", streamHandle);
+        ALOGE("%s(), illegal stream handle = 0x%0x", __func__, streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     aaudio_result_t result = serviceStream->startClient(client, clientHandle);
@@ -313,12 +313,27 @@
 }
 
 aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
-                                          audio_port_handle_t clientHandle) {
+                                          audio_port_handle_t portHandle) {
     sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
     if (serviceStream.get() == nullptr) {
-        ALOGE("stopClient(), illegal stream handle = 0x%0x", streamHandle);
+        ALOGE("%s(), illegal stream handle = 0x%0x", __func__, streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
-    aaudio_result_t result = serviceStream->stopClient(clientHandle);
+    aaudio_result_t result = serviceStream->stopClient(portHandle);
+    return checkForPendingClose(serviceStream, result);
+}
+
+// This is only called internally when AudioFlinger wants to tear down a stream.
+// So we do not have to check permissions.
+aaudio_result_t AAudioService::disconnectStreamByPortHandle(audio_port_handle_t portHandle) {
+    ALOGD("%s(%d) called", __func__, portHandle);
+    sp<AAudioServiceStreamBase> serviceStream =
+            mStreamTracker.findStreamByPortHandleAndIncrement(portHandle);
+    if (serviceStream.get() == nullptr) {
+        ALOGE("%s(), could not find stream with portHandle = %d", __func__, portHandle);
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+    aaudio_result_t result = serviceStream->stop();
+    serviceStream->disconnect();
     return checkForPendingClose(serviceStream, result);
 }
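The releaseStream()/convertHandleToServiceStream() changes above move the reference counting into AAudioStreamTracker: getStreamByHandleAndIncrement() takes a service reference under the tracker's lock, and decrementAndRemoveStreamByHandle() hands the stream back exactly once, so only one of two racing closers performs the close. The tracker itself is not shown in this diff; a generic sketch of the idea (std::shared_ptr and a stand-in Stream type instead of android::sp<AAudioServiceStreamBase>) could look like:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>

    // Stand-in stream with just the two fields the tracker needs.
    struct Stream {
        int32_t useCount = 0;       // guarded by Tracker::mLock
        bool closeNeeded = false;
    };

    class Tracker {
    public:
        // Look up a stream and take a service reference, all under one lock.
        std::shared_ptr<Stream> getAndIncrement(int32_t handle) {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = mStreams.find(handle);
            if (it == mStreams.end()) return nullptr;
            it->second->useCount++;
            return it->second;
        }
        // Drop a reference; hand the stream back (removed) only when it is the last
        // reference and a close was requested, so exactly one caller does the close.
        std::shared_ptr<Stream> decrementAndRemove(int32_t handle) {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = mStreams.find(handle);
            if (it == mStreams.end()) return nullptr;
            if (--it->second->useCount == 0 && it->second->closeNeeded) {
                auto stream = it->second;
                mStreams.erase(it);
                return stream;      // caller closes outside the lock
            }
            return nullptr;
        }
    private:
        std::mutex mLock;
        std::map<int32_t, std::shared_ptr<Stream>> mStreams;
    };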
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index bdd9e0b..d21b1cd 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -83,6 +83,8 @@
     aaudio_result_t stopClient(aaudio::aaudio_handle_t streamHandle,
                                        audio_port_handle_t clientHandle) override;
 
+    aaudio_result_t disconnectStreamByPortHandle(audio_port_handle_t portHandle);
+
 private:
 
     /**
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 96e621a..0349034 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -78,6 +78,17 @@
     return result.str();
 }
 
+// @return true if stream found
+bool AAudioServiceEndpoint::isStreamRegistered(audio_port_handle_t portHandle) {
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    for (const auto stream : mRegisteredStreams) {
+        if (stream->getPortHandle() == portHandle) {
+            return true;
+        }
+    }
+    return false;
+}
+
 void AAudioServiceEndpoint::disconnectRegisteredStreams() {
     std::lock_guard<std::mutex> lock(mLockStreams);
     mConnected.store(false);
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 6312c51..253f290 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -49,9 +49,9 @@
 
     virtual aaudio_result_t close() = 0;
 
-    virtual aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
+    aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
 
-    virtual aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
+    aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
 
     virtual aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
                                         audio_port_handle_t *clientHandle) = 0;
@@ -102,6 +102,13 @@
     }
 
 protected:
+
+    /**
+     * @param portHandle
+     * @return true if a stream with the given portHandle is registered
+     */
+    bool                     isStreamRegistered(audio_port_handle_t portHandle);
+
     void                     disconnectRegisteredStreams();
 
     mutable std::mutex       mLockStreams;
@@ -116,7 +123,6 @@
     int32_t                  mRequestedDeviceId = 0;
 
     std::atomic<bool>        mConnected{true};
-
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 52990da..f9e21fb 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -48,8 +48,10 @@
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
-AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP()
-        :  mMmapStream(nullptr) {}
+
+AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP(AAudioService &audioService)
+        : mMmapStream(nullptr)
+        , mAAudioService(audioService) {}
 
 AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}
 
@@ -84,6 +86,7 @@
 
     const audio_content_type_t contentType =
             AAudioConvert_contentTypeToInternal(getContentType());
+    // The usage attribute is only used for OUTPUT streams.
     const audio_usage_t usage = (direction == AAUDIO_DIRECTION_OUTPUT)
             ? AAudioConvert_usageToInternal(getUsage())
             : AUDIO_USAGE_UNKNOWN;
@@ -276,14 +279,21 @@
 }
 
 aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
-                                                   audio_port_handle_t *clientHandle) {
+                                                   audio_port_handle_t *clientHandle __unused) {
     // Start the client on behalf of the AAudio service.
     // Use the port handle that was provided by openMmapStream().
-    return startClient(mMmapClient, &mPortHandle);
+    audio_port_handle_t tempHandle = mPortHandle;
+    aaudio_result_t result = startClient(mMmapClient, &tempHandle);
+    // When AudioFlinger is passed a valid port handle then it should not change it.
+    LOG_ALWAYS_FATAL_IF(tempHandle != mPortHandle,
+                        "%s() port handle not expected to change from %d to %d",
+                        __func__, mPortHandle, tempHandle);
+    ALOGV("%s(%p) mPortHandle = %d", __func__, stream.get(), mPortHandle);
+    return result;
 }
 
 aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
-                                                  audio_port_handle_t clientHandle) {
+                                                  audio_port_handle_t clientHandle __unused) {
     mFramesTransferred.reset32();
 
     // Round 64-bit counter up to a multiple of the buffer capacity.
@@ -292,24 +302,27 @@
     // when the stream is stopped.
     mFramesTransferred.roundUp64(getBufferCapacity());
 
+    // Use the port handle that was provided by openMmapStream().
+    ALOGV("%s(%p) mPortHandle = %d", __func__, stream.get(), mPortHandle);
     return stopClient(mPortHandle);
 }
 
 aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
                                                        audio_port_handle_t *clientHandle) {
     if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
-    ALOGV("%s(%p(uid=%d, pid=%d))", __func__, &client, client.clientUid, client.clientPid);
+    ALOGD("%s(%p(uid=%d, pid=%d))", __func__, &client, client.clientUid, client.clientPid);
     audio_port_handle_t originalHandle =  *clientHandle;
     status_t status = mMmapStream->start(client, clientHandle);
     aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
-    ALOGV("%s() , %d => %d returns %d", __func__, originalHandle, *clientHandle, result);
+    ALOGD("%s() , portHandle %d => %d, returns %d", __func__, originalHandle, *clientHandle, result);
     return result;
 }
 
 aaudio_result_t AAudioServiceEndpointMMAP::stopClient(audio_port_handle_t clientHandle) {
+    ALOGD("%s(portHandle = %d), called", __func__, clientHandle);
     if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
     aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
-    ALOGV("%s(%d) returns %d", __func__, clientHandle, result);
+    ALOGD("%s(portHandle = %d), returns %d", __func__, clientHandle, result);
     return result;
 }
 
@@ -342,10 +355,19 @@
     return 0; // TODO
 }
 
-
-void AAudioServiceEndpointMMAP::onTearDown() {
-    ALOGD("%s(%p) called", __func__, this);
-    disconnectRegisteredStreams();
+// This is called by AudioFlinger when it wants to destroy a stream.
+void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t portHandle) {
+    ALOGD("%s(portHandle = %d) called", __func__, portHandle);
+    // Are we tearing down the EXCLUSIVE MMAP stream?
+    if (isStreamRegistered(portHandle)) {
+        ALOGD("%s(%d) tearing down this entire MMAP endpoint", __func__, portHandle);
+        disconnectRegisteredStreams();
+    } else {
+        // Must be a SHARED stream?
+        ALOGD("%s(%d) disconnect a specific stream", __func__, portHandle);
+        aaudio_result_t result = mAAudioService.disconnectStreamByPortHandle(portHandle);
+        ALOGD("%s(%d) disconnectStreamByPortHandle returned %d", __func__, portHandle, result);
+    }
 };
 
 void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels,
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 16b6269..5e815e0 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -42,7 +42,7 @@
         , public android::MmapStreamCallback {
 
 public:
-    AAudioServiceEndpointMMAP();
+    explicit AAudioServiceEndpointMMAP(android::AAudioService &audioService);
 
     virtual ~AAudioServiceEndpointMMAP();
 
@@ -68,7 +68,7 @@
     aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
 
     // -------------- Callback functions for MmapStreamCallback ---------------------
-    void onTearDown() override;
+    void onTearDown(audio_port_handle_t handle) override;
 
     void onVolumeChanged(audio_channel_mask_t channels,
                          android::Vector<float> values) override;
@@ -88,8 +88,12 @@
     // Interface to the AudioFlinger MMAP support.
     android::sp<android::MmapStreamInterface> mMmapStream;
     struct audio_mmap_buffer_info             mMmapBufferinfo;
+
+    // There is only one port associated with an MMAP endpoint.
     audio_port_handle_t                       mPortHandle = AUDIO_PORT_HANDLE_NONE;
 
+    android::AAudioService                    &mAAudioService;
+
     android::base::unique_fd                  mAudioDataFileDescriptor;
 
     int64_t                                   mHardwareTimeOffsetNanos = 0; // TODO get from HAL
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index f08a52f..63b9983 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -78,7 +78,6 @@
     setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
     setDeviceId(mStreamInternal->getDeviceId());
     setSessionId(mStreamInternal->getSessionId());
-    ALOGD("open() deviceId = %d, sessionId = %d", getDeviceId(), getSessionId());
     mFramesPerBurst = mStreamInternal->getFramesPerBurst();
 
     return result;
diff --git a/services/oboeservice/AAudioServiceEndpointShared.h b/services/oboeservice/AAudioServiceEndpointShared.h
index 227250c..d671710 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.h
+++ b/services/oboeservice/AAudioServiceEndpointShared.h
@@ -30,7 +30,7 @@
 namespace aaudio {
 
 /**
- * This manages an internal stream that is shared by multiple Client streams.
+ * This manages an AudioStreamInternal that is shared by multiple Client streams.
  */
 class AAudioServiceEndpointShared : public AAudioServiceEndpoint {
 
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 18f14ee..9af8af3 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -61,7 +61,7 @@
 }
 
 std::string AAudioServiceStreamBase::dumpHeader() {
-    return std::string("    T   Handle   UId Run State Format Burst Chan Capacity");
+    return std::string("    T   Handle   UId   Port Run State Format Burst Chan Capacity");
 }
 
 std::string AAudioServiceStreamBase::dump() const {
@@ -70,6 +70,7 @@
     result << "    0x" << std::setfill('0') << std::setw(8) << std::hex << mHandle
            << std::dec << std::setfill(' ') ;
     result << std::setw(6) << mMmapClient.clientUid;
+    result << std::setw(7) << mClientHandle;
     result << std::setw(4) << (isRunning() ? "yes" : " no");
     result << std::setw(6) << getState();
     result << std::setw(7) << getFormat();
@@ -104,6 +105,9 @@
             goto error;
         }
 
+        // This is not protected by a lock because the stream cannot be
+        // referenced until the service returns a handle to the client.
+        // So only one thread can open a stream.
         mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService,
                                                          request,
                                                          sharingMode);
@@ -112,6 +116,9 @@
             result = AAUDIO_ERROR_UNAVAILABLE;
             goto error;
         }
+        // Save a weak pointer that we will use to access the endpoint.
+        mServiceEndpointWeak = mServiceEndpoint;
+
         mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
         copyFrom(*mServiceEndpoint);
     }
@@ -130,13 +137,16 @@
 
     stop();
 
-    if (mServiceEndpoint == nullptr) {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
         result = AAUDIO_ERROR_INVALID_STATE;
     } else {
-        mServiceEndpoint->unregisterStream(this);
-        AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
-        mEndpointManager.closeEndpoint(mServiceEndpoint);
-        mServiceEndpoint.clear();
+        endpoint->unregisterStream(this);
+        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+        endpointManager.closeEndpoint(endpoint);
+
+        // AAudioService::closeStream() prevents two threads from closing at the same time.
+        mServiceEndpoint.clear(); // endpoint will hold the pointer until this method returns.
     }
 
     {
@@ -152,7 +162,12 @@
 
 aaudio_result_t AAudioServiceStreamBase::startDevice() {
     mClientHandle = AUDIO_PORT_HANDLE_NONE;
-    return mServiceEndpoint->startStream(this, &mClientHandle);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    return endpoint->startStream(this, &mClientHandle);
 }
 
 /**
@@ -162,16 +177,11 @@
  */
 aaudio_result_t AAudioServiceStreamBase::start() {
     aaudio_result_t result = AAUDIO_OK;
+
     if (isRunning()) {
         return AAUDIO_OK;
     }
 
-    if (mServiceEndpoint == nullptr) {
-        ALOGE("%s() missing endpoint", __func__);
-        result = AAUDIO_ERROR_INVALID_STATE;
-        goto error;
-    }
-
     setFlowing(false);
 
     // Start with fresh presentation timestamps.
@@ -200,10 +210,6 @@
     if (!isRunning()) {
         return result;
     }
-    if (mServiceEndpoint == nullptr) {
-        ALOGE("%s() missing endpoint", __func__);
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
 
     // Send it now because the timestamp gets rounded up when stopStream() is called below.
     // Also we don't need the timestamps while we are shutting down.
@@ -215,7 +221,12 @@
         return result;
     }
 
-    result = mServiceEndpoint->stopStream(this, mClientHandle);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    result = endpoint->stopStream(this, mClientHandle);
     if (result != AAUDIO_OK) {
         ALOGE("%s() mServiceEndpoint returned %d, %s", __func__, result, getTypeText());
         disconnect(); // TODO should we return or pause Base first?
@@ -232,11 +243,6 @@
         return result;
     }
 
-    if (mServiceEndpoint == nullptr) {
-        ALOGE("%s() missing endpoint", __func__);
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-
     setState(AAUDIO_STREAM_STATE_STOPPING);
 
     // Send it now because the timestamp gets rounded up when stopStream() is called below.
@@ -248,10 +254,15 @@
         return result;
     }
 
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
     // TODO wait for data to be played out
-    result = mServiceEndpoint->stopStream(this, mClientHandle);
+    result = endpoint->stopStream(this, mClientHandle);
     if (result != AAUDIO_OK) {
-        ALOGE("%s() mServiceEndpoint returned %d, %s", __func__, result, getTypeText());
+        ALOGE("%s() stopStream returned %d, %s", __func__, result, getTypeText());
         disconnect();
         // TODO what to do with result here?
     }
@@ -403,12 +414,13 @@
     sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
 }
 
-int32_t AAudioServiceStreamBase::incrementServiceReferenceCount() {
-    std::lock_guard<std::mutex> lock(mCallingCountLock);
+int32_t AAudioServiceStreamBase::incrementServiceReferenceCount_l() {
     return ++mCallingCount;
 }
 
-int32_t AAudioServiceStreamBase::decrementServiceReferenceCount() {
-    std::lock_guard<std::mutex> lock(mCallingCountLock);
-    return --mCallingCount;
+int32_t AAudioServiceStreamBase::decrementServiceReferenceCount_l() {
+    int32_t count = --mCallingCount;
+    // Each call to increment should be balanced with one call to decrement.
+    assert(count >= 0);
+    return count;
 }
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 3720596..a1815d0 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -180,6 +180,10 @@
         mHandle = handle;
     }
 
+    audio_port_handle_t getPortHandle() const {
+        return mClientHandle;
+    }
+
     aaudio_stream_state_t getState() const {
         return mState;
     }
@@ -201,22 +205,33 @@
 
     /**
      * Atomically increment the number of active references to the stream by AAudioService.
+     *
+     * This is called under a global lock in AAudioStreamTracker.
+     *
      * @return value after the increment
      */
-    int32_t incrementServiceReferenceCount();
+    int32_t incrementServiceReferenceCount_l();
 
     /**
      * Atomically decrement the number of active references to the stream by AAudioService.
+     * This should only be called after incrementServiceReferenceCount_l().
+     *
+     * This is called under a global lock in AAudioStreamTracker.
+     *
      * @return value after the decrement
      */
-    int32_t decrementServiceReferenceCount();
+    int32_t decrementServiceReferenceCount_l();
 
     bool isCloseNeeded() const {
         return mCloseNeeded.load();
     }
 
-    void setCloseNeeded(bool needed) {
-        mCloseNeeded.store(needed);
+    /**
+     * Mark this stream as needing to be closed.
+     * Once marked for closing, it cannot be unmarked.
+     */
+    void markCloseNeeded() {
+        mCloseNeeded.store(true);
     }
 
     virtual const char *getTypeText() const { return "Base"; }
@@ -269,19 +284,26 @@
 
     int32_t                 mFramesPerBurst = 0;
     android::AudioClient    mMmapClient; // set in open, used in MMAP start()
+    // TODO rename mClientHandle to mPortHandle to be more consistent with AudioFlinger.
     audio_port_handle_t     mClientHandle = AUDIO_PORT_HANDLE_NONE;
 
     SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
 
     android::AAudioService &mAudioService;
+
+    // The mServiceEndpoint variable can be accessed by multiple threads.
+    // So we access it by promoting the weak pointer to a local strong pointer,
+    // which is thread-safe.
     android::sp<AAudioServiceEndpoint> mServiceEndpoint;
+    android::wp<AAudioServiceEndpoint> mServiceEndpointWeak;
 
 private:
     aaudio_handle_t         mHandle = -1;
     bool                    mFlowing = false;
 
-    std::mutex              mCallingCountLock;
-    std::atomic<int32_t>    mCallingCount{0};
+    // This is modified under a global lock in AAudioStreamTracker.
+    int32_t                 mCallingCount = 0;
+
     std::atomic<bool>       mCloseNeeded{false};
 };
 
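The header change above captures the pattern used by every call site in this patch: the endpoint is held as a wp<> and each caller promotes it to a local sp<> before use, so a concurrently released endpoint is reported as AAUDIO_ERROR_INVALID_STATE rather than dereferenced through a stale pointer. Below is a minimal standalone sketch of that promotion idiom, assuming only the Android RefBase/sp/wp types; the Endpoint and Stream names are illustrative and not part of this change.

#define LOG_TAG "EndpointSketch"
#include <log/log.h>
#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

// Illustrative stand-ins; the real classes are AAudioServiceEndpoint and
// AAudioServiceStreamBase.
struct Endpoint : public android::RefBase {
    int stopStream() { return 0; }   // 0 stands in for AAUDIO_OK
};

struct Stream {
    // Weak reference: does not keep the endpoint alive, so it can never
    // turn into a dangling raw pointer.
    android::wp<Endpoint> mEndpointWeak;

    int stop() {
        // Promote to a strong pointer for the duration of the call.
        // promote() returns nullptr if the endpoint has already been destroyed.
        android::sp<Endpoint> endpoint = mEndpointWeak.promote();
        if (endpoint == nullptr) {
            ALOGE("%s() has no endpoint", __func__);
            return -1;               // -1 stands in for AAUDIO_ERROR_INVALID_STATE
        }
        return endpoint->stopStream();
    }
};
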
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 34ddb4b..c845309 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -70,14 +70,19 @@
         return result;
     }
 
-    result = mServiceEndpoint->registerStream(keep);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    result = endpoint->registerStream(keep);
     if (result != AAUDIO_OK) {
-        goto error;
+        return result;
     }
 
     setState(AAUDIO_STREAM_STATE_OPEN);
 
-error:
     return AAUDIO_OK;
 }
 
@@ -118,21 +123,37 @@
 
 aaudio_result_t AAudioServiceStreamMMAP::startClient(const android::AudioClient& client,
                                                        audio_port_handle_t *clientHandle) {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
     // Start the client on behalf of the application. Generate a new porthandle.
-    aaudio_result_t result = mServiceEndpoint->startClient(client, clientHandle);
+    aaudio_result_t result = endpoint->startClient(client, clientHandle);
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamMMAP::stopClient(audio_port_handle_t clientHandle) {
-    aaudio_result_t result = mServiceEndpoint->stopClient(clientHandle);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_result_t result = endpoint->stopClient(clientHandle);
     return result;
 }
 
 // Get free-running DSP or DMA hardware position from the HAL.
 aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
-    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
-            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
+
     aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
     if (result == AAUDIO_OK) {
         Timestamp timestamp(*positionFrames, *timeNanos);
@@ -148,8 +169,15 @@
 // Get timestamp that was written by getFreeRunningPosition()
 aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
-    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
-            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
+
     // TODO Get presentation timestamp from the HAL
     if (mAtomicTimestamp.isValid()) {
         Timestamp timestamp = mAtomicTimestamp.read();
@@ -165,7 +193,12 @@
 aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription(
         AudioEndpointParcelable &parcelable)
 {
-    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
-            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
     return serviceEndpointMMAP->getDownDataDescription(parcelable);
 }
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 75d88cf..05c5735 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -122,39 +122,44 @@
 
     aaudio_result_t result = AAudioServiceStreamBase::open(request, AAUDIO_SHARING_MODE_SHARED);
     if (result != AAUDIO_OK) {
-        ALOGE("open() returned %d", result);
+        ALOGE("%s() returned %d", __func__, result);
         return result;
     }
 
     const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
 
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        result = AAUDIO_ERROR_INVALID_STATE;
+        goto error;
+    }
 
     // Is the request compatible with the shared endpoint?
     setFormat(configurationInput.getFormat());
     if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
         setFormat(AAUDIO_FORMAT_PCM_FLOAT);
     } else if (getFormat() != AAUDIO_FORMAT_PCM_FLOAT) {
-        ALOGE("open() mAudioFormat = %d, need FLOAT", getFormat());
+        ALOGD("%s() mAudioFormat = %d, need FLOAT", __func__, getFormat());
         result = AAUDIO_ERROR_INVALID_FORMAT;
         goto error;
     }
 
     setSampleRate(configurationInput.getSampleRate());
     if (getSampleRate() == AAUDIO_UNSPECIFIED) {
-        setSampleRate(mServiceEndpoint->getSampleRate());
-    } else if (getSampleRate() != mServiceEndpoint->getSampleRate()) {
-        ALOGE("open() mSampleRate = %d, need %d",
-              getSampleRate(), mServiceEndpoint->getSampleRate());
+        setSampleRate(endpoint->getSampleRate());
+    } else if (getSampleRate() != endpoint->getSampleRate()) {
+        ALOGD("%s() mSampleRate = %d, need %d",
+              __func__, getSampleRate(), endpoint->getSampleRate());
         result = AAUDIO_ERROR_INVALID_RATE;
         goto error;
     }
 
     setSamplesPerFrame(configurationInput.getSamplesPerFrame());
     if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
-        setSamplesPerFrame(mServiceEndpoint->getSamplesPerFrame());
-    } else if (getSamplesPerFrame() != mServiceEndpoint->getSamplesPerFrame()) {
-        ALOGE("open() mSamplesPerFrame = %d, need %d",
-              getSamplesPerFrame(), mServiceEndpoint->getSamplesPerFrame());
+        setSamplesPerFrame(endpoint->getSamplesPerFrame());
+    } else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
+        ALOGD("%s() mSamplesPerFrame = %d, need %d",
+              __func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
         result = AAUDIO_ERROR_OUT_OF_RANGE;
         goto error;
     }
@@ -173,17 +178,17 @@
         mAudioDataQueue = new SharedRingBuffer();
         result = mAudioDataQueue->allocate(calculateBytesPerFrame(), getBufferCapacity());
         if (result != AAUDIO_OK) {
-            ALOGE("open() could not allocate FIFO with %d frames",
-                  getBufferCapacity());
+            ALOGE("%s() could not allocate FIFO with %d frames",
+                  __func__, getBufferCapacity());
             result = AAUDIO_ERROR_NO_MEMORY;
             goto error;
         }
     }
 
-    ALOGD("open() actual rate = %d, channels = %d, deviceId = %d",
-          getSampleRate(), getSamplesPerFrame(), mServiceEndpoint->getDeviceId());
+    ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
+          getSampleRate(), getSamplesPerFrame(), endpoint->getDeviceId());
 
-    result = mServiceEndpoint->registerStream(keep);
+    result = endpoint->registerStream(keep);
     if (result != AAUDIO_OK) {
         goto error;
     }
@@ -217,7 +222,7 @@
 {
     std::lock_guard<std::mutex> lock(mAudioDataQueueLock);
     if (mAudioDataQueue == nullptr) {
-        ALOGE("getAudioDataDescription(): mUpMessageQueue null! - stream not open");
+        ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
         return AAUDIO_ERROR_NULL;
     }
     // Gather information on the data queue.
@@ -250,13 +255,19 @@
                                                                 int64_t *timeNanos) {
 
     int64_t position = 0;
-    aaudio_result_t result = mServiceEndpoint->getTimestamp(&position, timeNanos);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    aaudio_result_t result = endpoint->getTimestamp(&position, timeNanos);
     if (result == AAUDIO_OK) {
         int64_t offset = mTimestampPositionOffset.load();
         // TODO, do not go below starting value
         position -= offset; // Offset from shared MMAP stream
-        ALOGV("getHardwareTimestamp() %8lld = %8lld - %8lld",
-              (long long) position, (long long) (position + offset), (long long) offset);
+        ALOGV("%s() %8lld = %8lld - %8lld",
+              __func__, (long long) position, (long long) (position + offset), (long long) offset);
     }
     *positionFrames = position;
     return result;
diff --git a/services/oboeservice/AAudioStreamTracker.cpp b/services/oboeservice/AAudioStreamTracker.cpp
index ef88b34..3328159 100644
--- a/services/oboeservice/AAudioStreamTracker.cpp
+++ b/services/oboeservice/AAudioStreamTracker.cpp
@@ -30,25 +30,52 @@
 using namespace android;
 using namespace aaudio;
 
-sp<AAudioServiceStreamBase> AAudioStreamTracker::removeStreamByHandle(
+sp<AAudioServiceStreamBase> AAudioStreamTracker::decrementAndRemoveStreamByHandle(
         aaudio_handle_t streamHandle) {
     std::lock_guard<std::mutex> lock(mHandleLock);
     sp<AAudioServiceStreamBase> serviceStream;
     auto it = mStreamsByHandle.find(streamHandle);
     if (it != mStreamsByHandle.end()) {
-        serviceStream = it->second;
-        mStreamsByHandle.erase(it);
+        sp<AAudioServiceStreamBase> tempStream = it->second;
+        // Does the caller need to close the stream?
+        // The reference count should never be negative.
+        // But it is safer to check for <= 0 than == 0.
+        if ((tempStream->decrementServiceReferenceCount_l() <= 0) && tempStream->isCloseNeeded()) {
+            serviceStream = tempStream; // Only return stream if ready to be closed.
+            mStreamsByHandle.erase(it);
+        }
     }
     return serviceStream;
 }
 
-sp<AAudioServiceStreamBase> AAudioStreamTracker::getStreamByHandle(
+sp<AAudioServiceStreamBase> AAudioStreamTracker::getStreamByHandleAndIncrement(
         aaudio_handle_t streamHandle) {
     std::lock_guard<std::mutex> lock(mHandleLock);
     sp<AAudioServiceStreamBase> serviceStream;
     auto it = mStreamsByHandle.find(streamHandle);
     if (it != mStreamsByHandle.end()) {
         serviceStream = it->second;
+        serviceStream->incrementServiceReferenceCount_l();
+    }
+    return serviceStream;
+}
+
+// The port handle is only available when the stream is started.
+// So we have to iterate over all the streams.
+// Luckily this rarely happens.
+sp<AAudioServiceStreamBase> AAudioStreamTracker::findStreamByPortHandleAndIncrement(
+        audio_port_handle_t portHandle) {
+    std::lock_guard<std::mutex> lock(mHandleLock);
+    sp<AAudioServiceStreamBase> serviceStream;
+    auto it = mStreamsByHandle.begin();
+    while (it != mStreamsByHandle.end()) {
+        auto candidate = it->second;
+        if (candidate->getPortHandle() == portHandle) {
+            serviceStream = candidate;
+            serviceStream->incrementServiceReferenceCount_l();
+            break;
+        }
+        it++;
     }
     return serviceStream;
 }
@@ -66,7 +93,7 @@
 
 aaudio_handle_t AAudioStreamTracker::addStreamForHandle(sp<AAudioServiceStreamBase> serviceStream) {
     std::lock_guard<std::mutex> lock(mHandleLock);
-    aaudio_handle_t handle = mPreviousHandle.load();
+    aaudio_handle_t handle = mPreviousHandle;
     // Assign a unique handle.
     while (true) {
         handle = bumpHandle(handle);
@@ -78,7 +105,7 @@
             break;
         }
     }
-    mPreviousHandle.store(handle);
+    mPreviousHandle = handle;
     return handle;
 }
 
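Taken together, the tracker methods above implement a small use count guarded entirely by mHandleLock: every successful lookup increments the count, the matching release decrements it, and the stream is removed from the map (and returned to the caller for closing) only once the count reaches zero and close has been requested. Below is a reduced sketch of that bookkeeping using plain std types and hypothetical names (StreamTrackerSketch, TrackedStream), with std::shared_ptr standing in for android::sp; it is an illustration, not the service's actual classes.

#include <map>
#include <memory>
#include <mutex>

struct TrackedStream {
    int  useCount = 0;         // only modified while the tracker lock is held
    bool closeNeeded = false;  // set once by the close request, never cleared
};

class StreamTrackerSketch {
public:
    // Look up a stream and take a reference on behalf of the caller.
    std::shared_ptr<TrackedStream> getAndIncrement(int handle) {
        std::lock_guard<std::mutex> lock(mLock);
        auto it = mStreams.find(handle);
        if (it == mStreams.end()) return nullptr;
        it->second->useCount++;
        return it->second;
    }

    // Drop the caller's reference. The stream is handed back (and forgotten by
    // the tracker) only when it is no longer referenced and close was requested.
    std::shared_ptr<TrackedStream> decrementAndRemove(int handle) {
        std::lock_guard<std::mutex> lock(mLock);
        auto it = mStreams.find(handle);
        if (it == mStreams.end()) return nullptr;
        std::shared_ptr<TrackedStream> stream = it->second;
        if (--stream->useCount <= 0 && stream->closeNeeded) {
            mStreams.erase(it);
            return stream;    // safe for the caller to close now
        }
        return nullptr;       // still in use, or close not requested
    }

private:
    std::mutex mLock;
    std::map<int, std::shared_ptr<TrackedStream>> mStreams;
};
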
diff --git a/services/oboeservice/AAudioStreamTracker.h b/services/oboeservice/AAudioStreamTracker.h
index 70d440d..57ec426 100644
--- a/services/oboeservice/AAudioStreamTracker.h
+++ b/services/oboeservice/AAudioStreamTracker.h
@@ -32,18 +32,36 @@
 
 public:
     /**
-     * Remove the stream associated with the handle.
+     * Find the stream associated with the handle.
+     * Decrement its reference counter. If the count reaches zero and the
+     * stream has been marked for closing, remove it from the tracker and
+     * return a strong pointer to it; otherwise return nullptr.
+     *
      * @param streamHandle
-     * @return strong pointer to the stream if found or to nullptr
+     * @return strong pointer to the stream if it needs to be closed, or nullptr
      */
-    android::sp<AAudioServiceStreamBase> removeStreamByHandle(aaudio_handle_t streamHandle);
+    android::sp<AAudioServiceStreamBase> decrementAndRemoveStreamByHandle(
+            aaudio_handle_t streamHandle);
 
     /**
      * Look up a stream based on the handle.
+     * Increment its service reference count if found.
+     *
      * @param streamHandle
-     * @return strong pointer to the stream if found or to nullptr
+     * @return strong pointer to the stream if found, or nullptr
      */
-    android::sp<aaudio::AAudioServiceStreamBase> getStreamByHandle(aaudio_handle_t streamHandle);
+    android::sp<aaudio::AAudioServiceStreamBase> getStreamByHandleAndIncrement(
+            aaudio_handle_t streamHandle);
+
+    /**
+     * Look up a stream based on the AudioPolicy portHandle.
+     * Increment its service reference count if found.
+     *
+     * @param portHandle
+     * @return strong pointer to the stream if found, or nullptr
+     */
+    android::sp<aaudio::AAudioServiceStreamBase> findStreamByPortHandleAndIncrement(
+            audio_port_handle_t portHandle);
 
     /**
      * Store a strong pointer to the stream and return a unique handle for future reference.
@@ -63,7 +81,9 @@
 
     // Track stream using a unique handle that wraps. Only use positive half.
     mutable std::mutex                mHandleLock;
-    std::atomic<aaudio_handle_t>      mPreviousHandle{0};
+    // protected by mHandleLock
+    aaudio_handle_t                   mPreviousHandle = 0;
+    // protected by mHandleLock
     std::map<aaudio_handle_t, android::sp<aaudio::AAudioServiceStreamBase>> mStreamsByHandle;
 };
 
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index 584b2ef..3d5f140 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -53,7 +53,6 @@
     libbinder \
     libcutils \
     libmediautils \
-    libserviceutility \
     libutils \
     liblog
 
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index ad3666e..3c7d29d 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -34,8 +34,7 @@
     libhardware \
     libsoundtrigger \
     libaudioclient \
-    libserviceutility
-
+    libmediautils \
 
 ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
 # libhardware configuration
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index a7d6e83..eb9cd1d 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -27,13 +27,13 @@
 #include <cutils/properties.h>
 #include <hardware/hardware.h>
 #include <media/AudioSystem.h>
+#include <mediautils/ServiceUtilities.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <binder/IServiceManager.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <system/sound_trigger.h>
-#include <ServiceUtilities.h>
 #include "SoundTriggerHwService.h"
 
 #ifdef SOUND_TRIGGER_USE_STUB_MODULE
@@ -562,10 +562,7 @@
     if (mHalInterface == 0) {
         return NO_INIT;
     }
-    if (modelMemory == 0 || modelMemory->pointer() == NULL) {
-        ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()");
-        return BAD_VALUE;
-    }
+
     struct sound_trigger_sound_model *sound_model =
             (struct sound_trigger_sound_model *)modelMemory->pointer();
 
@@ -659,11 +656,6 @@
     if (mHalInterface == 0) {
         return NO_INIT;
     }
-    if (dataMemory == 0 || dataMemory->pointer() == NULL) {
-        ALOGE("startRecognition() dataMemory is 0 or has NULL pointer()");
-        return BAD_VALUE;
-
-    }
 
     struct sound_trigger_recognition_config *config =
             (struct sound_trigger_recognition_config *)dataMemory->pointer();
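
The per-method null checks removed above are replaced by a single checkIMemory() validation at the binder entry points in the hunks that follow, so the shared memory is vetted once before its pointer is cast to a HAL structure. The sketch below only illustrates the general shape of such a validation against the IMemory interface; it is an assumption for illustration, not the actual mediautils checkIMemory() implementation.

#include <binder/IMemory.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>

// Hedged sketch: reject a missing or empty shared-memory region before the
// service casts its pointer to a HAL structure. Not the real checkIMemory().
static android::status_t checkSharedMemorySketch(const android::sp<android::IMemory>& memory) {
    if (memory == nullptr || memory->pointer() == nullptr || memory->size() == 0) {
        return android::BAD_VALUE;
    }
    return android::NO_ERROR;
}
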
@@ -966,6 +958,9 @@
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
+    if (checkIMemory(modelMemory) != NO_ERROR) {
+        return BAD_VALUE;
+    }
 
     sp<Module> module = mModule.promote();
     if (module == 0) {
@@ -997,6 +992,9 @@
                                IPCThreadState::self()->getCallingUid())) {
         return PERMISSION_DENIED;
     }
+    if (checkIMemory(dataMemory) != NO_ERROR) {
+        return BAD_VALUE;
+    }
 
     sp<Module> module = mModule.promote();
     if (module == 0) {