Merge "audiopolicy: fix for ringtone and offload music playback concurrency"
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index 4c28789..a86cc87 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -566,7 +566,7 @@
 
     for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
         uint32_t tag = tagArray[i];
-        String8 sectionString = tagToSectionMap.valueFor(tag);
+        const String8& sectionString = tagToSectionMap.valueFor(tag);
 
         // Set up tag to section index map
         ssize_t index = sections.indexOf(sectionString);
diff --git a/camera/cameraserver/cameraserver.rc b/camera/cameraserver/cameraserver.rc
index fea5a1d..a9aae0b 100644
--- a/camera/cameraserver/cameraserver.rc
+++ b/camera/cameraserver/cameraserver.rc
@@ -4,3 +4,4 @@
     group audio camera input drmrpc
     ioprio rt 4
     writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks
+    rlimit rtprio 10 10
diff --git a/camera/ndk/NdkCameraManager.cpp b/camera/ndk/NdkCameraManager.cpp
index 60b4763..f14a4c9 100644
--- a/camera/ndk/NdkCameraManager.cpp
+++ b/camera/ndk/NdkCameraManager.cpp
@@ -23,6 +23,7 @@
 
 #include <camera/NdkCameraManager.h>
 #include "impl/ACameraManager.h"
+#include "impl/ACameraMetadata.h"
 
 using namespace android;
 
@@ -107,7 +108,14 @@
                 __FUNCTION__, mgr, cameraId, chars);
         return ACAMERA_ERROR_INVALID_PARAMETER;
     }
-    return mgr->getCameraCharacteristics(cameraId, chars);
+    sp<ACameraMetadata> spChars;
+    camera_status_t status = mgr->getCameraCharacteristics(cameraId, &spChars);
+    if (status != ACAMERA_OK) {
+        return status;
+    }
+    spChars->incStrong((void*) ACameraManager_getCameraCharacteristics);
+    *chars = spChars.get();
+    return ACAMERA_OK;
 }
 
 EXPORT
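The hunk above changes the ownership contract: the ACameraMetadata* returned to the caller now carries a strong reference taken on the caller's behalf, released by ACameraMetadata_free. A minimal client-side sketch of that pairing (not part of this patch; error handling abbreviated, and ACAMERA_SENSOR_ORIENTATION is just an example tag):

#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraMetadata.h>

void logSensorOrientation(const char* cameraId) {
    ACameraManager* mgr = ACameraManager_create();
    ACameraMetadata* chars = nullptr;
    if (ACameraManager_getCameraCharacteristics(mgr, cameraId, &chars) == ACAMERA_OK) {
        ACameraMetadata_const_entry entry;
        if (ACameraMetadata_getConstEntry(chars, ACAMERA_SENSOR_ORIENTATION, &entry) == ACAMERA_OK) {
            // entry.data.i32[0] is the clockwise sensor rotation in degrees.
        }
        // Releases the strong reference taken by the manager above.
        ACameraMetadata_free(chars);
    }
    ACameraManager_delete(mgr);
}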
diff --git a/camera/ndk/NdkCameraMetadata.cpp b/camera/ndk/NdkCameraMetadata.cpp
index 65de81f..34ec2da 100644
--- a/camera/ndk/NdkCameraMetadata.cpp
+++ b/camera/ndk/NdkCameraMetadata.cpp
@@ -57,13 +57,15 @@
         ALOGE("%s: src is null!", __FUNCTION__);
         return nullptr;
     }
-    return new ACameraMetadata(*src);
+    ACameraMetadata* copy = new ACameraMetadata(*src);
+    copy->incStrong((void*) ACameraMetadata_copy);
+    return copy;
 }
 
 EXPORT
 void ACameraMetadata_free(ACameraMetadata* metadata) {
     ATRACE_CALL();
     if (metadata != nullptr) {
-        delete metadata;
+        metadata->decStrong((void*) ACameraMetadata_free);
     }
 }
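With ACameraMetadata_copy now taking its own strong reference, the original and the copy are released independently through ACameraMetadata_free. A small sketch under that assumption:

#include <camera/NdkCameraMetadata.h>

// The caller releases the returned copy with ACameraMetadata_free when done;
// freeing the original does not invalidate the copy.
ACameraMetadata* cloneCharacteristics(const ACameraMetadata* chars) {
    return ACameraMetadata_copy(chars);  // may return nullptr on invalid input
}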
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
index ac1856b..ddb69d7 100644
--- a/camera/ndk/NdkCaptureRequest.cpp
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -137,7 +137,7 @@
     if (request == nullptr) {
         return;
     }
-    delete request->settings;
+    request->settings.clear();
     delete request->targets;
     delete request;
     return;
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 907debc..c908323 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -50,11 +50,11 @@
 CameraDevice::CameraDevice(
         const char* id,
         ACameraDevice_StateCallbacks* cb,
-        std::unique_ptr<ACameraMetadata> chars,
+        sp<ACameraMetadata> chars,
         ACameraDevice* wrapper) :
         mCameraId(id),
         mAppCallbacks(*cb),
-        mChars(std::move(chars)),
+        mChars(chars),
         mServiceCallback(new ServiceCallback(this)),
         mWrapper(wrapper),
         mInError(false),
@@ -436,7 +436,7 @@
     if (req == nullptr) {
         return;
     }
-    delete req->settings;
+    req->settings.clear();
     delete req->targets;
     delete req;
 }
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 1369148..7d64081 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -48,7 +48,7 @@
 class CameraDevice final : public RefBase {
   public:
     CameraDevice(const char* id, ACameraDevice_StateCallbacks* cb,
-                  std::unique_ptr<ACameraMetadata> chars,
+                  sp<ACameraMetadata> chars,
                   ACameraDevice* wrapper);
     ~CameraDevice();
 
@@ -156,7 +156,7 @@
     mutable Mutex mDeviceLock;
     const String8 mCameraId;                          // Camera ID
     const ACameraDevice_StateCallbacks mAppCallbacks; // Callback to app
-    const std::unique_ptr<ACameraMetadata> mChars;    // Camera characteristics
+    const sp<ACameraMetadata> mChars;                 // Camera characteristics
     const sp<ServiceCallback> mServiceCallback;
     ACameraDevice* mWrapper;
 
@@ -294,8 +294,8 @@
  */
 struct ACameraDevice {
     ACameraDevice(const char* id, ACameraDevice_StateCallbacks* cb,
-                  std::unique_ptr<ACameraMetadata> chars) :
-            mDevice(new CameraDevice(id, cb, std::move(chars), this)) {}
+                  sp<ACameraMetadata> chars) :
+            mDevice(new CameraDevice(id, cb, chars, this)) {}
 
     ~ACameraDevice() {};
 
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index c59d0e7..ee67677 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -402,7 +402,7 @@
 }
 
 camera_status_t ACameraManager::getCameraCharacteristics(
-        const char *cameraIdStr, ACameraMetadata **characteristics) {
+        const char* cameraIdStr, sp<ACameraMetadata>* characteristics) {
     Mutex::Autolock _l(mLock);
 
     sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
@@ -437,18 +437,16 @@
         const char* cameraId,
         ACameraDevice_StateCallbacks* callback,
         /*out*/ACameraDevice** outDevice) {
-    ACameraMetadata* rawChars;
-    camera_status_t ret = getCameraCharacteristics(cameraId, &rawChars);
+    sp<ACameraMetadata> chars;
+    camera_status_t ret = getCameraCharacteristics(cameraId, &chars);
     Mutex::Autolock _l(mLock);
     if (ret != ACAMERA_OK) {
         ALOGE("%s: cannot get camera characteristics for camera %s. err %d",
                 __FUNCTION__, cameraId, ret);
         return ACAMERA_ERROR_INVALID_PARAMETER;
     }
-    std::unique_ptr<ACameraMetadata> chars(rawChars);
-    rawChars = nullptr;
 
-    ACameraDevice* device = new ACameraDevice(cameraId, callback, std::move(chars));
+    ACameraDevice* device = new ACameraDevice(cameraId, callback, chars);
 
     sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
     if (cs == nullptr) {
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index cc42f77..ce65769 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -186,7 +186,7 @@
     static void     deleteCameraIdList(ACameraIdList* cameraIdList);
 
     camera_status_t getCameraCharacteristics(
-            const char *cameraId, ACameraMetadata **characteristics);
+            const char* cameraId, android::sp<ACameraMetadata>* characteristics);
     camera_status_t openCamera(const char* cameraId,
                                ACameraDevice_StateCallbacks* callback,
                                /*out*/ACameraDevice** device);
diff --git a/camera/ndk/impl/ACaptureRequest.h b/camera/ndk/impl/ACaptureRequest.h
index 06b2cc3..b11dafb 100644
--- a/camera/ndk/impl/ACaptureRequest.h
+++ b/camera/ndk/impl/ACaptureRequest.h
@@ -55,7 +55,7 @@
         return ACAMERA_OK;
     }
 
-    ACameraMetadata*      settings;
+    sp<ACameraMetadata>   settings;
     ACameraOutputTargets* targets;
     void*                 context;
 };
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 9bf8247..5e0db60 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -45,6 +45,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraCaptureSession is an opaque type that manages frame captures of a camera device.
  *
@@ -591,6 +593,10 @@
 camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session)
         __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 28
+
 typedef struct ACaptureSessionOutput ACaptureSessionOutput;
 
 /**
@@ -635,6 +641,7 @@
  */
 camera_status_t ACameraCaptureSession_updateSharedOutput(ACameraCaptureSession* session,
         ACaptureSessionOutput* output) __INTRODUCED_IN(28);
+#endif /* __ANDROID_API__ >= 28 */
 
 __END_DECLS
 
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index bdd27f9..7c13b34 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraDevice is opaque type that provides access to a camera device.
  *
@@ -666,6 +668,10 @@
         const ACameraCaptureSession_stateCallbacks* callbacks,
         /*out*/ACameraCaptureSession** session) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 28
+
 /**
  * Create a shared ACaptureSessionOutput object.
  *
@@ -757,6 +763,8 @@
         const ACameraCaptureSession_stateCallbacks* callbacks,
         /*out*/ACameraCaptureSession** session) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_DEVICE_H */
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index e19ce36..6b58155 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -40,6 +40,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 typedef enum {
     ACAMERA_OK = 0,
 
@@ -130,6 +132,8 @@
     ACAMERA_ERROR_PERMISSION_DENIED     = ACAMERA_ERROR_BASE - 13,
 } camera_status_t;
 
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_ERROR_H */
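These __ANDROID_API__ guards let the headers be included by code built against an older API level. A hedged sketch of the matching compile-time gating on the client side (the fallback branch is a placeholder):

#include <camera/NdkCameraManager.h>

void enumerateCamerasIfAvailable() {
#if __ANDROID_API__ >= 24
    ACameraManager* mgr = ACameraManager_create();
    ACameraIdList* ids = nullptr;
    if (ACameraManager_getCameraIdList(mgr, &ids) == ACAMERA_OK) {
        // ... inspect ids->numCameras and ids->cameraIds ...
        ACameraManager_deleteCameraIdList(ids);
    }
    ACameraManager_delete(mgr);
#else
    // Built against an older API level: fall back to the Java camera2 API.
#endif
}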
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index a1cca4d..ea76738 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraManager is opaque type that provides access to camera service.
  *
@@ -119,7 +121,7 @@
  *                 this callback returns.
  */
 typedef void (*ACameraManager_AvailabilityCallback)(void* context,
-        const char* cameraId) __INTRODUCED_IN(24);
+        const char* cameraId);
 
 /**
  * A listener for camera devices becoming available or unavailable to open.
@@ -274,6 +276,8 @@
         ACameraDevice_StateCallbacks* callback,
         /*out*/ACameraDevice** device) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_MANAGER_H */
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index 2078da7..611e270 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraMetadata is opaque type that provides access to read-only camera metadata like camera
  * characteristics (via {@link ACameraManager_getCameraCharacteristics}) or capture results (via
@@ -229,6 +231,8 @@
  */
 void ACameraMetadata_free(ACameraMetadata* metadata) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_METADATA_H */
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 7398f78..c1f5ddc 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -40,6 +40,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 typedef enum acamera_metadata_section {
     ACAMERA_COLOR_CORRECTION,
     ACAMERA_CONTROL,
@@ -2310,7 +2312,9 @@
      * <p>If this device is the largest or only camera device with a given facing, then this
      * position will be <code>(0, 0, 0)</code>; a camera device with a lens optical center located 3 cm
      * from the main sensor along the +X axis (to the right from the user's perspective) will
-     * report <code>(0.03, 0, 0)</code>.</p>
+     * report <code>(0.03, 0, 0)</code>.  Note that this means that, for many computer vision
+     * applications, the position needs to be negated to convert it to a translation from the
+     * camera to the origin.</p>
      * <p>To transform a pixel coordinates between two cameras facing the same direction, first
      * the source camera ACAMERA_LENS_DISTORTION must be corrected for.  Then the source
      * camera ACAMERA_LENS_INTRINSIC_CALIBRATION needs to be applied, followed by the
@@ -2322,7 +2326,8 @@
      * <p>To compare this against a real image from the destination camera, the destination camera
      * image then needs to be corrected for radial distortion before comparison or sampling.</p>
      * <p>When ACAMERA_LENS_POSE_REFERENCE is GYROSCOPE, then this position is relative to
-     * the center of the primary gyroscope on the device.</p>
+     * the center of the primary gyroscope on the device. The axis definitions are the same as
+     * with PRIMARY_CAMERA.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
@@ -2416,13 +2421,15 @@
      * </code></pre>
      * <p>which can then be combined with the camera pose rotation
      * <code>R</code> and translation <code>t</code> (ACAMERA_LENS_POSE_ROTATION and
-     * ACAMERA_LENS_POSE_TRANSLATION, respective) to calculate the
+     * ACAMERA_LENS_POSE_TRANSLATION, respectively) to calculate the
      * complete transform from world coordinates to pixel
      * coordinates:</p>
-     * <pre><code>P = [ K 0   * [ R t
-     *      0 1 ]     0 1 ]
+     * <pre><code>P = [ K 0   * [ R -Rt
+     *      0 1 ]      0 1 ]
      * </code></pre>
-     * <p>and with <code>p_w</code> being a point in the world coordinate system
+     * <p>(Note the negation of poseTranslation when mapping from camera
+     * to world coordinates, and multiplication by the rotation).</p>
+     * <p>With <code>p_w</code> being a point in the world coordinate system
      * and <code>p_s</code> being a point in the camera active pixel array
      * coordinate system, and with the mapping including the
      * homogeneous division by z:</p>
@@ -2444,6 +2451,13 @@
      * activeArraySize rectangle), to determine the final pixel
      * coordinate of the world point for processed (non-RAW)
      * output buffers.</p>
+     * <p>For camera devices, the center of pixel <code>(x,y)</code> is located at
+     * coordinate <code>(x + 0.5, y + 0.5)</code>.  So on a device with a
+     * precorrection active array of size <code>(10,10)</code>, the valid pixel
+     * indices go from <code>(0,0)-(9,9)</code>, and a perfectly-built camera would
+     * have an optical center at the exact center of the pixel grid, at
+     * coordinates <code>(5.0, 5.0)</code>, which is the top-left corner of pixel
+     * <code>(5,5)</code>.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_POSE_ROTATION
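The world-to-pixel formula documented above, P = [K 0; 0 1] * [R -Rt; 0 1], can be made concrete with a small sketch (not part of this patch). It assumes the ACAMERA_LENS_POSE_ROTATION quaternion has already been converted to a row-major 3x3 matrix R, takes t from ACAMERA_LENS_POSE_TRANSLATION and k = [f_x, f_y, c_x, c_y, s] from ACAMERA_LENS_INTRINSIC_CALIBRATION, and ignores lens distortion:

#include <array>

struct Vec3 { double x, y, z; };

// p_c = R * (p_w - t); this is the "-Rt" column of the combined matrix.
static Vec3 worldToCamera(const std::array<double, 9>& R, const Vec3& t, const Vec3& pw) {
    const Vec3 d{pw.x - t.x, pw.y - t.y, pw.z - t.z};
    return { R[0]*d.x + R[1]*d.y + R[2]*d.z,
             R[3]*d.x + R[4]*d.y + R[5]*d.z,
             R[6]*d.x + R[7]*d.y + R[8]*d.z };
}

// Apply K = [f_x s c_x; 0 f_y c_y; 0 0 1] with the homogeneous division by z.
static void cameraToPixel(const std::array<double, 5>& k, const Vec3& pc,
                          double* u, double* v) {
    *u = (k[0] * pc.x + k[4] * pc.y) / pc.z + k[2];
    *v = (k[1] * pc.y) / pc.z + k[3];
}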
@@ -3057,7 +3071,7 @@
      * outputs will crop horizontally (pillarbox), and 16:9
      * streams will match exactly. These additional crops will
      * be centered within the crop region.</p>
-     * <p>If the coordinate system is android.sensor.info.activeArraysSize, the width and height
+     * <p>If the coordinate system is ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, the width and height
      * of the crop region cannot be set to be smaller than
      * <code>floor( activeArraySize.width / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code> and
      * <code>floor( activeArraySize.height / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code>, respectively.</p>
@@ -4673,8 +4687,8 @@
     ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE =                  // byte (acamera_metadata_enum_android_statistics_lens_shading_map_mode_t)
             ACAMERA_STATISTICS_START + 16,
     /**
-     * <p>A control for selecting whether OIS position information is included in output
-     * result metadata.</p>
+     * <p>A control for selecting whether optical stabilization (OIS) position
+     * information is included in output result metadata.</p>
      *
      * <p>Type: byte (acamera_metadata_enum_android_statistics_ois_data_mode_t)</p>
      *
@@ -4684,6 +4698,12 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
+     * <p>Since optical image stabilization generally involves motion much faster than the duration
+     * of individual image exposure, multiple OIS samples can be included for a single capture
+     * result. For example, if the OIS reporting operates at 200 Hz, a typical camera operating
+     * at 30fps may have 6-7 OIS samples per capture result. This information can be combined
+     * with the rolling shutter skew to account for lens motion during image exposure in
+     * post-processing algorithms.</p>
      */
     ACAMERA_STATISTICS_OIS_DATA_MODE =                          // byte (acamera_metadata_enum_android_statistics_ois_data_mode_t)
             ACAMERA_STATISTICS_START + 17,
@@ -4715,11 +4735,15 @@
      * </ul></p>
      *
      * <p>The array contains the amount of shifts in x direction, in pixels, based on OIS samples.
-     * A positive value is a shift from left to right in active array coordinate system. For
-     * example, if the optical center is (1000, 500) in active array coordinates, a shift of
-     * (3, 0) puts the new optical center at (1003, 500).</p>
+     * A positive value is a shift from left to right in the pre-correction active array
+     * coordinate system. For example, if the optical center is (1000, 500) in pre-correction
+     * active array coordinates, a shift of (3, 0) puts the new optical center at (1003, 500).</p>
      * <p>The number of shifts must match the number of timestamps in
      * ACAMERA_STATISTICS_OIS_TIMESTAMPS.</p>
+     * <p>The OIS samples are not affected by whether lens distortion correction is enabled (on
+     * supporting devices). They are always reported in pre-correction active array coordinates,
+     * since the scaling of OIS shifts would depend on the specific spot on the sensor where the shift
+     * is needed.</p>
      *
      * @see ACAMERA_STATISTICS_OIS_TIMESTAMPS
      */
@@ -4736,11 +4760,15 @@
      * </ul></p>
      *
      * <p>The array contains the amount of shifts in y direction, in pixels, based on OIS samples.
-     * A positive value is a shift from top to bottom in active array coordinate system. For
-     * example, if the optical center is (1000, 500) in active array coordinates, a shift of
-     * (0, 5) puts the new optical center at (1000, 505).</p>
+     * A positive value is a shift from top to bottom in the pre-correction active array
+     * coordinate system. For example, if the optical center is (1000, 500) in pre-correction
+     * active array coordinates, a shift of (0, 5) puts the new optical center at (1000, 505).</p>
      * <p>The number of shifts must match the number of timestamps in
      * ACAMERA_STATISTICS_OIS_TIMESTAMPS.</p>
+     * <p>The OIS samples are not affected by whether lens distortion correction is enabled (on
+     * supporting devices). They are always reported in pre-correction active array coordinates,
+     * since the scaling of OIS shifts would depend on the specific spot on the sensor where the shift
+     * is needed.</p>
      *
      * @see ACAMERA_STATISTICS_OIS_TIMESTAMPS
      */
@@ -5092,12 +5120,26 @@
      * the following code snippet can be used:</p>
      * <pre><code>// Returns true if the device supports the required hardware level, or better.
      * boolean isHardwareLevelSupported(CameraCharacteristics c, int requiredLevel) {
+     *     final int[] sortedHwLevels = {
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_FULL,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_3
+     *     };
      *     int deviceLevel = c.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL);
-     *     if (deviceLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
-     *         return requiredLevel == deviceLevel;
+     *     if (requiredLevel == deviceLevel) {
+     *         return true;
      *     }
-     *     // deviceLevel is not LEGACY, can use numerical sort
-     *     return requiredLevel &lt;= deviceLevel;
+     *
+     *     for (int sortedlevel : sortedHwLevels) {
+     *         if (sortedlevel == requiredLevel) {
+     *             return true;
+     *         } else if (sortedlevel == deviceLevel) {
+     *             return false;
+     *         }
+     *     }
+     *     return false; // Should never reach here
      * }
      * </code></pre>
      * <p>At a high level, the levels are:</p>
@@ -5111,6 +5153,8 @@
      *   post-processing settings, and image capture at a high rate.</li>
      * <li><code>LEVEL_3</code> devices additionally support YUV reprocessing and RAW image capture, along
      *   with additional output stream configurations.</li>
+     * <li><code>EXTERNAL</code> devices are similar to <code>LIMITED</code> devices with exceptions like some sensor or
+     *   lens information not reported or less stable frame rates.</li>
      * </ul>
      * <p>See the individual level enums for full descriptions of the supported capabilities.  The
      * ACAMERA_REQUEST_AVAILABLE_CAPABILITIES entry describes the device's capabilities at a
@@ -5414,16 +5458,34 @@
      * any correction at all would slow down capture rate.  Every output stream will have a
      * similar amount of enhancement applied.</p>
      * <p>The correction only applies to processed outputs such as YUV, JPEG, or DEPTH16; it is not
-     * applied to any RAW output. Metadata coordinates such as face rectangles or metering
-     * regions are also not affected by correction.</p>
+     * applied to any RAW output.</p>
      * <p>This control will be on by default on devices that support this control. Applications
      * disabling distortion correction need to pay extra attention with the coordinate system of
      * metering regions, crop region, and face rectangles. When distortion correction is OFF,
      * metadata coordinates follow the coordinate system of
      * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE. When distortion is not OFF, metadata
-     * coordinates follow the coordinate system of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
+     * coordinates follow the coordinate system of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.  The
+     * camera device will map these metadata fields to match the corrected image produced by the
+     * camera device, for both capture requests and results.  However, this mapping is not very
+     * precise, since rectangles do not generally map to rectangles when corrected.  Only linear
+     * scaling between the active array and precorrection active array coordinates is
+     * performed. Applications that require precise correction of metadata need to undo that
+     * linear scaling, and apply a more complete correction that takes into account the app's
+     * own requirements.</p>
+     * <p>The full list of metadata that is affected in this way by distortion correction is:</p>
+     * <ul>
+     * <li>ACAMERA_CONTROL_AF_REGIONS</li>
+     * <li>ACAMERA_CONTROL_AE_REGIONS</li>
+     * <li>ACAMERA_CONTROL_AWB_REGIONS</li>
+     * <li>ACAMERA_SCALER_CROP_REGION</li>
+     * <li>android.statistics.faces</li>
+     * </ul>
      *
+     * @see ACAMERA_CONTROL_AE_REGIONS
+     * @see ACAMERA_CONTROL_AF_REGIONS
+     * @see ACAMERA_CONTROL_AWB_REGIONS
      * @see ACAMERA_LENS_DISTORTION
+     * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
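The "linear scaling only" caveat above amounts to a per-axis rescale between the two array rectangles. A minimal sketch, assuming both rectangles are given as (left, top, width, height) and ignoring the nonlinear ACAMERA_LENS_DISTORTION terms:

struct ArrayRect { int left, top, width, height; };

// Map an active-array coordinate back to pre-correction active-array
// coordinates using the simple linear scaling described above.
static void activeToPreCorrection(const ArrayRect& active, const ArrayRect& pre,
                                  float xActive, float yActive,
                                  float* xPre, float* yPre) {
    *xPre = pre.left + (xActive - active.left) * (float)pre.width  / (float)active.width;
    *yPre = pre.top  + (yActive - active.top)  * (float)pre.height / (float)active.height;
}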
@@ -7155,11 +7217,11 @@
      * camera in the list of supported camera devices.</p>
      * <p>This capability requires the camera device to support the following:</p>
      * <ul>
-     * <li>This camera device must list the following static metadata entries in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html">CameraCharacteristics</a>:<ul>
-     * <li>android.logicalMultiCamera.physicalIds</li>
-     * <li>ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE</li>
-     * </ul>
-     * </li>
+     * <li>The IDs of underlying physical cameras are returned via
+     *   <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getPhysicalCameraIds">CameraCharacteristics#getPhysicalCameraIds</a>.</li>
+     * <li>This camera device must list static metadata
+     *   ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE in
+     *   <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html">CameraCharacteristics</a>.</li>
      * <li>The underlying physical cameras' static metadata must list the following entries,
      *   so that the application can correlate pixels from the physical streams:<ul>
      * <li>ACAMERA_LENS_POSE_REFERENCE</li>
@@ -7878,6 +7940,9 @@
 
 } acamera_metadata_enum_android_distortion_correction_mode_t;
 
+
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_METADATA_TAGS_H */
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index 2fb5d12..5340e76 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 // Container for output targets
 typedef struct ACameraOutputTargets ACameraOutputTargets;
 
@@ -302,6 +304,10 @@
  */
 void ACaptureRequest_free(ACaptureRequest* request) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 28
+
 /**
  * Associate an arbitrary user context pointer to the {@link ACaptureRequest}
  *
@@ -350,6 +356,8 @@
  */
 ACaptureRequest* ACaptureRequest_copy(const ACaptureRequest* src) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 __END_DECLS
 
 #endif /* _NDK_CAPTURE_REQUEST_H */
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index a7ac2d7..c559ff9 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -140,14 +140,6 @@
 }
 
 /*
- * Returns "true" if the device is rotated 90 degrees.
- */
-static bool isDeviceRotated(int orientation) {
-    return orientation != DISPLAY_ORIENTATION_0 &&
-            orientation != DISPLAY_ORIENTATION_180;
-}
-
-/*
  * Configures and starts the MediaCodec encoder.  Obtains an input surface
  * from the codec.
  */
@@ -242,22 +234,11 @@
         const DisplayInfo& mainDpyInfo) {
 
     // Set the region of the layer stack we're interested in, which in our
-    // case is "all of it".  If the app is rotated (so that the width of the
-    // app is based on the height of the display), reverse width/height.
-    bool deviceRotated = isDeviceRotated(mainDpyInfo.orientation);
-    uint32_t sourceWidth, sourceHeight;
-    if (!deviceRotated) {
-        sourceWidth = mainDpyInfo.w;
-        sourceHeight = mainDpyInfo.h;
-    } else {
-        ALOGV("using rotated width/height");
-        sourceHeight = mainDpyInfo.w;
-        sourceWidth = mainDpyInfo.h;
-    }
-    Rect layerStackRect(sourceWidth, sourceHeight);
+    // case is "all of it".
+    Rect layerStackRect(mainDpyInfo.w, mainDpyInfo.h);
 
     // We need to preserve the aspect ratio of the display.
-    float displayAspect = (float) sourceHeight / (float) sourceWidth;
+    float displayAspect = (float) mainDpyInfo.h / (float) mainDpyInfo.w;
 
 
     // Set the way we map the output onto the display surface (which will
@@ -334,6 +315,22 @@
 }
 
 /*
+ * Set the main display width and height to the actual width and height
+ */
+static status_t getActualDisplaySize(const sp<IBinder>& mainDpy, DisplayInfo* mainDpyInfo) {
+    Rect viewport;
+    status_t err = SurfaceComposerClient::getDisplayViewport(mainDpy, &viewport);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to get display viewport\n");
+        return err;
+    }
+    mainDpyInfo->w = viewport.width();
+    mainDpyInfo->h = viewport.height();
+
+    return NO_ERROR;
+}
+
+/*
  * Runs the MediaCodec encoder, sending the output to the MediaMuxer.  The
  * input frames are coming from the virtual display as fast as SurfaceFlinger
  * wants to send them.
@@ -403,14 +400,22 @@
                     // useful stuff is hard to get at without a Dalvik VM.
                     err = SurfaceComposerClient::getDisplayInfo(mainDpy,
                             &mainDpyInfo);
-                    if (err != NO_ERROR) {
+                    if (err == NO_ERROR) {
+                        err = getActualDisplaySize(mainDpy, &mainDpyInfo);
+                        if (err != NO_ERROR) {
+                            fprintf(stderr, "ERROR: unable to set actual display size\n");
+                            return err;
+                        }
+
+                        if (orientation != mainDpyInfo.orientation) {
+                            ALOGD("orientation changed, now %d", mainDpyInfo.orientation);
+                            SurfaceComposerClient::Transaction t;
+                            setDisplayProjection(t, virtualDpy, mainDpyInfo);
+                            t.apply();
+                            orientation = mainDpyInfo.orientation;
+                        }
+                    } else {
                         ALOGW("getDisplayInfo(main) failed: %d", err);
-                    } else if (orientation != mainDpyInfo.orientation) {
-                        ALOGD("orientation changed, now %d", mainDpyInfo.orientation);
-                        SurfaceComposerClient::Transaction t;
-                        setDisplayProjection(t, virtualDpy, mainDpyInfo);
-                        t.apply();
-                        orientation = mainDpyInfo.orientation;
                     }
                 }
 
@@ -552,6 +557,10 @@
     return rawFp;
 }
 
+static inline uint32_t floorToEven(uint32_t num) {
+    return num & ~1;
+}
+
 /*
  * Main "do work" start point.
  *
@@ -579,6 +588,13 @@
         fprintf(stderr, "ERROR: unable to get display characteristics\n");
         return err;
     }
+
+    err = getActualDisplaySize(mainDpy, &mainDpyInfo);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to set actual display size\n");
+        return err;
+    }
+
     if (gVerbose) {
         printf("Main display is %dx%d @%.2ffps (orientation=%u)\n",
                 mainDpyInfo.w, mainDpyInfo.h, mainDpyInfo.fps,
@@ -586,12 +602,12 @@
         fflush(stdout);
     }
 
-    bool rotated = isDeviceRotated(mainDpyInfo.orientation);
+    // The encoder can't be configured with odd dimensions
     if (gVideoWidth == 0) {
-        gVideoWidth = rotated ? mainDpyInfo.h : mainDpyInfo.w;
+        gVideoWidth = floorToEven(mainDpyInfo.w);
     }
     if (gVideoHeight == 0) {
-        gVideoHeight = rotated ? mainDpyInfo.w : mainDpyInfo.h;
+        gVideoHeight = floorToEven(mainDpyInfo.h);
     }
 
     // Configure and start the encoder.
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 44b0015..95a16f3 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -17,7 +17,6 @@
 #include "SineSource.h"
 
 #include <binder/ProcessState.h>
-#include <media/MediaExtractor.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -28,6 +27,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/SimpleDecodingSource.h>
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 2c088e6..0d331df 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -32,7 +32,6 @@
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
 #include <media/DataSource.h>
-#include <media/MediaExtractor.h>
 #include <media/MediaSource.h>
 #include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
@@ -50,6 +49,7 @@
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/SimpleDecodingSource.h>
@@ -78,6 +78,7 @@
 static bool gPlaybackAudio;
 static bool gWriteMP4;
 static bool gDisplayHistogram;
+static bool gVerbose = false;
 static bool showProgress = true;
 static String8 gWriteMP4Filename;
 static String8 gComponentNameOverride;
@@ -159,6 +160,11 @@
             break;
         }
 
+        if (gVerbose) {
+            MetaDataBase &meta = mbuf->meta_data();
+            fprintf(stdout, "sample format: %s\n", meta.toString().c_str());
+        }
+
         CHECK_EQ(
                 fwrite((const uint8_t *)mbuf->data() + mbuf->range_offset(),
                        1,
@@ -348,7 +354,10 @@
                     decodeTimesUs.push(delayDecodeUs);
                 }
 
-                if (showProgress && (n++ % 16) == 0) {
+                if (gVerbose) {
+                    MetaDataBase &meta = buffer->meta_data();
+                    fprintf(stdout, "%ld sample format: %s\n", numFrames, meta.toString().c_str());
+                } else if (showProgress && (n++ % 16) == 0) {
                     printf(".");
                     fflush(stdout);
                 }
@@ -630,6 +639,7 @@
     fprintf(stderr, "       -T allocate buffers from a surface texture\n");
     fprintf(stderr, "       -d(ump) output_filename (raw stream data to a file)\n");
     fprintf(stderr, "       -D(ump) output_filename (decoded PCM data to a file)\n");
+    fprintf(stderr, "       -v be more verbose\n");
 }
 
 static void dumpCodecProfiles(bool queryDecoders) {
@@ -640,7 +650,8 @@
         MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
         MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
         MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
-        MEDIA_MIMETYPE_VIDEO_DOLBY_VISION
+        MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+        MEDIA_MIMETYPE_AUDIO_EAC3, MEDIA_MIMETYPE_AUDIO_AC4
     };
 
     const char *codecType = queryDecoders? "decoder" : "encoder";
@@ -708,7 +719,7 @@
     sp<ALooper> looper;
 
     int res;
-    while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
+    while ((res = getopt(argc, argv, "vhaqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
         switch (res) {
             case 'a':
             {
@@ -832,6 +843,12 @@
                 break;
             }
 
+            case 'v':
+            {
+                gVerbose = true;
+                break;
+            }
+
             case '?':
             case 'h':
             default:
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index b0199d8..b2f39dc 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -24,7 +24,6 @@
 #include <media/DataSource.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IStreamSource.h>
-#include <media/MediaExtractor.h>
 #include <media/mediaplayer.h>
 #include <media/MediaSource.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -32,6 +31,7 @@
 #include <media/stagefright/DataSourceFactory.h>
 #include <media/stagefright/InterfaceUtils.h>
 #include <media/stagefright/MPEG2TSWriter.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MetaData.h>
 
diff --git a/drm/common/Android.bp b/drm/common/Android.bp
index 1552c3f..272684c 100644
--- a/drm/common/Android.bp
+++ b/drm/common/Android.bp
@@ -35,7 +35,7 @@
 
     cflags: ["-Wall", "-Werror"],
 
-    static_libs: ["libbinder"],
+    shared_libs: ["libbinder"],
 
     export_include_dirs: ["include"],
 }
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 73ecda1..a2594aa 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -225,8 +225,13 @@
 
 void BnCrypto::readVector(const Parcel &data, Vector<uint8_t> &vector) const {
     uint32_t size = data.readInt32();
-    vector.insertAt((size_t)0, size);
-    data.read(vector.editArray(), size);
+    if (vector.insertAt((size_t)0, size) < 0) {
+        vector.clear();
+    }
+    if (data.read(vector.editArray(), size) != NO_ERROR) {
+        vector.clear();
+        android_errorWriteWithInfoLog(0x534e4554, "62872384", -1, NULL, 0);
+    }
 }
 
 void BnCrypto::writeVector(Parcel *reply, Vector<uint8_t> const &vector) const {
diff --git a/include/media/EventLog.h b/include/media/EventLog.h
new file mode 120000
index 0000000..9b2c4bf
--- /dev/null
+++ b/include/media/EventLog.h
@@ -0,0 +1 @@
+../../media/utils/include/mediautils/EventLog.h
\ No newline at end of file
diff --git a/include/media/MediaExtractor.h b/include/media/MediaExtractor.h
deleted file mode 120000
index 4b35fe1..0000000
--- a/include/media/MediaExtractor.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmediaextractor/include/media/MediaExtractor.h
\ No newline at end of file
diff --git a/include/media/MediaExtractorPluginApi.h b/include/media/MediaExtractorPluginApi.h
new file mode 100644
index 0000000..930b6e2
--- /dev/null
+++ b/include/media/MediaExtractorPluginApi.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_EXTRACTOR_PLUGIN_API_H_
+#define MEDIA_EXTRACTOR_PLUGIN_API_H_
+
+#include <utils/Errors.h> // for status_t
+
+namespace android {
+
+struct MediaTrack;
+class MetaDataBase;
+
+extern "C" {
+
+struct CDataSource {
+    ssize_t (*readAt)(void *handle, off64_t offset, void *data, size_t size);
+    status_t (*getSize)(void *handle, off64_t *size);
+    uint32_t (*flags)(void *handle);
+    bool (*getUri)(void *handle, char *uriString, size_t bufferSize);
+    void *handle;
+};
+
+struct CMediaExtractor {
+    void *data;
+
+    void (*free)(void *data);
+    size_t (*countTracks)(void *data);
+    MediaTrack* (*getTrack)(void *data, size_t index);
+    status_t (*getTrackMetaData)(
+            void *data,
+            MetaDataBase& meta,
+            size_t index, uint32_t flags);
+
+    status_t (*getMetaData)(void *data, MetaDataBase& meta);
+    uint32_t (*flags)(void *data);
+    status_t (*setMediaCas)(void *data, const uint8_t* casToken, size_t size);
+    const char * (*name)(void *data);
+};
+
+typedef CMediaExtractor* (*CreatorFunc)(CDataSource *source, void *meta);
+typedef void (*FreeMetaFunc)(void *meta);
+
+// The sniffer can optionally fill in an opaque object, "meta", that helps
+// the corresponding extractor initialize its state without duplicating
+// effort already exerted by the sniffer. If "freeMeta" is given, it will be
+// called against the opaque object when it is no longer used.
+typedef CreatorFunc (*SnifferFunc)(
+        CDataSource *source, float *confidence,
+        void **meta, FreeMetaFunc *freeMeta);
+
+typedef struct {
+    const uint8_t b[16];
+} media_uuid_t;
+
+typedef struct {
+    // version number of this structure
+    const uint32_t def_version;
+
+    // A unique identifier for this extractor.
+    // See below for a convenience macro to create this from a string.
+    media_uuid_t extractor_uuid;
+
+    // Version number of this extractor. When two extractors with the same
+    // uuid are encountered, the one with the largest version number will
+    // be used.
+    const uint32_t extractor_version;
+
+    // a human readable name
+    const char *extractor_name;
+
+    // the sniffer function
+    const SnifferFunc sniff;
+} ExtractorDef;
+
+const uint32_t EXTRACTORDEF_VERSION = 1;
+
+// each plugin library exports one function of this type
+typedef ExtractorDef (*GetExtractorDef)();
+
+} // extern "C"
+
+}  // namespace android
+
+#endif  // MEDIA_EXTRACTOR_PLUGIN_API_H_
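As the comments above note, a plugin library exports exactly one GETEXTRACTORDEF symbol, and its sniffer returns a CreatorFunc when it recognizes the stream. A hedged sketch built directly on this C API (the uuid, name, and magic check are placeholders; the AAC and AMR extractor changes later in this patch show the real pattern, including the helper wrappers):

#include <stdint.h>
#include <media/MediaExtractorPluginApi.h>

namespace android {
extern "C" {

__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
    return {
        EXTRACTORDEF_VERSION,
        {{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
          0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}},  // placeholder uuid
        1,                    // extractor_version
        "Example Extractor",  // placeholder name
        [](CDataSource *source, float *confidence,
           void **, FreeMetaFunc *) -> CreatorFunc {
            uint8_t magic[4];
            if (source->readAt(source->handle, 0, magic, sizeof(magic))
                    != (ssize_t)sizeof(magic)) {
                return NULL;
            }
            // A real sniffer would inspect magic here; on a match, set
            // *confidence and return a CreatorFunc that builds a CMediaExtractor.
            *confidence = 0.0f;
            return NULL;
        }
    };
}

}  // extern "C"
}  // namespace android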
diff --git a/include/media/MediaExtractorPluginHelper.h b/include/media/MediaExtractorPluginHelper.h
new file mode 100644
index 0000000..c817b30
--- /dev/null
+++ b/include/media/MediaExtractorPluginHelper.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_EXTRACTOR_PLUGIN_HELPER_H_
+
+#define MEDIA_EXTRACTOR_PLUGIN_HELPER_H_
+
+#include <arpa/inet.h>
+#include <stdio.h>
+#include <vector>
+
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <media/MediaExtractorPluginApi.h>
+
+namespace android {
+
+class DataSourceBase;
+class MetaDataBase;
+struct MediaTrack;
+
+// extractor plugins can derive from this class which looks remarkably
+// like MediaExtractor and can be easily wrapped in the required C API
+class MediaExtractorPluginHelper
+{
+public:
+    virtual ~MediaExtractorPluginHelper() {}
+    virtual size_t countTracks() = 0;
+    virtual MediaTrack *getTrack(size_t index) = 0;
+
+    enum GetTrackMetaDataFlags {
+        kIncludeExtensiveMetaData = 1
+    };
+    virtual status_t getTrackMetaData(
+            MetaDataBase& meta,
+            size_t index, uint32_t flags = 0) = 0;
+
+    // Return container specific meta-data. The default implementation
+    // returns an empty metadata object.
+    virtual status_t getMetaData(MetaDataBase& meta) = 0;
+
+    enum Flags {
+        CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
+        CAN_SEEK_FORWARD   = 2,  // the "seek 10secs forward button"
+        CAN_PAUSE          = 4,
+        CAN_SEEK           = 8,  // the "seek bar"
+    };
+
+    // If subclasses do _not_ override this, the default is
+    // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
+    virtual uint32_t flags() const {
+        return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
+    };
+
+    virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
+        return INVALID_OPERATION;
+    }
+
+    virtual const char * name() { return "<unspecified>"; }
+
+protected:
+    MediaExtractorPluginHelper() {}
+
+private:
+    MediaExtractorPluginHelper(const MediaExtractorPluginHelper &);
+    MediaExtractorPluginHelper &operator=(const MediaExtractorPluginHelper &);
+};
+
+inline CMediaExtractor *wrap(MediaExtractorPluginHelper *extractor) {
+    CMediaExtractor *wrapper = (CMediaExtractor*) malloc(sizeof(CMediaExtractor));
+    wrapper->data = extractor;
+    wrapper->free = [](void *data) -> void {
+        delete (MediaExtractorPluginHelper*)(data);
+    };
+    wrapper->countTracks = [](void *data) -> size_t {
+        return ((MediaExtractorPluginHelper*)data)->countTracks();
+    };
+    wrapper->getTrack = [](void *data, size_t index) -> MediaTrack* {
+        return ((MediaExtractorPluginHelper*)data)->getTrack(index);
+    };
+    wrapper->getTrackMetaData = [](
+            void *data,
+            MetaDataBase& meta,
+            size_t index, uint32_t flags) -> status_t {
+        return ((MediaExtractorPluginHelper*)data)->getTrackMetaData(meta, index, flags);
+    };
+    wrapper->getMetaData = [](
+            void *data,
+            MetaDataBase& meta) -> status_t {
+        return ((MediaExtractorPluginHelper*)data)->getMetaData(meta);
+    };
+    wrapper->flags = [](
+            void *data) -> uint32_t {
+        return ((MediaExtractorPluginHelper*)data)->flags();
+    };
+    wrapper->setMediaCas = [](
+            void *data, const uint8_t *casToken, size_t size) -> status_t {
+        return ((MediaExtractorPluginHelper*)data)->setMediaCas(casToken, size);
+    };
+    wrapper->name = [](
+            void *data) -> const char * {
+        return ((MediaExtractorPluginHelper*)data)->name();
+    };
+    return wrapper;
+}
+
+/* adds some convenience methods */
+class DataSourceHelper {
+public:
+    explicit DataSourceHelper(CDataSource *csource) {
+        mSource = csource;
+    }
+
+    explicit DataSourceHelper(DataSourceHelper *source) {
+        mSource = source->mSource;
+    }
+
+    ssize_t readAt(off64_t offset, void *data, size_t size) {
+        return mSource->readAt(mSource->handle, offset, data, size);
+    }
+
+    status_t getSize(off64_t *size) {
+        return mSource->getSize(mSource->handle, size);
+    }
+
+    bool getUri(char *uriString, size_t bufferSize) {
+        return mSource->getUri(mSource->handle, uriString, bufferSize);
+    }
+
+    uint32_t flags() {
+        return mSource->flags(mSource->handle);
+    }
+
+    // Convenience methods:
+    bool getUInt16(off64_t offset, uint16_t *x) {
+        *x = 0;
+
+        uint8_t byte[2];
+        if (readAt(offset, byte, 2) != 2) {
+            return false;
+        }
+
+        *x = (byte[0] << 8) | byte[1];
+
+        return true;
+    }
+
+    // 3 byte int, returned as a 32-bit int
+    bool getUInt24(off64_t offset, uint32_t *x) {
+        *x = 0;
+
+        uint8_t byte[3];
+        if (readAt(offset, byte, 3) != 3) {
+            return false;
+        }
+
+        *x = (byte[0] << 16) | (byte[1] << 8) | byte[2];
+
+        return true;
+    }
+
+    bool getUInt32(off64_t offset, uint32_t *x) {
+        *x = 0;
+
+        uint32_t tmp;
+        if (readAt(offset, &tmp, 4) != 4) {
+            return false;
+        }
+
+        *x = ntohl(tmp);
+
+        return true;
+    }
+
+    bool getUInt64(off64_t offset, uint64_t *x) {
+        *x = 0;
+
+        uint64_t tmp;
+        if (readAt(offset, &tmp, 8) != 8) {
+            return false;
+        }
+
+        *x = ((uint64_t)ntohl(tmp & 0xffffffff) << 32) | ntohl(tmp >> 32);
+
+        return true;
+    }
+
+    // read either int<N> or int<2N> into a uint<2N>_t, size is the int size in bytes.
+    bool getUInt16Var(off64_t offset, uint16_t *x, size_t size) {
+        if (size == 2) {
+            return getUInt16(offset, x);
+        }
+        if (size == 1) {
+            uint8_t tmp;
+            if (readAt(offset, &tmp, 1) == 1) {
+                *x = tmp;
+                return true;
+            }
+        }
+        return false;
+    }
+
+    bool getUInt32Var(off64_t offset, uint32_t *x, size_t size) {
+        if (size == 4) {
+            return getUInt32(offset, x);
+        }
+        if (size == 2) {
+            uint16_t tmp;
+            if (getUInt16(offset, &tmp)) {
+                *x = tmp;
+                return true;
+            }
+        }
+        return false;
+    }
+
+    bool getUInt64Var(off64_t offset, uint64_t *x, size_t size) {
+        if (size == 8) {
+            return getUInt64(offset, x);
+        }
+        if (size == 4) {
+            uint32_t tmp;
+            if (getUInt32(offset, &tmp)) {
+                *x = tmp;
+                return true;
+            }
+        }
+        return false;
+    }
+
+protected:
+    CDataSource *mSource;
+};
+
+
+
+// helpers to create a media_uuid_t from a string literal
+
+// purposely not defined anywhere so that this will fail to link if
+// expressions below are not evaluated at compile time
+int invalid_uuid_string(const char *);
+
+template <typename T, size_t N>
+constexpr uint8_t _digitAt_(const T (&s)[N], const size_t n) {
+    return s[n] >= '0' && s[n] <= '9' ? s[n] - '0'
+            : s[n] >= 'a' && s[n] <= 'f' ? s[n] - 'a' + 10
+                    : s[n] >= 'A' && s[n] <= 'F' ? s[n] - 'A' + 10
+                            : invalid_uuid_string("uuid: bad digits");
+}
+
+template <typename T, size_t N>
+constexpr uint8_t _hexByteAt_(const T (&s)[N], size_t n) {
+    return (_digitAt_(s, n) << 4) + _digitAt_(s, n + 1);
+}
+
+constexpr bool _assertIsDash_(char c) {
+    return c == '-' ? true : invalid_uuid_string("Wrong format");
+}
+
+template <size_t N>
+constexpr media_uuid_t constUUID(const char (&s) [N]) {
+    static_assert(N == 37, "uuid: wrong length");
+    return
+            _assertIsDash_(s[8]),
+            _assertIsDash_(s[13]),
+            _assertIsDash_(s[18]),
+            _assertIsDash_(s[23]),
+            media_uuid_t {{
+                _hexByteAt_(s, 0),
+                _hexByteAt_(s, 2),
+                _hexByteAt_(s, 4),
+                _hexByteAt_(s, 6),
+                _hexByteAt_(s, 9),
+                _hexByteAt_(s, 11),
+                _hexByteAt_(s, 14),
+                _hexByteAt_(s, 16),
+                _hexByteAt_(s, 19),
+                _hexByteAt_(s, 21),
+                _hexByteAt_(s, 24),
+                _hexByteAt_(s, 26),
+                _hexByteAt_(s, 28),
+                _hexByteAt_(s, 30),
+                _hexByteAt_(s, 32),
+                _hexByteAt_(s, 34),
+            }};
+}
+// Convenience macro to create a media_uuid_t from a string literal, which should
+// be formatted as "12345678-1234-1234-1234-123456789abc", as generated by
+// e.g. https://www.uuidgenerator.net/ or the 'uuidgen' linux command.
+// Hex digits may be upper or lower case.
+//
+// The macro call is otherwise equivalent to specifying the structure directly
+// (e.g. UUID("7d613858-5837-4a38-84c5-332d1cddee27") is the same as
+//       {{0x7d, 0x61, 0x38, 0x58, 0x58, 0x37, 0x4a, 0x38,
+//         0x84, 0xc5, 0x33, 0x2d, 0x1c, 0xdd, 0xee, 0x27}})
+
+#define UUID(str) []{ constexpr media_uuid_t uuid = constUUID(str); return uuid; }()
+
+}  // namespace android
+
+#endif  // MEDIA_EXTRACTOR_PLUGIN_HELPER_H_
diff --git a/include/media/TimeCheck.h b/include/media/TimeCheck.h
index e3ef134..85e17f9 120000
--- a/include/media/TimeCheck.h
+++ b/include/media/TimeCheck.h
@@ -1 +1 @@
-../../media/libmedia/include/media/TimeCheck.h
\ No newline at end of file
+../../media/utils/include/mediautils/TimeCheck.h
\ No newline at end of file
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 70c281a..8a97299 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -21,6 +21,9 @@
 	libsoundtriggerservice \
 	libutils
 
+LOCAL_STATIC_LIBRARIES := \
+	libjsoncpp
+
 # TODO oboeservice is the old folder name for aaudioservice. It will be changed.
 LOCAL_C_INCLUDES := \
 	frameworks/av/services/audioflinger \
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index 9fc5a76..955a588 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -19,7 +19,7 @@
 #include <utils/Log.h>
 
 #include "AACExtractor.h"
-#include <media/DataSourceBase.h>
+#include <media/MediaExtractorPluginApi.h>
 #include <media/MediaTrack.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -36,7 +36,7 @@
 class AACSource : public MediaTrack {
 public:
     AACSource(
-            DataSourceBase *source,
+            DataSourceHelper *source,
             MetaDataBase &meta,
             const Vector<uint64_t> &offset_vector,
             int64_t frame_duration_us);
@@ -54,7 +54,7 @@
 
 private:
     static const size_t kMaxFrameSize;
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     MetaDataBase mMeta;
 
     off64_t mOffset;
@@ -92,7 +92,7 @@
 // The returned value is the AAC frame size with the ADTS header length (regardless of
 //     the presence of the CRC).
 // If headerSize is non-NULL, it will be used to return the size of the header of this ADTS frame.
-static size_t getAdtsFrameLength(DataSourceBase *source, off64_t offset, size_t* headerSize) {
+static size_t getAdtsFrameLength(DataSourceHelper *source, off64_t offset, size_t* headerSize) {
 
     const size_t kAdtsHeaderLengthNoCrc = 7;
     const size_t kAdtsHeaderLengthWithCrc = 9;
@@ -133,7 +133,7 @@
 }
 
 AACExtractor::AACExtractor(
-        DataSourceBase *source, off64_t offset)
+        DataSourceHelper *source, off64_t offset)
     : mDataSource(source),
       mInitCheck(NO_INIT),
       mFrameDurationUs(0) {
@@ -219,7 +219,7 @@
 const size_t AACSource::kMaxFrameSize = 8192;
 
 AACSource::AACSource(
-        DataSourceBase *source,
+        DataSourceHelper *source,
         MetaDataBase &meta,
         const Vector<uint64_t> &offset_vector,
         int64_t frame_duration_us)
@@ -323,21 +323,22 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-static MediaExtractor* CreateExtractor(
-        DataSourceBase *source,
+static CMediaExtractor* CreateExtractor(
+        CDataSource *source,
         void *meta) {
     off64_t offset = *static_cast<off64_t*>(meta);
-    return new AACExtractor(source, offset);
+    return wrap(new AACExtractor(new DataSourceHelper(source), offset));
 }
 
-static MediaExtractor::CreatorFunc Sniff(
-        DataSourceBase *source, float *confidence, void **meta,
-        MediaExtractor::FreeMetaFunc *freeMeta) {
+static CreatorFunc Sniff(
+        CDataSource *source, float *confidence, void **meta,
+        FreeMetaFunc *freeMeta) {
     off64_t pos = 0;
 
+    DataSourceHelper helper(source);
     for (;;) {
         uint8_t id3header[10];
-        if (source->readAt(pos, id3header, sizeof(id3header))
+        if (helper.readAt(pos, id3header, sizeof(id3header))
                 < (ssize_t)sizeof(id3header)) {
             return NULL;
         }
@@ -364,7 +365,7 @@
 
     uint8_t header[2];
 
-    if (source->readAt(pos, &header, 2) != 2) {
+    if (helper.readAt(pos, &header, 2) != 2) {
         return NULL;
     }
 
@@ -387,9 +388,9 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("4fd80eae-03d2-4d72-9eb9-48fa6bb54613"),
         1, // version
         "AAC Extractor",
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/AACExtractor.h
index 9dadbed..3f20461 100644
--- a/media/extractors/aac/AACExtractor.h
+++ b/media/extractors/aac/AACExtractor.h
@@ -18,7 +18,8 @@
 
 #define AAC_EXTRACTOR_H_
 
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 
 #include <utils/Vector.h>
@@ -28,9 +29,9 @@
 struct AMessage;
 class String8;
 
-class AACExtractor : public MediaExtractor {
+class AACExtractor : public MediaExtractorPluginHelper {
 public:
-    AACExtractor(DataSourceBase *source, off64_t offset);
+    AACExtractor(DataSourceHelper *source, off64_t offset);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -43,7 +44,7 @@
     virtual ~AACExtractor();
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     MetaDataBase mMeta;
     status_t mInitCheck;
 
@@ -55,7 +56,7 @@
 };
 
 bool SniffAAC(
-        DataSourceBase *source, String8 *mimeType, float *confidence, off64_t *offset);
+        DataSourceHelper *source, String8 *mimeType, float *confidence, off64_t *offset);
 
 }  // namespace android
 
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
index f56d5ef..e109fb3 100644
--- a/media/extractors/amr/AMRExtractor.cpp
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -20,7 +20,6 @@
 
 #include "AMRExtractor.h"
 
-#include <media/DataSourceBase.h>
 #include <media/MediaTrack.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBufferGroup.h>
@@ -34,7 +33,7 @@
 class AMRSource : public MediaTrack {
 public:
     AMRSource(
-            DataSourceBase *source,
+            DataSourceHelper *source,
             MetaDataBase &meta,
             bool isWide,
             const off64_t *offset_table,
@@ -52,7 +51,7 @@
     virtual ~AMRSource();
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     MetaDataBase mMeta;
     bool mIsWide;
 
@@ -98,7 +97,7 @@
     return frameSize;
 }
 
-static status_t getFrameSizeByOffset(DataSourceBase *source,
+static status_t getFrameSizeByOffset(DataSourceHelper *source,
         off64_t offset, bool isWide, size_t *frameSize) {
     uint8_t header;
     ssize_t count = source->readAt(offset, &header, 1);
@@ -118,7 +117,7 @@
 }
 
 static bool SniffAMR(
-        DataSourceBase *source, bool *isWide, float *confidence) {
+        DataSourceHelper *source, bool *isWide, float *confidence) {
     char header[9];
 
     if (source->readAt(0, header, sizeof(header)) != sizeof(header)) {
@@ -144,7 +143,7 @@
     return false;
 }
 
-AMRExtractor::AMRExtractor(DataSourceBase *source)
+AMRExtractor::AMRExtractor(DataSourceHelper *source)
     : mDataSource(source),
       mInitCheck(NO_INIT),
       mOffsetTableLength(0) {
@@ -192,6 +191,7 @@
 }
 
 AMRExtractor::~AMRExtractor() {
+    delete mDataSource;
 }
 
 status_t AMRExtractor::getMetaData(MetaDataBase &meta) {
@@ -229,7 +229,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 AMRSource::AMRSource(
-        DataSourceBase *source, MetaDataBase &meta,
+        DataSourceHelper *source, MetaDataBase &meta,
         bool isWide, const off64_t *offset_table, size_t offset_table_length)
     : mDataSource(source),
       mMeta(meta),
@@ -365,22 +365,23 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("c86639c9-2f31-40ac-a715-fa01b4493aaf"),
         1,
         "AMR Extractor",
         [](
-                DataSourceBase *source,
+                CDataSource *source,
                 float *confidence,
                 void **,
-                MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
-            if (SniffAMR(source, nullptr, confidence)) {
+                FreeMetaFunc *) -> CreatorFunc {
+            DataSourceHelper helper(source);
+            if (SniffAMR(&helper, nullptr, confidence)) {
                 return [](
-                        DataSourceBase *source,
-                        void *) -> MediaExtractor* {
-                    return new AMRExtractor(source);};
+                        CDataSource *source,
+                        void *) -> CMediaExtractor* {
+                    return wrap(new AMRExtractor(new DataSourceHelper(source)));};
             }
             return NULL;
         }
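
For readers new to the plugin interface these extractors are converging on, the registration pattern above condenses to one self-contained sketch. The headers are the ones this patch already pulls in; ProbeFormat, the all-zero UUID and the "Example Extractor" label are placeholders rather than names from the patch, and the creator reuses the AMRExtractor declared in the header below:

#include <stdint.h>

#include "AMRExtractor.h"
#include <media/MediaExtractorPluginApi.h>
#include <media/MediaExtractorPluginHelper.h>

namespace android {

// Placeholder probe; a real sniffer checks magic bytes such as "#!AMR\n".
static bool ProbeFormat(DataSourceHelper *source, float *confidence) {
    uint8_t magic[6];
    if (source->readAt(0, magic, sizeof(magic)) != (ssize_t)sizeof(magic)) {
        return false;
    }
    *confidence = 0.1f;
    return true;
}

extern "C" {
__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
    return {
        EXTRACTORDEF_VERSION,
        UUID("00000000-0000-0000-0000-000000000000"),  // placeholder UUID
        1,
        "Example Extractor",
        [](CDataSource *source, float *confidence, void **,
                FreeMetaFunc *) -> CreatorFunc {
            DataSourceHelper helper(source);           // stack helper, probe only
            if (!ProbeFormat(&helper, confidence)) {
                return NULL;
            }
            return [](CDataSource *source, void *) -> CMediaExtractor* {
                // The heap-allocated helper is owned by the extractor,
                // matching the new "delete mDataSource" in ~AMRExtractor.
                return wrap(new AMRExtractor(new DataSourceHelper(source)));
            };
        }
    };
}
}  // extern "C"

}  // namespace android

The sketch makes the ownership rule explicit: the probe-time helper lives on the stack and dies with the sniff call, while the creator's helper must outlive it, which is why the extractor destructors in this patch now delete mDataSource.
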
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/AMRExtractor.h
index c90b325..499ca67 100644
--- a/media/extractors/amr/AMRExtractor.h
+++ b/media/extractors/amr/AMRExtractor.h
@@ -19,7 +19,8 @@
 #define AMR_EXTRACTOR_H_
 
 #include <utils/Errors.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 
 namespace android {
@@ -28,9 +29,9 @@
 class String8;
 #define OFFSET_TABLE_LEN    300
 
-class AMRExtractor : public MediaExtractor {
+class AMRExtractor : public MediaExtractorPluginHelper {
 public:
-    explicit AMRExtractor(DataSourceBase *source);
+    explicit AMRExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -43,7 +44,7 @@
     virtual ~AMRExtractor();
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     MetaDataBase mMeta;
     status_t mInitCheck;
     bool mIsWide;
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index e3da259..1efaa0c 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -18,11 +18,13 @@
 #define LOG_TAG "FLACExtractor"
 #include <utils/Log.h>
 
+#include <stdint.h>
+
 #include "FLACExtractor.h"
 // libFLAC parser
 #include "FLAC/stream_decoder.h"
 
-#include <media/DataSourceBase.h>
+#include <media/MediaExtractorPluginApi.h>
 #include <media/MediaTrack.h>
 #include <media/VorbisComment.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -41,7 +43,7 @@
 
 public:
     FLACSource(
-            DataSourceBase *dataSource,
+            DataSourceHelper *dataSource,
             MetaDataBase &meta);
 
     virtual status_t start(MetaDataBase *params);
@@ -55,7 +57,7 @@
     virtual ~FLACSource();
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     MetaDataBase mTrackMetadata;
     FLACParser *mParser;
     bool mInitCheck;
@@ -77,7 +79,7 @@
     };
 
     explicit FLACParser(
-        DataSourceBase *dataSource,
+        DataSourceHelper *dataSource,
         // If metadata pointers aren't provided, we don't fill them
         MetaDataBase *fileMetadata = 0,
         MetaDataBase *trackMetadata = 0);
@@ -116,7 +118,7 @@
     }
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     MetaDataBase *mFileMetadata;
     MetaDataBase *mTrackMetadata;
     bool mInitCheck;
@@ -124,7 +126,7 @@
     // media buffers
     size_t mMaxBufferSize;
     MediaBufferGroup *mGroup;
-    void (*mCopy)(short *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
+    void (*mCopy)(int16_t *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
 
     // handle to underlying libFLAC parser
     FLAC__StreamDecoder *mDecoder;
@@ -383,7 +385,7 @@
 // These are candidates for optimization if needed.
 
 static void copyMono8(
-        short *dst,
+        int16_t *dst,
         const int * src[FLACParser::kMaxChannels],
         unsigned nSamples,
         unsigned /* nChannels */) {
@@ -393,7 +395,7 @@
 }
 
 static void copyStereo8(
-        short *dst,
+        int16_t *dst,
         const int * src[FLACParser::kMaxChannels],
         unsigned nSamples,
         unsigned /* nChannels */) {
@@ -403,7 +405,7 @@
     }
 }
 
-static void copyMultiCh8(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
+static void copyMultiCh8(int16_t *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
 {
     for (unsigned i = 0; i < nSamples; ++i) {
         for (unsigned c = 0; c < nChannels; ++c) {
@@ -413,7 +415,7 @@
 }
 
 static void copyMono16(
-        short *dst,
+        int16_t *dst,
         const int * src[FLACParser::kMaxChannels],
         unsigned nSamples,
         unsigned /* nChannels */) {
@@ -423,7 +425,7 @@
 }
 
 static void copyStereo16(
-        short *dst,
+        int16_t *dst,
         const int * src[FLACParser::kMaxChannels],
         unsigned nSamples,
         unsigned /* nChannels */) {
@@ -433,7 +435,7 @@
     }
 }
 
-static void copyMultiCh16(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
+static void copyMultiCh16(int16_t *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
 {
     for (unsigned i = 0; i < nSamples; ++i) {
         for (unsigned c = 0; c < nChannels; ++c) {
@@ -445,7 +447,7 @@
 // 24-bit versions should do dithering or noise-shaping, here or in AudioFlinger
 
 static void copyMono24(
-        short *dst,
+        int16_t *dst,
         const int * src[FLACParser::kMaxChannels],
         unsigned nSamples,
         unsigned /* nChannels */) {
@@ -455,7 +457,7 @@
 }
 
 static void copyStereo24(
-        short *dst,
+        int16_t *dst,
         const int * src[FLACParser::kMaxChannels],
         unsigned nSamples,
         unsigned /* nChannels */) {
@@ -465,7 +467,7 @@
     }
 }
 
-static void copyMultiCh24(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
+static void copyMultiCh24(int16_t *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
 {
     for (unsigned i = 0; i < nSamples; ++i) {
         for (unsigned c = 0; c < nChannels; ++c) {
@@ -475,7 +477,7 @@
 }
 
 static void copyTrespass(
-        short * /* dst */,
+        int16_t * /* dst */,
         const int *[FLACParser::kMaxChannels] /* src */,
         unsigned /* nSamples */,
         unsigned /* nChannels */) {
@@ -485,7 +487,7 @@
 // FLACParser
 
 FLACParser::FLACParser(
-        DataSourceBase *dataSource,
+        DataSourceHelper *dataSource,
         MetaDataBase *fileMetadata,
         MetaDataBase *trackMetadata)
     : mDataSource(dataSource),
@@ -592,7 +594,7 @@
         static const struct {
             unsigned mChannels;
             unsigned mBitsPerSample;
-            void (*mCopy)(short *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
+            void (*mCopy)(int16_t *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
         } table[] = {
             { 1,  8, copyMono8    },
             { 2,  8, copyStereo8  },
@@ -635,7 +637,7 @@
 {
     CHECK(mGroup == NULL);
     mGroup = new MediaBufferGroup;
-    mMaxBufferSize = getMaxBlockSize() * getChannels() * sizeof(short);
+    mMaxBufferSize = getMaxBlockSize() * getChannels() * sizeof(int16_t);
     mGroup->add_buffer(MediaBufferBase::Create(mMaxBufferSize));
 }
 
@@ -688,9 +690,9 @@
     if (err != OK) {
         return NULL;
     }
-    size_t bufferSize = blocksize * getChannels() * sizeof(short);
+    size_t bufferSize = blocksize * getChannels() * sizeof(int16_t);
     CHECK(bufferSize <= mMaxBufferSize);
-    short *data = (short *) buffer->data();
+    int16_t *data = (int16_t *) buffer->data();
     buffer->set_range(0, bufferSize);
     // copy PCM from FLAC write buffer to our media buffer, with interleaving
     (*mCopy)(data, mWriteBuffer, blocksize, getChannels());
@@ -706,7 +708,7 @@
 // FLACsource
 
 FLACSource::FLACSource(
-        DataSourceBase *dataSource,
+        DataSourceHelper *dataSource,
         MetaDataBase &trackMetadata)
     : mDataSource(dataSource),
       mTrackMetadata(trackMetadata),
@@ -787,7 +789,7 @@
 // FLACExtractor
 
 FLACExtractor::FLACExtractor(
-        DataSourceBase *dataSource)
+        DataSourceHelper *dataSource)
     : mDataSource(dataSource),
       mParser(nullptr),
       mInitCheck(false)
@@ -802,6 +804,7 @@
 {
     ALOGV("~FLACExtractor::FLACExtractor");
     delete mParser;
+    delete mDataSource;
 }
 
 size_t FLACExtractor::countTracks()
@@ -835,7 +838,7 @@
 
 // Sniffer
 
-bool SniffFLAC(DataSourceBase *source, float *confidence)
+bool SniffFLAC(DataSourceHelper *source, float *confidence)
 {
     // first 4 is the signature word
     // second 4 is the sizeof STREAMINFO
@@ -857,22 +860,23 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
             UUID("1364b048-cc45-4fda-9934-327d0ebf9829"),
             1,
             "FLAC Extractor",
             [](
-                    DataSourceBase *source,
+                    CDataSource *source,
                     float *confidence,
                     void **,
-                    MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
-                if (SniffFLAC(source, confidence)) {
+                    FreeMetaFunc *) -> CreatorFunc {
+                DataSourceHelper helper(source);
+                if (SniffFLAC(&helper, confidence)) {
                     return [](
-                            DataSourceBase *source,
-                            void *) -> MediaExtractor* {
-                        return new FLACExtractor(source);};
+                            CDataSource *source,
+                            void *) -> CMediaExtractor* {
+                        return wrap(new FLACExtractor(new DataSourceHelper(source)));};
                 }
                 return NULL;
             }
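
A smaller change rides along in the FLAC hunks: the copy routines take int16_t instead of short, so the output sample width is explicit and the buffer math (blocksize * channels * sizeof(int16_t)) reads the same way. The stereo case reduces to a planar-to-interleaved copy, sketched here with the two-channel source layout assumed from the code above:

#include <stdint.h>

// Planar 32-bit FLAC samples in, interleaved 16-bit PCM out, in the style of
// copyStereo16 above; values are assumed to already fit in 16 bits.
static void interleaveStereo16(int16_t *dst, const int *src[2], unsigned nSamples) {
    for (unsigned i = 0; i < nSamples; ++i) {
        *dst++ = (int16_t)src[0][i];
        *dst++ = (int16_t)src[1][i];
    }
}
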
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/FLACExtractor.h
index 7fb6ec6..1ddff43 100644
--- a/media/extractors/flac/FLACExtractor.h
+++ b/media/extractors/flac/FLACExtractor.h
@@ -18,7 +18,8 @@
 #define FLAC_EXTRACTOR_H_
 
 #include <media/DataSourceBase.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <utils/String8.h>
 
@@ -26,10 +27,10 @@
 
 class FLACParser;
 
-class FLACExtractor : public MediaExtractor {
+class FLACExtractor : public MediaExtractorPluginHelper {
 
 public:
-    explicit FLACExtractor(DataSourceBase *source);
+    explicit FLACExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -42,7 +43,7 @@
     virtual ~FLACExtractor();
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     FLACParser *mParser;
     status_t mInitCheck;
     MetaDataBase mFileMetadata;
@@ -55,8 +56,6 @@
 
 };
 
-bool SniffFLAC(DataSourceBase *source, float *confidence);
-
 }  // namespace android
 
 #endif  // FLAC_EXTRACTOR_H_
diff --git a/media/extractors/midi/MidiExtractor.cpp b/media/extractors/midi/MidiExtractor.cpp
index 949fbe0..233033e 100644
--- a/media/extractors/midi/MidiExtractor.cpp
+++ b/media/extractors/midi/MidiExtractor.cpp
@@ -142,7 +142,7 @@
 
 // MidiEngine
 
-MidiEngine::MidiEngine(DataSourceBase *dataSource,
+MidiEngine::MidiEngine(CDataSource *dataSource,
         MetaDataBase *fileMetadata,
         MetaDataBase *trackMetadata) :
             mGroup(NULL),
@@ -247,8 +247,9 @@
         EAS_I32 numRendered;
         EAS_RESULT result = EAS_Render(mEasData, p, mEasConfig->mixBufferSize, &numRendered);
         if (result != EAS_SUCCESS) {
-            ALOGE("EAS_Render returned %ld", result);
-            break;
+            ALOGE("EAS_Render() returned %ld, numBytesOutput = %d", result, numBytesOutput);
+            buffer->release();
+            return NULL; // Stop processing to prevent infinite loops.
         }
         p += numRendered * mEasConfig->numChannels;
         numBytesOutput += numRendered * mEasConfig->numChannels * sizeof(EAS_PCM);
@@ -262,7 +263,7 @@
 // MidiExtractor
 
 MidiExtractor::MidiExtractor(
-        DataSourceBase *dataSource)
+        CDataSource *dataSource)
     : mDataSource(dataSource),
       mInitCheck(false)
 {
@@ -309,7 +310,7 @@
 
 // Sniffer
 
-bool SniffMidi(DataSourceBase *source, float *confidence)
+bool SniffMidi(CDataSource *source, float *confidence)
 {
     MidiEngine p(source, NULL, NULL);
     if (p.initCheck() == OK) {
@@ -325,22 +326,22 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("ef6cca0a-f8a2-43e6-ba5f-dfcd7c9a7ef2"),
         1,
         "MIDI Extractor",
         [](
-                DataSourceBase *source,
+                CDataSource *source,
                 float *confidence,
                 void **,
-                MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
+                FreeMetaFunc *) -> CreatorFunc {
             if (SniffMidi(source, confidence)) {
                 return [](
-                        DataSourceBase *source,
-                        void *) -> MediaExtractor* {
-                    return new MidiExtractor(source);};
+                        CDataSource *source,
+                        void *) -> CMediaExtractor* {
+                    return wrap(new MidiExtractor(source));};
             }
             return NULL;
         }
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/MidiExtractor.h
index 244dd0f..fbbe93e 100644
--- a/media/extractors/midi/MidiExtractor.h
+++ b/media/extractors/midi/MidiExtractor.h
@@ -18,7 +18,8 @@
 #define MIDI_EXTRACTOR_H_
 
 #include <media/DataSourceBase.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MediaBufferBase.h>
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MetaDataBase.h>
@@ -30,7 +31,7 @@
 
 class MidiEngine {
 public:
-    explicit MidiEngine(DataSourceBase *dataSource,
+    explicit MidiEngine(CDataSource *dataSource,
             MetaDataBase *fileMetadata,
             MetaDataBase *trackMetadata);
     ~MidiEngine();
@@ -50,10 +51,10 @@
     bool mIsInitialized;
 };
 
-class MidiExtractor : public MediaExtractor {
+class MidiExtractor : public MediaExtractorPluginHelper {
 
 public:
-    explicit MidiExtractor(DataSourceBase *source);
+    explicit MidiExtractor(CDataSource *source);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -66,7 +67,7 @@
     virtual ~MidiExtractor();
 
 private:
-    DataSourceBase *mDataSource;
+    CDataSource *mDataSource;
     status_t mInitCheck;
     MetaDataBase mFileMetadata;
 
@@ -88,7 +89,7 @@
 
 };
 
-bool SniffMidi(DataSourceBase *source, float *confidence);
+bool SniffMidi(CDataSource *source, float *confidence);
 
 }  // namespace android
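
Back in MidiExtractor.cpp, the EAS_Render() change is a fail-fast fix: instead of breaking out of the loop and returning whatever had been rendered before the error, the buffer is released and NULL is returned, which, per the new comment, keeps a permanently failing engine from being polled forever. A condensed sketch of that loop; the EAS types come from the Sonivox headers the extractor already uses (include path assumed) and requestedBytes stands in for the real buffer sizing:

#define LOG_TAG "MidiRenderSketch"
#include <utils/Log.h>

#include <libsonivox/eas.h>                    // EAS_* types; path assumed
#include <media/stagefright/MediaBufferBase.h>

// Render until the buffer is full or the engine reports an error.
static android::MediaBufferBase *renderPcm(
        EAS_DATA_HANDLE easData, const S_EAS_LIB_CONFIG *config,
        android::MediaBufferBase *buffer, int requestedBytes) {
    EAS_PCM *p = (EAS_PCM *)buffer->data();
    int numBytesOutput = 0;
    while (numBytesOutput < requestedBytes) {
        EAS_I32 numRendered;
        EAS_RESULT result = EAS_Render(easData, p, config->mixBufferSize, &numRendered);
        if (result != EAS_SUCCESS) {
            ALOGE("EAS_Render() returned %ld, numBytesOutput = %d", result, numBytesOutput);
            buffer->release();
            return NULL;    // fail fast rather than returning a broken buffer
        }
        p += numRendered * config->numChannels;
        numBytesOutput += numRendered * config->numChannels * sizeof(EAS_PCM);
    }
    buffer->set_range(0, numBytesOutput);
    return buffer;
}
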
 
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index d657582..a387970 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -44,7 +44,7 @@
 namespace android {
 
 struct DataSourceBaseReader : public mkvparser::IMkvReader {
-    explicit DataSourceBaseReader(DataSourceBase *source)
+    explicit DataSourceBaseReader(DataSourceHelper *source)
         : mSource(source) {
     }
 
@@ -86,7 +86,7 @@
     }
 
 private:
-    DataSourceBase *mSource;
+    DataSourceHelper *mSource;
 
     DataSourceBaseReader(const DataSourceBaseReader &);
     DataSourceBaseReader &operator=(const DataSourceBaseReader &);
@@ -921,7 +921,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-MatroskaExtractor::MatroskaExtractor(DataSourceBase *source)
+MatroskaExtractor::MatroskaExtractor(DataSourceHelper *source)
     : mDataSource(source),
       mReader(new DataSourceBaseReader(mDataSource)),
       mSegment(NULL),
@@ -994,6 +994,8 @@
 
     delete mReader;
     mReader = NULL;
+
+    delete mDataSource;
 }
 
 size_t MatroskaExtractor::countTracks() {
@@ -1621,7 +1623,7 @@
 }
 
 bool SniffMatroska(
-        DataSourceBase *source, float *confidence) {
+        DataSourceHelper *source, float *confidence) {
     DataSourceBaseReader reader(source);
     mkvparser::EBMLHeader ebmlHeader;
     long long pos;
@@ -1638,22 +1640,23 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("abbedd92-38c4-4904-a4c1-b3f45f899980"),
         1,
         "Matroska Extractor",
         [](
-                DataSourceBase *source,
+                CDataSource *source,
                 float *confidence,
                 void **,
-                MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
-            if (SniffMatroska(source, confidence)) {
+                FreeMetaFunc *) -> CreatorFunc {
+            DataSourceHelper helper(source);
+            if (SniffMatroska(&helper, confidence)) {
                 return [](
-                        DataSourceBase *source,
-                        void *) -> MediaExtractor* {
-                    return new MatroskaExtractor(source);};
+                        CDataSource *source,
+                        void *) -> CMediaExtractor* {
+                    return wrap(new MatroskaExtractor(new DataSourceHelper(source)));};
             }
             return NULL;
         }
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/MatroskaExtractor.h
index 3568ea1..2c6ca85 100644
--- a/media/extractors/mkv/MatroskaExtractor.h
+++ b/media/extractors/mkv/MatroskaExtractor.h
@@ -20,7 +20,8 @@
 
 #include "mkvparser/mkvparser.h"
 
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <utils/Vector.h>
 #include <utils/threads.h>
@@ -34,8 +35,8 @@
 struct DataSourceBaseReader;
 struct MatroskaSource;
 
-struct MatroskaExtractor : public MediaExtractor {
-    explicit MatroskaExtractor(DataSourceBase *source);
+struct MatroskaExtractor : public MediaExtractorPluginHelper {
+    explicit MatroskaExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
 
@@ -76,7 +77,7 @@
     Mutex mLock;
     Vector<TrackInfo> mTracks;
 
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     DataSourceBaseReader *mReader;
     mkvparser::Segment *mSegment;
     bool mExtractedThumbnails;
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 33cff96..a1e5593 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -24,7 +24,6 @@
 #include "VBRISeeker.h"
 #include "XINGSeeker.h"
 
-#include <media/DataSourceBase.h>
 #include <media/MediaTrack.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -46,7 +45,7 @@
 static const uint32_t kMask = 0xfffe0c00;
 
 static bool Resync(
-        DataSourceBase *source, uint32_t match_header,
+        DataSourceHelper *source, uint32_t match_header,
         off64_t *inout_pos, off64_t *post_id3_pos, uint32_t *out_header) {
     if (post_id3_pos != NULL) {
         *post_id3_pos = 0;
@@ -212,7 +211,7 @@
 class MP3Source : public MediaTrack {
 public:
     MP3Source(
-            MetaDataBase &meta, DataSourceBase *source,
+            MetaDataBase &meta, DataSourceHelper *source,
             off64_t first_frame_pos, uint32_t fixed_header,
             MP3Seeker *seeker);
 
@@ -230,7 +229,7 @@
 private:
     static const size_t kMaxFrameSize;
     MetaDataBase &mMeta;
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     off64_t mFirstFramePos;
     uint32_t mFixedHeader;
     off64_t mCurrentPos;
@@ -253,7 +252,7 @@
 };
 
 MP3Extractor::MP3Extractor(
-        DataSourceBase *source, Mp3Meta *meta)
+        DataSourceHelper *source, Mp3Meta *meta)
     : mInitCheck(NO_INIT),
       mDataSource(source),
       mFirstFramePos(-1),
@@ -371,7 +370,8 @@
     // Get iTunes-style gapless info if present.
     // When getting the id3 tag, skip the V1 tags to prevent the source cache
     // from being iterated to the end of the file.
-    ID3 id3(mDataSource, true);
+    DataSourceHelper helper(mDataSource);
+    ID3 id3(&helper, true);
     if (id3.isValid()) {
         ID3::Iterator *com = new ID3::Iterator(id3, "COM");
         if (com->done()) {
@@ -404,6 +404,7 @@
 
 MP3Extractor::~MP3Extractor() {
     delete mSeeker;
+    delete mDataSource;
 }
 
 size_t MP3Extractor::countTracks() {
@@ -440,7 +441,7 @@
 // Set our max frame size to the nearest power of 2 above this size (aka, 4kB)
 const size_t MP3Source::kMaxFrameSize = (1 << 12); /* 4096 bytes */
 MP3Source::MP3Source(
-        MetaDataBase &meta, DataSourceBase *source,
+        MetaDataBase &meta, DataSourceHelper *source,
         off64_t first_frame_pos, uint32_t fixed_header,
         MP3Seeker *seeker)
     : mMeta(meta),
@@ -612,7 +613,8 @@
     }
     meta.setCString(kKeyMIMEType, "audio/mpeg");
 
-    ID3 id3(mDataSource);
+    DataSourceHelper helper(mDataSource);
+    ID3 id3(&helper);
 
     if (!id3.isValid()) {
         return OK;
@@ -669,21 +671,22 @@
     return OK;
 }
 
-static MediaExtractor* CreateExtractor(
-        DataSourceBase *source,
+static CMediaExtractor* CreateExtractor(
+        CDataSource *source,
         void *meta) {
     Mp3Meta *metaData = static_cast<Mp3Meta *>(meta);
-    return new MP3Extractor(source, metaData);
+    return wrap(new MP3Extractor(new DataSourceHelper(source), metaData));
 }
 
-static MediaExtractor::CreatorFunc Sniff(
-        DataSourceBase *source, float *confidence, void **meta,
-        MediaExtractor::FreeMetaFunc *freeMeta) {
+static CreatorFunc Sniff(
+        CDataSource *source, float *confidence, void **meta,
+        FreeMetaFunc *freeMeta) {
     off64_t pos = 0;
     off64_t post_id3_pos;
     uint32_t header;
     uint8_t mpeg_header[5];
-    if (source->readAt(0, mpeg_header, sizeof(mpeg_header)) < (ssize_t)sizeof(mpeg_header)) {
+    DataSourceHelper helper(source);
+    if (helper.readAt(0, mpeg_header, sizeof(mpeg_header)) < (ssize_t)sizeof(mpeg_header)) {
         return NULL;
     }
 
@@ -691,7 +694,7 @@
         ALOGV("MPEG1PS container is not supported!");
         return NULL;
     }
-    if (!Resync(source, 0, &pos, &post_id3_pos, &header)) {
+    if (!Resync(&helper, 0, &pos, &post_id3_pos, &header)) {
         return NULL;
     }
 
@@ -710,9 +713,9 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("812a3f6c-c8cf-46de-b529-3774b14103d4"),
         1, // version
         "MP3 Extractor",
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/MP3Extractor.h
index 485b0ca..585d9f6 100644
--- a/media/extractors/mp3/MP3Extractor.h
+++ b/media/extractors/mp3/MP3Extractor.h
@@ -19,20 +19,22 @@
 #define MP3_EXTRACTOR_H_
 
 #include <utils/Errors.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 
 namespace android {
 
+class DataSourceHelper;
+
 struct AMessage;
-class DataSourceBase;
 struct MP3Seeker;
 class String8;
 struct Mp3Meta;
 
-class MP3Extractor : public MediaExtractor {
+class MP3Extractor : public MediaExtractorPluginHelper {
 public:
-    MP3Extractor(DataSourceBase *source, Mp3Meta *meta);
+    MP3Extractor(DataSourceHelper *source, Mp3Meta *meta);
     ~MP3Extractor();
 
     virtual size_t countTracks();
@@ -45,7 +47,7 @@
 private:
     status_t mInitCheck;
 
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     off64_t mFirstFramePos;
     MetaDataBase mMeta;
     uint32_t mFixedHeader;
diff --git a/media/extractors/mp3/VBRISeeker.cpp b/media/extractors/mp3/VBRISeeker.cpp
index 523f14c..9eb72a7 100644
--- a/media/extractors/mp3/VBRISeeker.cpp
+++ b/media/extractors/mp3/VBRISeeker.cpp
@@ -27,7 +27,9 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ByteUtils.h>
-#include <media/DataSourceBase.h>
+
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 
 namespace android {
 
@@ -37,7 +39,7 @@
 
 // static
 VBRISeeker *VBRISeeker::CreateFromSource(
-        DataSourceBase *source, off64_t post_id3_pos) {
+        DataSourceHelper *source, off64_t post_id3_pos) {
     off64_t pos = post_id3_pos;
 
     uint8_t header[4];
diff --git a/media/extractors/mp3/VBRISeeker.h b/media/extractors/mp3/VBRISeeker.h
index 9213f6e..507899c 100644
--- a/media/extractors/mp3/VBRISeeker.h
+++ b/media/extractors/mp3/VBRISeeker.h
@@ -24,11 +24,11 @@
 
 namespace android {
 
-class DataSourceBase;
+class DataSourceHelper;
 
 struct VBRISeeker : public MP3Seeker {
     static VBRISeeker *CreateFromSource(
-            DataSourceBase *source, off64_t post_id3_pos);
+            DataSourceHelper *source, off64_t post_id3_pos);
 
     virtual bool getDuration(int64_t *durationUs);
     virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos);
diff --git a/media/extractors/mp3/XINGSeeker.cpp b/media/extractors/mp3/XINGSeeker.cpp
index 95ca556..9f1fd7a 100644
--- a/media/extractors/mp3/XINGSeeker.cpp
+++ b/media/extractors/mp3/XINGSeeker.cpp
@@ -21,7 +21,9 @@
 #include <media/stagefright/foundation/avc_utils.h>
 
 #include <media/stagefright/foundation/ByteUtils.h>
-#include <media/DataSourceBase.h>
+
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 
 namespace android {
 
@@ -44,7 +46,7 @@
 }
 
 bool XINGSeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
-    if (mSizeBytes == 0 || !mTOCValid || mDurationUs < 0) {
+    if (mSizeBytes == 0 || mDurationUs < 0) {
         return false;
     }
 
@@ -54,7 +56,7 @@
         fx = 0.0f;
     } else if( percent >= 100.0f ) {
         fx = 256.0f;
-    } else {
+    } else if (mTOCValid) {
         int a = (int)percent;
         float fa, fb;
         if ( a == 0 ) {
@@ -68,6 +70,8 @@
             fb = 256.0f;
         }
         fx = fa + (fb-fa)*(percent-a);
+    } else {
+        fx = percent * 2.56f;
     }
 
     *pos = (int)((1.0f/256.0f)*fx*mSizeBytes) + mFirstFramePos;
@@ -77,7 +81,7 @@
 
 // static
 XINGSeeker *XINGSeeker::CreateFromSource(
-        DataSourceBase *source, off64_t first_frame_pos) {
+        DataSourceHelper *source, off64_t first_frame_pos) {
 
     uint8_t buffer[4];
     int offset = first_frame_pos;
diff --git a/media/extractors/mp3/XINGSeeker.h b/media/extractors/mp3/XINGSeeker.h
index 5867eae..9acee38 100644
--- a/media/extractors/mp3/XINGSeeker.h
+++ b/media/extractors/mp3/XINGSeeker.h
@@ -22,11 +22,11 @@
 
 namespace android {
 
-class DataSourceBase;
+class DataSourceHelper;
 
 struct XINGSeeker : public MP3Seeker {
     static XINGSeeker *CreateFromSource(
-            DataSourceBase *source, off64_t first_frame_pos);
+            DataSourceHelper *source, off64_t first_frame_pos);
 
     virtual bool getDuration(int64_t *durationUs);
     virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos);
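
One more note before the new AC-4 files: the XINGSeeker change makes the table of contents optional. With a valid TOC the fractional index fx is still interpolated from the table; without one it now falls back to a linear mapping, fx = percent * 2.56 (the time percentage rescaled onto the same 0..256 range), and in both cases the target byte position is fx/256 of mSizeBytes past the first frame. A standalone sketch of the fallback path, with the member fields turned into parameters:

#include <stdint.h>
#include <sys/types.h>

// Linear (no-TOC) seek mapping matching the new else branch above:
// time fraction -> fx on a 0..256 scale -> byte offset into the audio data.
static off64_t linearSeekPos(int64_t timeUs, int64_t durationUs,
                             off64_t sizeBytes, off64_t firstFramePos) {
    float percent = (float)timeUs * 100.0f / (float)durationUs;
    if (percent <= 0.0f) {
        percent = 0.0f;
    } else if (percent >= 100.0f) {
        percent = 100.0f;
    }
    float fx = percent * 2.56f;   // 100% maps to 256, like the TOC path
    return (off64_t)((1.0f / 256.0f) * fx * sizeBytes) + firstFramePos;
}

// Example: a 60 s stream of 1,200,000 audio bytes seeking to 30 s gives
// percent = 50, fx = 128, and a position of firstFramePos + 600,000 bytes.
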
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
new file mode 100644
index 0000000..a95c2db
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -0,0 +1,624 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AC4Parser"
+
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include "AC4Parser.h"
+
+#define BOOLSTR(a)  ((a)?"true":"false")
+#define BYTE_ALIGN mBitReader.skipBits(mBitReader.numBitsLeft() % 8)
+#define CHECK_BITS_LEFT(n) if (mBitReader.numBitsLeft() < n) {return false;}
+
+namespace android {
+
+AC4Parser::AC4Parser() {
+}
+
+AC4DSIParser::AC4DSIParser(ABitReader &br)
+    : mBitReader(br){
+
+    mDSISize = mBitReader.numBitsLeft();
+}
+
+// ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+static const char *ChannelModes[] = {
+    "mono",
+    "stereo",
+    "3.0",
+    "5.0",
+    "5.1",
+    "7.0 (3/4/0)",
+    "7.1 (3/4/0.1)",
+    "7.0 (5/2/0)",
+    "7.1 (5/2/0.1)",
+    "7.0 (3/2/2)",
+    "7.1 (3/2/2.1)",
+    "7.0.4",
+    "7.1.4",
+    "9.0.4",
+    "9.1.4",
+    "22.2"
+};
+
+static const char* ContentClassifier[] = {
+    "Complete Main",
+    "Music and Effects",
+    "Visually Impaired",
+    "Hearing Impaired",
+    "Dialog",
+    "Commentary",
+    "Emergency",
+    "Voice Over"
+};
+
+bool AC4DSIParser::parseLanguageTag(uint32_t presentationID, uint32_t substreamID){
+    CHECK_BITS_LEFT(6);
+    uint32_t n_language_tag_bytes = mBitReader.getBits(6);
+    if (n_language_tag_bytes < 2 || n_language_tag_bytes >= 42) {
+        return false;
+    }
+    CHECK_BITS_LEFT(n_language_tag_bytes * 8);
+    char language_tag_bytes[42]; // TS 103 190 part 1 4.3.3.8.7
+    for (uint32_t i = 0; i < n_language_tag_bytes; i++) {
+        language_tag_bytes[i] = (char)mBitReader.getBits(8);
+    }
+    language_tag_bytes[n_language_tag_bytes] = 0;
+    ALOGV("%u.%u: language_tag = %s\n", presentationID, substreamID, language_tag_bytes);
+
+    std::string language(language_tag_bytes, n_language_tag_bytes);
+    mPresentations[presentationID].mLanguage = language;
+
+    return true;
+}
+
+// TS 103 190-1 v1.2.1 E.5 and TS 103 190-2 v1.1.1 E.9
+bool AC4DSIParser::parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID){
+    CHECK_BITS_LEFT(5);
+    uint32_t channel_mode = mBitReader.getBits(5);
+    CHECK_BITS_LEFT(2);
+    uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+    CHECK_BITS_LEFT(1);
+    bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+    ALOGV("%u.%u: channel_mode = %u (%s)\n", presentationID, substreamID, channel_mode,
+    channel_mode < NELEM(ChannelModes) ? ChannelModes[channel_mode] : "reserved");
+    ALOGV("%u.%u: dsi_sf_multiplier = %u\n", presentationID,
+        substreamID, dsi_sf_multiplier);
+    ALOGV("%u.%u: b_substream_bitrate_indicator = %s\n", presentationID,
+        substreamID, BOOLSTR(b_substream_bitrate_indicator));
+
+    if (b_substream_bitrate_indicator) {
+        CHECK_BITS_LEFT(5);
+        uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+        ALOGV("%u.%u: substream_bitrate_indicator = %u\n", presentationID, substreamID,
+            substream_bitrate_indicator);
+    }
+    if (channel_mode >= 7 && channel_mode <= 10) {
+        CHECK_BITS_LEFT(1);
+        uint32_t add_ch_base = mBitReader.getBits(1);
+        ALOGV("%u.%u: add_ch_base = %u\n", presentationID, substreamID, add_ch_base);
+    }
+    CHECK_BITS_LEFT(1);
+    bool b_content_type = (mBitReader.getBits(1) == 1);
+    ALOGV("%u.%u: b_content_type = %s\n", presentationID, substreamID, BOOLSTR(b_content_type));
+    if (b_content_type) {
+        CHECK_BITS_LEFT(3);
+        uint32_t content_classifier = mBitReader.getBits(3);
+        ALOGV("%u.%u: content_classifier = %u (%s)\n", presentationID, substreamID,
+            content_classifier, ContentClassifier[content_classifier]);
+
+        // For streams based on TS 103 190 part 1 the presentation level channel_mode doesn't
+        // exist and so we use the channel_mode from either the CM or M&E substream
+        // (they are mutually exclusive)
+        if (mPresentations[presentationID].mChannelMode == -1 &&
+            (content_classifier == 0 || content_classifier == 1)) {
+            mPresentations[presentationID].mChannelMode = channel_mode;
+        }
+        mPresentations[presentationID].mContentClassifier = content_classifier;
+        CHECK_BITS_LEFT(1);
+        bool b_language_indicator = (mBitReader.getBits(1) == 1);
+        ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, substreamID,
+            BOOLSTR(b_language_indicator));
+        if (b_language_indicator) {
+            if (!parseLanguageTag(presentationID, substreamID)) {
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+// ETSI TS 103 190-2 v1.1.1 section E.11
+bool AC4DSIParser::parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID)
+{
+    CHECK_BITS_LEFT(1);
+    bool b_substreams_present = (mBitReader.getBits(1) == 1);
+    CHECK_BITS_LEFT(1);
+    bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+    CHECK_BITS_LEFT(1);
+    bool b_channel_coded = (mBitReader.getBits(1) == 1);
+    CHECK_BITS_LEFT(8);
+    uint32_t n_substreams = mBitReader.getBits(8);
+    ALOGV("%u.%u: b_substreams_present = %s\n", presentationID, groupID,
+        BOOLSTR(b_substreams_present));
+    ALOGV("%u.%u: b_hsf_ext = %s\n", presentationID, groupID, BOOLSTR(b_hsf_ext));
+    ALOGV("%u.%u: b_channel_coded = %s\n", presentationID, groupID, BOOLSTR(b_channel_coded));
+    ALOGV("%u.%u: n_substreams = %u\n", presentationID, groupID, n_substreams);
+
+    for (uint32_t i = 0; i < n_substreams; i++) {
+        CHECK_BITS_LEFT(2);
+        uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+        CHECK_BITS_LEFT(1);
+        bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+        ALOGV("%u.%u.%u: dsi_sf_multiplier = %u\n", presentationID, groupID, i, dsi_sf_multiplier);
+        ALOGV("%u.%u.%u: b_substream_bitrate_indicator = %s\n", presentationID, groupID, i,
+            BOOLSTR(b_substream_bitrate_indicator));
+
+        if (b_substream_bitrate_indicator) {
+            CHECK_BITS_LEFT(5);
+            uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+            ALOGV("%u.%u.%u: substream_bitrate_indicator = %u\n", presentationID, groupID, i,
+                substream_bitrate_indicator);
+        }
+        if (b_channel_coded) {
+            CHECK_BITS_LEFT(24);
+            uint32_t dsi_substream_channel_mask = mBitReader.getBits(24);
+            ALOGV("%u.%u.%u: dsi_substream_channel_mask = 0x%06x\n", presentationID, groupID, i,
+                dsi_substream_channel_mask);
+        } else {
+            CHECK_BITS_LEFT(1);
+            bool b_ajoc = (mBitReader.getBits(1) == 1);
+            ALOGV("%u.%u.%u: b_ajoc = %s\n", presentationID, groupID, i, BOOLSTR(b_ajoc));
+            if (b_ajoc) {
+                CHECK_BITS_LEFT(1);
+                bool b_static_dmx = (mBitReader.getBits(1) == 1);
+                ALOGV("%u.%u.%u: b_static_dmx = %s\n", presentationID, groupID, i,
+                    BOOLSTR(b_static_dmx));
+                if (!b_static_dmx) {
+                    CHECK_BITS_LEFT(4);
+                    uint32_t n_dmx_objects_minus1 = mBitReader.getBits(4);
+                    ALOGV("%u.%u.%u: n_dmx_objects_minus1 = %u\n", presentationID, groupID, i,
+                        n_dmx_objects_minus1);
+                }
+                CHECK_BITS_LEFT(6);
+                uint32_t n_umx_objects_minus1 = mBitReader.getBits(6);
+                ALOGV("%u.%u.%u: n_umx_objects_minus1 = %u\n", presentationID, groupID, i,
+                    n_umx_objects_minus1);
+            }
+            CHECK_BITS_LEFT(4);
+            mBitReader.skipBits(4); // objects_assignment_mask
+        }
+    }
+
+    CHECK_BITS_LEFT(1);
+    bool b_content_type = (mBitReader.getBits(1) == 1);
+    ALOGV("%u.%u: b_content_type = %s\n", presentationID, groupID, BOOLSTR(b_content_type));
+    if (b_content_type) {
+        CHECK_BITS_LEFT(3);
+        uint32_t content_classifier = mBitReader.getBits(3);
+        ALOGV("%u.%u: content_classifier = %s (%u)\n", presentationID, groupID,
+            ContentClassifier[content_classifier], content_classifier);
+
+        mPresentations[presentationID].mContentClassifier = content_classifier;
+
+        CHECK_BITS_LEFT(1);
+        bool b_language_indicator = (mBitReader.getBits(1) == 1);
+        ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, groupID,
+            BOOLSTR(b_language_indicator));
+
+        if (b_language_indicator) {
+            if (!parseLanguageTag(presentationID, groupID)) {
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+bool AC4DSIParser::parseBitrateDsi() {
+    CHECK_BITS_LEFT(2 + 32 + 32);
+    mBitReader.skipBits(2); // bit_rate_mode
+    mBitReader.skipBits(32); // bit_rate
+    mBitReader.skipBits(32); // bit_rate_precision
+
+    return true;
+}
+
+// TS 103 190-1 section E.4 (ac4_dsi) and TS 103 190-2 section E.6 (ac4_dsi_v1)
+bool AC4DSIParser::parse() {
+    CHECK_BITS_LEFT(3);
+    uint32_t ac4_dsi_version = mBitReader.getBits(3);
+    if (ac4_dsi_version > 1) {
+        ALOGE("error while parsing ac-4 dsi: only versions 0 and 1 are supported");
+        return false;
+    }
+
+    CHECK_BITS_LEFT(7 + 1 + 4 + 9);
+    uint32_t bitstream_version = mBitReader.getBits(7);
+    mBitReader.skipBits(1); // fs_index
+    mBitReader.skipBits(4); // frame_rate_index
+    uint32_t n_presentations = mBitReader.getBits(9);
+
+    int32_t short_program_id = -1;
+    if (bitstream_version > 1) {
+        if (ac4_dsi_version == 0){
+            ALOGE("invalid ac4 dsi");
+            return false;
+        }
+        CHECK_BITS_LEFT(1);
+        bool b_program_id = (mBitReader.getBits(1) == 1);
+        if (b_program_id) {
+            CHECK_BITS_LEFT(16 + 1);
+            short_program_id = mBitReader.getBits(16);
+            bool b_uuid = (mBitReader.getBits(1) == 1);
+            if (b_uuid) {
+                const uint32_t kAC4UUIDSizeInBytes = 16;
+                char program_uuid[kAC4UUIDSizeInBytes];
+                CHECK_BITS_LEFT(kAC4UUIDSizeInBytes * 8);
+                for (uint32_t i = 0; i < kAC4UUIDSizeInBytes; i++) {
+                    program_uuid[i] = (char)(mBitReader.getBits(8));
+                }
+                ALOGV("UUID = %s", program_uuid);
+            }
+        }
+    }
+
+    if (ac4_dsi_version == 1) {
+        if (!parseBitrateDsi()) {
+            return false;
+        }
+        BYTE_ALIGN;
+    }
+
+    for (uint32_t presentation = 0; presentation < n_presentations; presentation++) {
+        mPresentations[presentation].mProgramID = short_program_id;
+        // known as b_single_substream in ac4_dsi_version 0
+        bool b_single_substream_group = false;
+        uint32_t presentation_config = 0, presentation_version = 0;
+        uint32_t pres_bytes = 0;
+
+        if (ac4_dsi_version == 0) {
+            CHECK_BITS_LEFT(1 + 5 + 5);
+            b_single_substream_group = (mBitReader.getBits(1) == 1);
+            presentation_config = mBitReader.getBits(5);
+            presentation_version = mBitReader.getBits(5);
+        } else if (ac4_dsi_version == 1) {
+            CHECK_BITS_LEFT(8 + 8);
+            presentation_version = mBitReader.getBits(8);
+            pres_bytes = mBitReader.getBits(8);
+            if (pres_bytes == 0xff) {
+                CHECK_BITS_LEFT(16);
+                pres_bytes += mBitReader.getBits(16);
+            }
+            ALOGV("%u: pres_bytes = %u\n", presentation, pres_bytes);
+            if (presentation_version > 1) {
+                CHECK_BITS_LEFT(pres_bytes * 8);
+                mBitReader.skipBits(pres_bytes * 8);
+                continue;
+            }
+            // ac4_presentation_v0_dsi() and ac4_presentation_v1_dsi() both
+            // start with a presentation_config of 5 bits
+            CHECK_BITS_LEFT(5);
+            presentation_config = mBitReader.getBits(5);
+            b_single_substream_group = (presentation_config == 0x1f);
+        }
+
+        static const char *PresentationConfig[] = {
+            "Music&Effects + Dialog",
+            "Main + DE",
+            "Main + Associate",
+            "Music&Effects + Dialog + Associate",
+            "Main + DE + Associate",
+            "Arbitrary substream groups",
+            "EMDF only"
+        };
+        ALOGV("%u: b_single_substream/group = %s\n", presentation,
+            BOOLSTR(b_single_substream_group));
+        ALOGV("%u: presentation_version = %u\n", presentation, presentation_version);
+        ALOGV("%u: presentation_config = %u (%s)\n", presentation, presentation_config,
+            (presentation_config >= NELEM(PresentationConfig) ?
+            "reserved" : PresentationConfig[presentation_config]));
+
+        /* record a marker, less the size of the presentation_config */
+        uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8;
+
+        bool b_add_emdf_substreams = false;
+        if (!b_single_substream_group && presentation_config == 6) {
+            b_add_emdf_substreams = true;
+            ALOGV("%u: b_add_emdf_substreams = %s\n", presentation, BOOLSTR(b_add_emdf_substreams));
+        } else {
+            CHECK_BITS_LEFT(3 + 1);
+            uint32_t mdcompat = mBitReader.getBits(3);
+            ALOGV("%u: mdcompat = %d\n", presentation, mdcompat);
+
+            bool b_presentation_group_index = (mBitReader.getBits(1) == 1);
+            ALOGV("%u: b_presentation_group_index = %s\n", presentation,
+                BOOLSTR(b_presentation_group_index));
+            if (b_presentation_group_index) {
+                CHECK_BITS_LEFT(5);
+                mPresentations[presentation].mGroupIndex = mBitReader.getBits(5);
+                ALOGV("%u: presentation_group_index = %d\n", presentation,
+                    mPresentations[presentation].mGroupIndex);
+            }
+            CHECK_BITS_LEFT(2);
+            uint32_t dsi_frame_rate_multiply_info = mBitReader.getBits(2);
+            ALOGV("%u: dsi_frame_rate_multiply_info = %d\n", presentation,
+                dsi_frame_rate_multiply_info);
+            if (ac4_dsi_version == 1 && presentation_version == 1) {
+                CHECK_BITS_LEFT(2);
+                uint32_t dsi_frame_rate_fraction_info = mBitReader.getBits(2);
+                ALOGV("%u: dsi_frame_rate_fraction_info = %d\n", presentation,
+                    dsi_frame_rate_fraction_info);
+            }
+            CHECK_BITS_LEFT(5 + 10);
+            uint32_t presentation_emdf_version = mBitReader.getBits(5);
+            uint32_t presentation_key_id = mBitReader.getBits(10);
+            ALOGV("%u: presentation_emdf_version = %d\n", presentation, presentation_emdf_version);
+            ALOGV("%u: presentation_key_id = %d\n", presentation, presentation_key_id);
+
+            if (ac4_dsi_version == 1) {
+                bool b_presentation_channel_coded = false;
+                if (presentation_version == 0) {
+                    b_presentation_channel_coded = true;
+                } else {
+                    CHECK_BITS_LEFT(1);
+                    b_presentation_channel_coded = (mBitReader.getBits(1) == 1);
+                }
+                ALOGV("%u: b_presentation_channel_coded = %s\n", presentation,
+                    BOOLSTR(b_presentation_channel_coded));
+                if (b_presentation_channel_coded) {
+                    if (presentation_version == 1) {
+                        CHECK_BITS_LEFT(5);
+                        uint32_t dsi_presentation_ch_mode = mBitReader.getBits(5);
+                        mPresentations[presentation].mChannelMode = dsi_presentation_ch_mode;
+                        ALOGV("%u: dsi_presentation_ch_mode = %d (%s)\n", presentation,
+                            dsi_presentation_ch_mode,
+                            dsi_presentation_ch_mode < NELEM(ChannelModes) ?
+                            ChannelModes[dsi_presentation_ch_mode] : "reserved");
+
+                        if (dsi_presentation_ch_mode >= 11 && dsi_presentation_ch_mode <= 14) {
+                            CHECK_BITS_LEFT(1 + 2);
+                            uint32_t pres_b_4_back_channels_present = mBitReader.getBits(1);
+                            uint32_t pres_top_channel_pairs = mBitReader.getBits(2);
+                            ALOGV("%u: pres_b_4_back_channels_present = %s\n", presentation,
+                                BOOLSTR(pres_b_4_back_channels_present));
+                            ALOGV("%u: pres_top_channel_pairs = %d\n", presentation,
+                                pres_top_channel_pairs);
+                        }
+                    }
+                    // presentation_channel_mask in ac4_presentation_v0_dsi()
+                    CHECK_BITS_LEFT(24);
+                    uint32_t presentation_channel_mask_v1 = mBitReader.getBits(24);
+                    ALOGV("%u: presentation_channel_mask_v1 = 0x%06x\n", presentation,
+                        presentation_channel_mask_v1);
+                }
+                if (presentation_version == 1) {
+                    CHECK_BITS_LEFT(1);
+                    bool b_presentation_core_differs = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_presentation_core_differs = %s\n", presentation,
+                        BOOLSTR(b_presentation_core_differs));
+                    if (b_presentation_core_differs) {
+                        CHECK_BITS_LEFT(1);
+                        bool b_presentation_core_channel_coded = (mBitReader.getBits(1) == 1);
+                        if (b_presentation_core_channel_coded) {
+                            CHECK_BITS_LEFT(2);
+                            mBitReader.skipBits(2); // dsi_presentation_channel_mode_core
+                        }
+                    }
+                    CHECK_BITS_LEFT(1);
+                    bool b_presentation_filter = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_presentation_filter = %s\n", presentation,
+                        BOOLSTR(b_presentation_filter));
+                    if (b_presentation_filter) {
+                        CHECK_BITS_LEFT(1 + 8);
+                        bool b_enable_presentation = (mBitReader.getBits(1) == 1);
+                        if (!b_enable_presentation) {
+                            mPresentations[presentation].mEnabled = false;
+                        }
+                        ALOGV("%u: b_enable_presentation = %s\n", presentation,
+                            BOOLSTR(b_enable_presentation));
+                        uint32_t n_filter_bytes = mBitReader.getBits(8);
+                        CHECK_BITS_LEFT(n_filter_bytes * 8);
+                        for (uint32_t i = 0; i < n_filter_bytes; i++) {
+                            mBitReader.skipBits(8); // filter_data
+                        }
+                    }
+                }
+            } /* ac4_dsi_version == 1 */
+
+            if (b_single_substream_group) {
+                if (presentation_version == 0) {
+                    if (!parseSubstreamDSI(presentation, 0)) {
+                        return false;
+                    }
+                } else {
+                    if (!parseSubstreamGroupDSI(presentation, 0)) {
+                        return false;
+                    }
+                }
+            } else {
+                if (ac4_dsi_version == 1) {
+                    CHECK_BITS_LEFT(1);
+                    bool b_multi_pid = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_multi_pid = %s\n", presentation, BOOLSTR(b_multi_pid));
+                } else {
+                    CHECK_BITS_LEFT(1);
+                    bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_hsf_ext = %s\n", presentation, BOOLSTR(b_hsf_ext));
+                }
+                switch (presentation_config) {
+                case 0:
+                case 1:
+                case 2:
+                    if (presentation_version == 0) {
+                        if (!parseSubstreamDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamDSI(presentation, 1)) {
+                            return false;
+                        }
+                    } else {
+                        if (!parseSubstreamGroupDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamGroupDSI(presentation, 1)) {
+                            return false;
+                        }
+                    }
+                    break;
+                case 3:
+                case 4:
+                    if (presentation_version == 0) {
+                        if (!parseSubstreamDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamDSI(presentation, 1)) {
+                            return false;
+                        }
+                        if (!parseSubstreamDSI(presentation, 2)) {
+                            return false;
+                        }
+                    } else {
+                        if (!parseSubstreamGroupDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamGroupDSI(presentation, 1)) {
+                            return false;
+                        }
+                        if (!parseSubstreamGroupDSI(presentation, 2)) {
+                            return false;
+                        }
+                    }
+                    break;
+                case 5:
+                    if (presentation_version == 0) {
+                        if (!parseSubstreamDSI(presentation, 0)) {
+                            return false;
+                        }
+                    } else {
+                        CHECK_BITS_LEFT(3);
+                        uint32_t n_substream_groups_minus2 = mBitReader.getBits(3);
+                        ALOGV("%u: n_substream_groups_minus2 = %d\n", presentation,
+                            n_substream_groups_minus2);
+                        for (uint32_t sg = 0; sg < n_substream_groups_minus2 + 2; sg++) {
+                            if (!parseSubstreamGroupDSI(presentation, sg)) {
+                                return false;
+                            }
+                        }
+                    }
+                    break;
+                default:
+                    CHECK_BITS_LEFT(7);
+                    uint32_t n_skip_bytes = mBitReader.getBits(7);
+                    CHECK_BITS_LEFT(n_skip_bytes * 8);
+                    for (uint32_t j = 0; j < n_skip_bytes; j++) {
+                        mBitReader.getBits(8);
+                    }
+                    break;
+                }
+                CHECK_BITS_LEFT(1 + 1);
+                bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+                mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+                b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+                ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+                ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+                    BOOLSTR(b_add_emdf_substreams));
+            }
+        }
+        if (b_add_emdf_substreams) {
+            CHECK_BITS_LEFT(7);
+            uint32_t n_add_emdf_substreams = mBitReader.getBits(7);
+            for (uint32_t j = 0; j < n_add_emdf_substreams; j++) {
+                CHECK_BITS_LEFT(5 + 10);
+                uint32_t substream_emdf_version = mBitReader.getBits(5);
+                uint32_t substream_key_id = mBitReader.getBits(10);
+                ALOGV("%u: emdf_substream[%d]: version=%d, key_id=%d\n", presentation, j,
+                    substream_emdf_version, substream_key_id);
+            }
+        }
+
+        bool b_presentation_bitrate_info = false;
+        if (presentation_version > 0) {
+            CHECK_BITS_LEFT(1);
+            b_presentation_bitrate_info = (mBitReader.getBits(1) == 1);
+        }
+
+        ALOGV("b_presentation_bitrate_info = %s\n", BOOLSTR(b_presentation_bitrate_info));
+        if (b_presentation_bitrate_info) {
+            if (!parseBitrateDsi()) {
+                return false;
+            }
+        }
+
+        if (presentation_version > 0) {
+            CHECK_BITS_LEFT(1);
+            bool b_alternative = (mBitReader.getBits(1) == 1);
+            ALOGV("b_alternative = %s\n", BOOLSTR(b_alternative));
+            if (b_alternative) {
+                BYTE_ALIGN;
+                CHECK_BITS_LEFT(16);
+                uint32_t name_len = mBitReader.getBits(16);
+                CHECK_BITS_LEFT(name_len * 8);
+                std::string &presentation_name =
+                    mPresentations[presentation].mDescription;
+                presentation_name.clear();
+                presentation_name.resize(name_len);
+                for (uint32_t i = 0; i < name_len; i++) {
+                    presentation_name[i] = (char)(mBitReader.getBits(8));
+                }
+                CHECK_BITS_LEFT(5);
+                uint32_t n_targets = mBitReader.getBits(5);
+                CHECK_BITS_LEFT(n_targets * (3 + 8));
+                for (uint32_t i = 0; i < n_targets; i++){
+                    mBitReader.skipBits(3); // target_md_compat
+                    mBitReader.skipBits(8); // target_device_category
+                }
+            }
+        }
+
+        BYTE_ALIGN;
+
+        if (ac4_dsi_version == 1) {
+            uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
+            if (mBitReader.numBitsLeft() % 8 != 0) {
+                end += 1;
+            }
+
+            uint64_t presentation_bytes = end - start;
+            uint64_t skip_bytes = pres_bytes - presentation_bytes;
+            ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
+            CHECK_BITS_LEFT(skip_bytes * 8);
+            mBitReader.skipBits(skip_bytes * 8);
+        }
+
+        // the channel mode must be known by now; if it isn't, something is
+        // probably wrong with the bitstream (or we don't support it)
+        if (mPresentations[presentation].mChannelMode == -1) {
+            ALOGE("could not determine channel mode of presentation %d", presentation);
+            return false;
+        }
+    } /* each presentation */
+
+    return true;
+}
+
+};
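
For reference, a minimal sketch of how the new parser is meant to be driven, following the same pattern parseAC4SpecificBox() in MPEG4Extractor.cpp uses for the dac4 payload. The helper name and return convention here are illustrative and not part of the patch:

    #include <sys/types.h>
    #include <media/stagefright/foundation/ABitReader.h>
    #include "AC4Parser.h"

    // Hypothetical helper: parse an AC-4 DSI blob and report how many
    // presentations it describes; returns -1 if the DSI is malformed or
    // uses features AC4DSIParser::parse() rejects.
    static ssize_t countAC4Presentations(uint8_t *dsi, size_t dsiSize) {
        android::ABitReader br(dsi, dsiSize);
        android::AC4DSIParser parser(br);
        if (!parser.parse()) {
            return -1;
        }
        return (ssize_t)parser.getPresentations().size();
    }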
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/AC4Parser.h
new file mode 100644
index 0000000..73b6e31
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AC4_PARSER_H_
+#define AC4_PARSER_H_
+
+#include <cstdint>
+#include <map>
+#include <string>
+
+#include <media/stagefright/foundation/ABitReader.h>
+
+namespace android {
+
+class AC4Parser {
+public:
+    AC4Parser();
+    virtual ~AC4Parser() { }
+
+    virtual bool parse() = 0;
+
+    struct AC4Presentation {
+        int32_t mChannelMode = -1;
+        int32_t mProgramID = -1;
+        int32_t mGroupIndex = -1;
+
+        // TS 103 190-1 v1.2.1 4.3.3.8.1
+        enum ContentClassifiers {
+            kCompleteMain,
+            kMusicAndEffects,
+            kVisuallyImpaired,
+            kHearingImpaired,
+            kDialog,
+            kCommentary,
+            kEmergency,
+            kVoiceOver
+        };
+
+        uint32_t mContentClassifier = kCompleteMain;
+
+        // ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+        enum InputChannelMode {
+            kChannelMode_Mono,
+            kChannelMode_Stereo,
+            kChannelMode_3_0,
+            kChannelMode_5_0,
+            kChannelMode_5_1,
+            kChannelMode_7_0_34,
+            kChannelMode_7_1_34,
+            kChannelMode_7_0_52,
+            kChannelMode_7_1_52,
+            kChannelMode_7_0_322,
+            kChannelMode_7_1_322,
+            kChannelMode_7_0_4,
+            kChannelMode_7_1_4,
+            kChannelMode_9_0_4,
+            kChannelMode_9_1_4,
+            kChannelMode_22_2,
+            kChannelMode_Reserved,
+        };
+
+        bool mHasDialogEnhancements = false;
+        bool mPreVirtualized = false;
+        bool mEnabled = true;
+
+        std::string mLanguage;
+        std::string mDescription;
+    };
+    typedef std::map<uint32_t, AC4Presentation> AC4Presentations;
+
+    const AC4Presentations& getPresentations() const { return mPresentations; }
+
+protected:
+    AC4Presentations mPresentations;
+};
+
+class AC4DSIParser: public AC4Parser {
+public:
+    explicit AC4DSIParser(ABitReader &br);
+    virtual ~AC4DSIParser() { }
+
+    bool parse();
+
+private:
+    bool parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID);
+    bool parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID);
+    bool parseLanguageTag(uint32_t presentationID, uint32_t substreamID);
+    bool parseBitrateDsi();
+
+    uint64_t mDSISize;
+    ABitReader& mBitReader;
+};
+
+};
+
+#endif  // AC4_PARSER_H_
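
The InputChannelMode values above mirror Table 79 of ETSI TS 103 190-2 but carry no speaker counts of their own. The mapping below is an illustrative sketch of how a consumer might turn a stored mChannelMode into a nominal channel count; the numbers are the conventional speaker-layout sizes and are an assumption of this note, not something the patch defines:

    // Illustrative only: nominal channel counts for the Table 79 modes.
    static int nominalChannelCount(int32_t mode) {
        using P = android::AC4Parser::AC4Presentation;
        switch (mode) {
            case P::kChannelMode_Mono:    return 1;
            case P::kChannelMode_Stereo:  return 2;
            case P::kChannelMode_3_0:     return 3;
            case P::kChannelMode_5_0:     return 5;
            case P::kChannelMode_5_1:     return 6;
            case P::kChannelMode_7_0_34:
            case P::kChannelMode_7_0_52:
            case P::kChannelMode_7_0_322: return 7;
            case P::kChannelMode_7_1_34:
            case P::kChannelMode_7_1_52:
            case P::kChannelMode_7_1_322: return 8;
            case P::kChannelMode_7_0_4:   return 11;
            case P::kChannelMode_7_1_4:   return 12;
            case P::kChannelMode_9_0_4:   return 13;
            case P::kChannelMode_9_1_4:   return 14;
            case P::kChannelMode_22_2:    return 24;
            default:                      return -1;  // reserved / unknown
        }
    }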
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index fa739e8..40b2c97 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -2,6 +2,7 @@
     name: "libmp4extractor_defaults",
 
     srcs: [
+        "AC4Parser.cpp",
         "ItemTable.cpp",
         "MPEG4Extractor.cpp",
         "SampleIterator.cpp",
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index be442e6..a61e60a 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -18,7 +18,8 @@
 #define LOG_TAG "ItemTable"
 
 #include <ItemTable.h>
-#include <media/DataSourceBase.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -92,7 +93,7 @@
 
 struct Box {
 protected:
-    Box(DataSourceBase *source, uint32_t type) :
+    Box(DataSourceHelper *source, uint32_t type) :
         mDataSource(source), mType(type) {}
 
     virtual ~Box() {}
@@ -104,14 +105,14 @@
 
     inline uint32_t type() const { return mType; }
 
-    inline DataSourceBase *source() const { return mDataSource; }
+    inline DataSourceHelper *source() const { return mDataSource; }
 
     status_t parseChunk(off64_t *offset);
 
     status_t parseChunks(off64_t offset, size_t size);
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     uint32_t mType;
 };
 
@@ -186,7 +187,7 @@
 
 struct FullBox : public Box {
 protected:
-    FullBox(DataSourceBase *source, uint32_t type) :
+    FullBox(DataSourceHelper *source, uint32_t type) :
         Box(source, type), mVersion(0), mFlags(0) {}
 
     inline uint8_t version() const { return mVersion; }
@@ -221,7 +222,7 @@
 //
 
 struct PitmBox : public FullBox {
-    PitmBox(DataSourceBase *source) :
+    PitmBox(DataSourceHelper *source) :
         FullBox(source, FOURCC('p', 'i', 't', 'm')) {}
 
     status_t parse(off64_t offset, size_t size, uint32_t *primaryItemId);
@@ -301,7 +302,7 @@
 };
 
 struct IlocBox : public FullBox {
-    IlocBox(DataSourceBase *source, KeyedVector<uint32_t, ItemLoc> *itemLocs) :
+    IlocBox(DataSourceHelper *source, KeyedVector<uint32_t, ItemLoc> *itemLocs) :
         FullBox(source, FOURCC('i', 'l', 'o', 'c')),
         mItemLocs(itemLocs), mHasConstructMethod1(false) {}
 
@@ -471,7 +472,7 @@
 //
 
 struct ItemReference : public Box, public RefBase {
-    ItemReference(DataSourceBase *source, uint32_t type, uint32_t itemIdSize) :
+    ItemReference(DataSourceHelper *source, uint32_t type, uint32_t itemIdSize) :
         Box(source, type), mItemId(0), mRefIdSize(itemIdSize) {}
 
     status_t parse(off64_t offset, size_t size);
@@ -626,7 +627,7 @@
 }
 
 struct IrefBox : public FullBox {
-    IrefBox(DataSourceBase *source, Vector<sp<ItemReference> > *itemRefs) :
+    IrefBox(DataSourceHelper *source, Vector<sp<ItemReference> > *itemRefs) :
         FullBox(source, FOURCC('i', 'r', 'e', 'f')), mRefIdSize(0), mItemRefs(itemRefs) {}
 
     status_t parse(off64_t offset, size_t size);
@@ -688,7 +689,7 @@
 };
 
 struct IspeBox : public FullBox, public ItemProperty {
-    IspeBox(DataSourceBase *source) :
+    IspeBox(DataSourceHelper *source) :
         FullBox(source, FOURCC('i', 's', 'p', 'e')), mWidth(0), mHeight(0) {}
 
     status_t parse(off64_t offset, size_t size) override;
@@ -724,7 +725,7 @@
 }
 
 struct HvccBox : public Box, public ItemProperty {
-    HvccBox(DataSourceBase *source) :
+    HvccBox(DataSourceHelper *source) :
         Box(source, FOURCC('h', 'v', 'c', 'C')) {}
 
     status_t parse(off64_t offset, size_t size) override;
@@ -757,7 +758,7 @@
 }
 
 struct IrotBox : public Box, public ItemProperty {
-    IrotBox(DataSourceBase *source) :
+    IrotBox(DataSourceHelper *source) :
         Box(source, FOURCC('i', 'r', 'o', 't')), mAngle(0) {}
 
     status_t parse(off64_t offset, size_t size) override;
@@ -786,7 +787,7 @@
 }
 
 struct ColrBox : public Box, public ItemProperty {
-    ColrBox(DataSourceBase *source) :
+    ColrBox(DataSourceHelper *source) :
         Box(source, FOURCC('c', 'o', 'l', 'r')) {}
 
     status_t parse(off64_t offset, size_t size) override;
@@ -834,7 +835,7 @@
 }
 
 struct IpmaBox : public FullBox {
-    IpmaBox(DataSourceBase *source, Vector<AssociationEntry> *associations) :
+    IpmaBox(DataSourceHelper *source, Vector<AssociationEntry> *associations) :
         FullBox(source, FOURCC('i', 'p', 'm', 'a')), mAssociations(associations) {}
 
     status_t parse(off64_t offset, size_t size);
@@ -908,7 +909,7 @@
 }
 
 struct IpcoBox : public Box {
-    IpcoBox(DataSourceBase *source, Vector<sp<ItemProperty> > *properties) :
+    IpcoBox(DataSourceHelper *source, Vector<sp<ItemProperty> > *properties) :
         Box(source, FOURCC('i', 'p', 'c', 'o')), mItemProperties(properties) {}
 
     status_t parse(off64_t offset, size_t size);
@@ -965,7 +966,7 @@
 }
 
 struct IprpBox : public Box {
-    IprpBox(DataSourceBase *source,
+    IprpBox(DataSourceHelper *source,
             Vector<sp<ItemProperty> > *properties,
             Vector<AssociationEntry> *associations) :
         Box(source, FOURCC('i', 'p', 'r', 'p')),
@@ -1022,7 +1023,7 @@
 };
 
 struct InfeBox : public FullBox {
-    InfeBox(DataSourceBase *source) :
+    InfeBox(DataSourceHelper *source) :
         FullBox(source, FOURCC('i', 'n', 'f', 'e')) {}
 
     status_t parse(off64_t offset, size_t size, ItemInfo *itemInfo);
@@ -1127,7 +1128,7 @@
 }
 
 struct IinfBox : public FullBox {
-    IinfBox(DataSourceBase *source, Vector<ItemInfo> *itemInfos) :
+    IinfBox(DataSourceHelper *source, Vector<ItemInfo> *itemInfos) :
         FullBox(source, FOURCC('i', 'i', 'n', 'f')),
         mItemInfos(itemInfos), mHasGrids(false) {}
 
@@ -1196,7 +1197,7 @@
 
 //////////////////////////////////////////////////////////////////
 
-ItemTable::ItemTable(DataSourceBase *source)
+ItemTable::ItemTable(DataSourceHelper *source)
     : mDataSource(source),
       mPrimaryItemId(0),
       mIdatOffset(0),
diff --git a/media/extractors/mp4/ItemTable.h b/media/extractors/mp4/ItemTable.h
index 536dcb0..650b3f3 100644
--- a/media/extractors/mp4/ItemTable.h
+++ b/media/extractors/mp4/ItemTable.h
@@ -25,7 +25,7 @@
 
 namespace android {
 
-class DataSourceBase;
+class DataSourceHelper;
 class MetaData;
 
 namespace heif {
@@ -45,7 +45,7 @@
 
 class ItemTable : public RefBase {
 public:
-    explicit ItemTable(DataSourceBase *source);
+    explicit ItemTable(DataSourceHelper *source);
 
     status_t parse(uint32_t type, off64_t offset, size_t size);
 
@@ -62,7 +62,7 @@
     ~ItemTable();
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
 
     KeyedVector<uint32_t, ItemLoc> mItemLocs;
     Vector<ItemInfo> mItemInfos;
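
The DataSourceBase→DataSourceHelper change through ItemTable is mechanical: the boxes keep using the same readAt()/getUInt32()-style accessors, only routed through the plugin helper. A hedged sketch of the access pattern — the helper function is hypothetical, and the 8-byte size+fourcc header layout is assumed from ISO/IEC 14496-12:

    #include <media/MediaExtractorPluginHelper.h>
    #include <media/stagefright/MediaErrors.h>

    // Hypothetical helper: read a plain box header (32-bit size + fourcc)
    // through a DataSourceHelper, the way the boxes above use mDataSource.
    static android::status_t readBoxHeader(android::DataSourceHelper *source,
            off64_t offset, uint32_t *size, uint32_t *type) {
        if (!source->getUInt32(offset, size) ||
                !source->getUInt32(offset + 4, type)) {
            return android::ERROR_MALFORMED;
        }
        return android::OK;
    }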
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 7b3b81d..c02d2fb 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -26,11 +26,13 @@
 
 #include <utils/Log.h>
 
+#include "AC4Parser.h"
 #include "MPEG4Extractor.h"
 #include "SampleTable.h"
 #include "ItemTable.h"
 #include "include/ESDS.h"
 
+#include <media/DataSourceBase.h>
 #include <media/ExtractorUtils.h>
 #include <media/MediaTrack.h>
 #include <media/stagefright/foundation/ABitReader.h>
@@ -67,10 +69,11 @@
 };
 
 class MPEG4Source : public MediaTrack {
+static const size_t kMaxPcmFrameSize = 8192;

 public:
     // Caller retains ownership of both "dataSource" and "sampleTable".
     MPEG4Source(MetaDataBase &format,
-                DataSourceBase *dataSource,
+                DataSourceHelper *dataSource,
                 int32_t timeScale,
                 const sp<SampleTable> &sampleTable,
                 Vector<SidxEntry> &sidx,
@@ -94,7 +97,7 @@
     Mutex mLock;
 
     MetaDataBase &mFormat;
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     int32_t mTimescale;
     sp<SampleTable> mSampleTable;
     uint32_t mCurrentSampleIndex;
@@ -125,6 +128,8 @@
 
     bool mIsAVC;
     bool mIsHEVC;
+    bool mIsAC4;
+    bool mIsPcm;
     size_t mNALLengthSize;
 
     bool mStarted;
@@ -193,11 +198,10 @@
 // possibly wrapping multiple times to cover all tracks, i.e.
 // Each CachedRangedDataSource caches the sampletable metadata for a single track.
 
-struct CachedRangedDataSource : public DataSourceBase {
-    explicit CachedRangedDataSource(DataSourceBase *source);
+struct CachedRangedDataSource : public DataSourceHelper {
+    explicit CachedRangedDataSource(DataSourceHelper *source);
     virtual ~CachedRangedDataSource();
 
-    virtual status_t initCheck() const;
     virtual ssize_t readAt(off64_t offset, void *data, size_t size);
     virtual status_t getSize(off64_t *size);
     virtual uint32_t flags();
@@ -208,7 +212,7 @@
 private:
     Mutex mLock;
 
-    DataSourceBase *mSource;
+    DataSourceHelper *mSource;
     bool mOwnsDataSource;
     off64_t mCachedOffset;
     size_t mCachedSize;
@@ -220,8 +224,9 @@
     CachedRangedDataSource &operator=(const CachedRangedDataSource &);
 };
 
-CachedRangedDataSource::CachedRangedDataSource(DataSourceBase *source)
-    : mSource(source),
+CachedRangedDataSource::CachedRangedDataSource(DataSourceHelper *source)
+    : DataSourceHelper(source),
+      mSource(source),
       mOwnsDataSource(false),
       mCachedOffset(0),
       mCachedSize(0),
@@ -245,10 +250,6 @@
     mCachedSize = 0;
 }
 
-status_t CachedRangedDataSource::initCheck() const {
-    return mSource->initCheck();
-}
-
 ssize_t CachedRangedDataSource::readAt(off64_t offset, void *data, size_t size) {
     Mutex::Autolock autoLock(mLock);
 
@@ -310,6 +311,9 @@
         case FOURCC('s', 'a', 'w', 'b'):
             return MEDIA_MIMETYPE_AUDIO_AMR_WB;
 
+        case FOURCC('e', 'c', '-', '3'):
+            return MEDIA_MIMETYPE_AUDIO_EAC3;
+
         case FOURCC('m', 'p', '4', 'v'):
             return MEDIA_MIMETYPE_VIDEO_MPEG4;
 
@@ -324,6 +328,13 @@
         case FOURCC('h', 'v', 'c', '1'):
         case FOURCC('h', 'e', 'v', '1'):
             return MEDIA_MIMETYPE_VIDEO_HEVC;
+        case FOURCC('a', 'c', '-', '4'):
+            return MEDIA_MIMETYPE_AUDIO_AC4;
+
+        case FOURCC('t', 'w', 'o', 's'):
+        case FOURCC('s', 'o', 'w', 't'):
+            return MEDIA_MIMETYPE_AUDIO_RAW;
+
         default:
             ALOGW("Unknown fourcc: %c%c%c%c",
                    (fourcc >> 24) & 0xff,
@@ -350,7 +361,7 @@
     return false;
 }
 
-MPEG4Extractor::MPEG4Extractor(DataSourceBase *source, const char *mime)
+MPEG4Extractor::MPEG4Extractor(DataSourceHelper *source, const char *mime)
     : mMoofOffset(0),
       mMoofFound(false),
       mMdatFound(false),
@@ -383,6 +394,7 @@
     mPssh.clear();
 
     delete mCachedSource;
+    delete mDataSource;
 }
 
 uint32_t MPEG4Extractor::flags() const {
@@ -467,8 +479,15 @@
             if (__builtin_mul_overflow(media_time, samplerate, &delay) ||
                     __builtin_add_overflow(delay, halfscale, &delay) ||
                     (delay /= mHeaderTimescale, false) ||
-                    delay > INT32_MAX ||
-                    delay < INT32_MIN) {
+                    /* the calculated delay should be small, but some apps
+                     * appear to write a bogus edit list that causes a really
+                     * large delay, resulting in playback problems.
+                     * Ignore such edit lists.
+                     * (4096 is enough to drop 4 full samples)
+                     */
+                    delay > 4096 ||
+                    delay < 0) {
+                ALOGW("ignoring edit list with bogus values");
                 return;
             }
             ALOGV("delay = %" PRId64, delay);
@@ -1473,6 +1492,8 @@
         case FOURCC('e', 'n', 'c', 'a'):
         case FOURCC('s', 'a', 'm', 'r'):
         case FOURCC('s', 'a', 'w', 'b'):
+        case FOURCC('t', 'w', 'o', 's'):
+        case FOURCC('s', 'o', 'w', 't'):
         {
             if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')
                     && depth >= 1 && mPath[depth - 1] == FOURCC('w', 'a', 'v', 'e')) {
@@ -1542,6 +1563,13 @@
                 // if the chunk type is enca, we'll get the type from the frma box later
                 mLastTrack->meta.setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
                 AdjustChannelsAndRate(chunk_type, &num_channels, &sample_rate);
+
+                if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, FourCC2MIME(chunk_type))) {
+                    mLastTrack->meta.setInt32(kKeyBitsPerSample, sample_size);
+                    if (chunk_type == FOURCC('t', 'w', 'o', 's')) {
+                        mLastTrack->meta.setInt32(kKeyPcmBigEndian, 1);
+                    }
+                }
             }
             ALOGV("*** coding='%s' %d channels, size %d, rate %d\n",
                    chunk, num_channels, sample_size, sample_rate);
@@ -2433,7 +2461,19 @@
         case FOURCC('a', 'c', '-', '3'):
         {
             *offset += chunk_size;
-            return parseAC3SampleEntry(data_offset);
+            return parseAC3SpecificBox(data_offset);
+        }
+
+        case FOURCC('e', 'c', '-', '3'):
+        {
+            *offset += chunk_size;
+            return parseEAC3SpecificBox(data_offset);
+        }
+
+        case FOURCC('a', 'c', '-', '4'):
+        {
+            *offset += chunk_size;
+            return parseAC4SpecificBox(data_offset);
         }
 
         case FOURCC('f', 't', 'y', 'p'):
@@ -2507,36 +2547,260 @@
     return OK;
 }
 
-status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+status_t MPEG4Extractor::parseChannelCountSampleRate(
+        off64_t *offset, uint16_t *channelCount, uint16_t *sampleRate) {
     // skip 16 bytes:
     //  + 6-byte reserved,
     //  + 2-byte data reference index,
     //  + 8-byte reserved
-    offset += 16;
-    uint16_t channelCount;
-    if (!mDataSource->getUInt16(offset, &channelCount)) {
+    *offset += 16;
+    if (!mDataSource->getUInt16(*offset, channelCount)) {
+        ALOGE("MPEG4Extractor: error while reading sample entry box: cannot read channel count");
         return ERROR_MALFORMED;
     }
     // skip 8 bytes:
     //  + 2-byte channelCount,
     //  + 2-byte sample size,
     //  + 4-byte reserved
-    offset += 8;
-    uint16_t sampleRate;
-    if (!mDataSource->getUInt16(offset, &sampleRate)) {
-        ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+    *offset += 8;
+    if (!mDataSource->getUInt16(*offset, sampleRate)) {
+        ALOGE("MPEG4Extractor: error while reading sample entry box: cannot read sample rate");
         return ERROR_MALFORMED;
     }
-
     // skip 4 bytes:
     //  + 2-byte sampleRate,
     //  + 2-byte reserved
-    offset += 4;
-    return parseAC3SpecificBox(offset, sampleRate);
+    *offset += 4;
+    return OK;
 }
 
-status_t MPEG4Extractor::parseAC3SpecificBox(
-        off64_t offset, uint16_t sampleRate) {
+status_t MPEG4Extractor::parseAC4SpecificBox(off64_t offset) {
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    uint16_t sampleRate, channelCount;
+    status_t status;
+    if ((status = parseChannelCountSampleRate(&offset, &channelCount, &sampleRate)) != OK) {
+        return status;
+    }
+    uint32_t size;
+    // + 4-byte size
+    // + 4-byte type
+    // + 3-byte payload
+    const uint32_t kAC4MinimumBoxSize = 4 + 4 + 3;
+    if (!mDataSource->getUInt32(offset, &size) || size < kAC4MinimumBoxSize) {
+        ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read specific box size");
+        return ERROR_MALFORMED;
+    }
+
+    // + 4-byte size
+    offset += 4;
+    uint32_t type;
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '4')) {
+        ALOGE("MPEG4Extractor: error while reading ac-4 specific block: header not dac4");
+        return ERROR_MALFORMED;
+    }
+
+    // + 4-byte type
+    offset += 4;
+    // at least for AC4 DSI v1 this is big enough
+    const uint32_t kAC4SpecificBoxPayloadSize = 256;
+    uint8_t chunk[kAC4SpecificBoxPayloadSize];
+    ssize_t dsiSize = size - 8; // size of box - size and type fields
+    if (dsiSize >= (ssize_t)kAC4SpecificBoxPayloadSize ||
+        mDataSource->readAt(offset, chunk, dsiSize) != dsiSize) {
+        ALOGE("MPEG4Extractor: error while reading ac-4 specific block: bitstream fields");
+        return ERROR_MALFORMED;
+    }
+    // + size-byte payload
+    offset += dsiSize;
+    ABitReader br(chunk, dsiSize);
+    AC4DSIParser parser(br);
+    if (!parser.parse()){
+        ALOGE("MPEG4Extractor: error while parsing ac-4 specific block");
+        return ERROR_MALFORMED;
+    }
+
+    mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+    mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+    mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+    return OK;
+}
+
+status_t MPEG4Extractor::parseEAC3SpecificBox(off64_t offset) {
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    uint16_t sampleRate, channels;
+    status_t status;
+    if ((status = parseChannelCountSampleRate(&offset, &channels, &sampleRate)) != OK) {
+        return status;
+    }
+    uint32_t size;
+    // + 4-byte size
+    // + 4-byte type
+    // + 3-byte payload
+    const uint32_t kEAC3SpecificBoxMinSize = 11;
+    // 13 + 3 + (8 * (2 + 5 + 5 + 3 + 1 + 3 + 4 + (14 * 9 + 1))) bits == 152 bytes theoretical max
+    // calculated from the required bits read below as well as the maximum number of independent
+    // and dependent substreams you can have
+    const uint32_t kEAC3SpecificBoxMaxSize = 152;
+    if (!mDataSource->getUInt32(offset, &size) ||
+        size < kEAC3SpecificBoxMinSize ||
+        size > kEAC3SpecificBoxMaxSize) {
+        ALOGE("MPEG4Extractor: error while reading eac-3 block: cannot read specific box size");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    uint32_t type;
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'e', 'c', '3')) {
+        ALOGE("MPEG4Extractor: error while reading eac-3 specific block: header not dec3");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    uint8_t* chunk = new (std::nothrow) uint8_t[size];
+    if (chunk == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    if (mDataSource->readAt(offset, chunk, size) != (ssize_t)size) {
+        ALOGE("MPEG4Extractor: error while reading eac-3 specific block: bitstream fields");
+        delete[] chunk;
+        return ERROR_MALFORMED;
+    }
+
+    ABitReader br(chunk, size);
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+    static const unsigned sampleRateTable[] = {48000, 44100, 32000};
+
+    if (br.numBitsLeft() < 16) {
+        delete[] chunk;
+        return ERROR_MALFORMED;
+    }
+    unsigned data_rate = br.getBits(13);
+    ALOGV("EAC3 data rate = %d", data_rate);
+
+    unsigned num_ind_sub = br.getBits(3) + 1;
+    ALOGV("EAC3 independant substreams = %d", num_ind_sub);
+    if (br.numBitsLeft() < (num_ind_sub * 23)) {
+        delete[] chunk;
+        return ERROR_MALFORMED;
+    }
+
+    unsigned channelCount = 0;
+    for (unsigned i = 0; i < num_ind_sub; i++) {
+        unsigned fscod = br.getBits(2);
+        if (fscod == 3) {
+            ALOGE("Incorrect fscod (3) in EAC3 header");
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+        unsigned boxSampleRate = sampleRateTable[fscod];
+        if (boxSampleRate != sampleRate) {
+            ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+                boxSampleRate, sampleRate);
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+
+        unsigned bsid = br.getBits(5);
+        if (bsid < 8) {
+            ALOGW("Incorrect bsid in EAC3 header. Possibly AC-3?");
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+
+        // skip
+        br.skipBits(2);
+        unsigned bsmod = br.getBits(3);
+        unsigned acmod = br.getBits(3);
+        unsigned lfeon = br.getBits(1);
+        // we currently only support the first stream
+        if (i == 0)
+            channelCount = channelCountTable[acmod] + lfeon;
+        ALOGV("bsmod = %d, acmod = %d, lfeon = %d", bsmod, acmod, lfeon);
+
+        br.skipBits(3);
+        unsigned num_dep_sub = br.getBits(4);
+        ALOGV("EAC3 dependant substreams = %d", num_dep_sub);
+        if (num_dep_sub != 0) {
+            if (br.numBitsLeft() < 9) {
+                delete[] chunk;
+                return ERROR_MALFORMED;
+            }
+            static const char* chan_loc_tbl[] = { "Lc/Rc","Lrs/Rrs","Cs","Ts","Lsd/Rsd",
+                "Lw/Rw","Lvh/Rvh","Cvh","Lfe2" };
+            unsigned chan_loc = br.getBits(9);
+            unsigned mask = 1;
+            for (unsigned j = 0; j < 9; j++, mask <<= 1) {
+                if ((chan_loc & mask) != 0) {
+                    // we currently only support the first stream
+                    if (i == 0) {
+                        channelCount++;
+                        // these are 2 channels in the mask
+                        if (j == 0 || j == 1 || j == 4 || j == 5 || j == 6) {
+                            channelCount++;
+                        }
+                    }
+                    ALOGV(" %s", chan_loc_tbl[j]);
+                }
+            }
+        } else {
+            if (br.numBitsLeft() == 0) {
+                delete[] chunk;
+                return ERROR_MALFORMED;
+            }
+            br.skipBits(1);
+        }
+    }
+
+    if (br.numBitsLeft() != 0) {
+        if (br.numBitsLeft() < 8) {
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+        unsigned mask = br.getBits(8);
+        for (unsigned i = 0; i < 8; i++) {
+            if (((0x1 << i) & mask) == 0) // bit i not set in the mask
+                continue;
+
+            if (br.numBitsLeft() < 8) {
+                delete[] chunk;
+                return ERROR_MALFORMED;
+            }
+            switch (i) {
+                case 0: {
+                    unsigned complexity = br.getBits(8);
+                    ALOGV("Found a JOC stream with complexity = %d", complexity);
+                }break;
+                default: {
+                    br.skipBits(8);
+                }break;
+            }
+        }
+    }
+    mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EAC3);
+    mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+    mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+
+    delete[] chunk;
+    return OK;
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(off64_t offset) {
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    uint16_t sampleRate, channels;
+    status_t status;
+    if ((status = parseChannelCountSampleRate(&offset, &channels, &sampleRate)) != OK) {
+        return status;
+    }
     uint32_t size;
     // + 4-byte size
     // + 4-byte type
@@ -2591,9 +2855,6 @@
     unsigned lfeon = br.getBits(1);
     unsigned channelCount = channelCountTable[acmod] + lfeon;
 
-    if (mLastTrack == NULL) {
-        return ERROR_MALFORMED;
-    }
     mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
     mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
     mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
@@ -3830,7 +4091,7 @@
 
 MPEG4Source::MPEG4Source(
         MetaDataBase &format,
-        DataSourceBase *dataSource,
+        DataSourceHelper *dataSource,
         int32_t timeScale,
         const sp<SampleTable> &sampleTable,
         Vector<SidxEntry> &sidx,
@@ -3857,6 +4118,8 @@
       mCurrentSampleInfoOffsets(NULL),
       mIsAVC(false),
       mIsHEVC(false),
+      mIsAC4(false),
+      mIsPcm(false),
       mNALLengthSize(0),
       mStarted(false),
       mGroup(NULL),
@@ -3890,6 +4153,7 @@
     mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
     mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
               !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+    mIsAC4 = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4);
 
     if (mIsAVC) {
         uint32_t type;
@@ -3918,6 +4182,27 @@
         mNALLengthSize = 1 + (ptr[14 + 7] & 3);
     }
 
+    mIsPcm = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW);
+
+    if (mIsPcm) {
+        int32_t numChannels = 0;
+        int32_t bitsPerSample = 0;
+        CHECK(mFormat.findInt32(kKeyBitsPerSample, &bitsPerSample));
+        CHECK(mFormat.findInt32(kKeyChannelCount, &numChannels));
+
+        int32_t bytesPerSample = bitsPerSample >> 3;
+        int32_t pcmSampleSize = bytesPerSample * numChannels;
+
+        size_t maxSampleSize;
+        status_t err = mSampleTable->getMaxSampleSize(&maxSampleSize);
+        if (err != OK || maxSampleSize != static_cast<size_t>(pcmSampleSize) || bitsPerSample != 16) {
+            // Not supported
+            mIsPcm = false;
+        } else {
+            mFormat.setInt32(kKeyMaxInputSize, pcmSampleSize * kMaxPcmFrameSize);
+        }
+    }
+
     CHECK(format.findInt32(kKeyTrackID, &mTrackId));
 
 }
@@ -4830,45 +5115,96 @@
         }
     }
 
-    if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
+    if ((!mIsAVC && !mIsHEVC && !mIsAC4) || mWantsNALFragments) {
         if (newBuffer) {
-            ssize_t num_bytes_read =
-                mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
+            if (mIsPcm) {
+                // The 'twos' PCM block reader assumes that all samples have the same size.
 
-            if (num_bytes_read < (ssize_t)size) {
-                mBuffer->release();
-                mBuffer = NULL;
+                uint32_t samplesToRead = mSampleTable->getLastSampleIndexInChunk()
+                                                      - mCurrentSampleIndex + 1;
+                if (samplesToRead > kMaxPcmFrameSize) {
+                    samplesToRead = kMaxPcmFrameSize;
+                }
 
-                return ERROR_IO;
-            }
+                ALOGV("Reading %d PCM frames of size %zu at index %d to stop of chunk at %d",
+                      samplesToRead, size, mCurrentSampleIndex,
+                      mSampleTable->getLastSampleIndexInChunk());
 
-            CHECK(mBuffer != NULL);
-            mBuffer->set_range(0, size);
-            mBuffer->meta_data().clear();
-            mBuffer->meta_data().setInt64(
-                    kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
-            mBuffer->meta_data().setInt64(
-                    kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+                size_t totalSize = samplesToRead * size;
+                uint8_t* buf = (uint8_t *)mBuffer->data();
+                ssize_t bytesRead = mDataSource->readAt(offset, buf, totalSize);
+                if (bytesRead < (ssize_t)totalSize) {
+                    mBuffer->release();
+                    mBuffer = NULL;
 
-            if (targetSampleTimeUs >= 0) {
-                mBuffer->meta_data().setInt64(
-                        kKeyTargetTime, targetSampleTimeUs);
-            }
+                    return ERROR_IO;
+                }
 
-            if (isSyncSample) {
+                mBuffer->meta_data().clear();
+                mBuffer->meta_data().setInt64(kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
                 mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
-            }
 
-            ++mCurrentSampleIndex;
+                int32_t byteOrder = 0; // kKeyPcmBigEndian is only set for 'twos' content
+                mFormat.findInt32(kKeyPcmBigEndian, &byteOrder);
+
+                if (byteOrder == 1) {
+                    // Big-endian -> little-endian
+                    uint16_t *dstData = (uint16_t *)buf;
+                    uint16_t *srcData = (uint16_t *)buf;
+
+                    for (size_t j = 0; j < bytesRead / sizeof(uint16_t); j++) {
+                         dstData[j] = ntohs(srcData[j]);
+                    }
+                }
+
+                mCurrentSampleIndex += samplesToRead;
+                mBuffer->set_range(0, totalSize);
+            } else {
+                ssize_t num_bytes_read =
+                    mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
+
+                if (num_bytes_read < (ssize_t)size) {
+                    mBuffer->release();
+                    mBuffer = NULL;
+
+                    return ERROR_IO;
+                }
+
+                CHECK(mBuffer != NULL);
+                mBuffer->set_range(0, size);
+                mBuffer->meta_data().clear();
+                mBuffer->meta_data().setInt64(
+                        kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+                mBuffer->meta_data().setInt64(
+                        kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+                if (targetSampleTimeUs >= 0) {
+                    mBuffer->meta_data().setInt64(
+                            kKeyTargetTime, targetSampleTimeUs);
+                }
+
+                if (isSyncSample) {
+                    mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+                }
+
+                ++mCurrentSampleIndex;
+            }
         }
 
-        if (!mIsAVC && !mIsHEVC) {
+        if (!mIsAVC && !mIsHEVC && !mIsAC4) {
             *out = mBuffer;
             mBuffer = NULL;
 
             return OK;
         }
 
+        if (mIsAC4) {
+            mBuffer->release();
+            mBuffer = NULL;
+
+            return ERROR_IO;
+        }
+
         // Each NAL unit is split up into its constituent fragments and
         // each one of them returned in its own buffer.
 
@@ -4907,6 +5243,58 @@
         *out = clone;
 
         return OK;
+    } else if (mIsAC4) {
+        CHECK(mBuffer != NULL);
+        // Make sure there is enough space to write the sync header and the raw frame
+        if (mBuffer->range_length() < (7 + size)) {
+            mBuffer->release();
+            mBuffer = NULL;
+
+            return ERROR_IO;
+        }
+
+        uint8_t *dstData = (uint8_t *)mBuffer->data();
+        size_t dstOffset = 0;
+        // Add AC-4 sync header to MPEG4 encapsulated AC-4 raw frame
+        // AC40 sync word, meaning no CRC at the end of the frame
+        dstData[dstOffset++] = 0xAC;
+        dstData[dstOffset++] = 0x40;
+        dstData[dstOffset++] = 0xFF;
+        dstData[dstOffset++] = 0xFF;
+        dstData[dstOffset++] = (uint8_t)((size >> 16) & 0xFF);
+        dstData[dstOffset++] = (uint8_t)((size >> 8) & 0xFF);
+        dstData[dstOffset++] = (uint8_t)((size >> 0) & 0xFF);
+
+        ssize_t numBytesRead = mDataSource->readAt(offset, dstData + dstOffset, size);
+        if (numBytesRead != (ssize_t)size) {
+            mBuffer->release();
+            mBuffer = NULL;
+
+            return ERROR_IO;
+        }
+
+        mBuffer->set_range(0, dstOffset + size);
+        mBuffer->meta_data().clear();
+        mBuffer->meta_data().setInt64(
+                kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+        mBuffer->meta_data().setInt64(
+                kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+        if (targetSampleTimeUs >= 0) {
+            mBuffer->meta_data().setInt64(
+                    kKeyTargetTime, targetSampleTimeUs);
+        }
+
+        if (isSyncSample) {
+            mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+        }
+
+        ++mCurrentSampleIndex;
+
+        *out = mBuffer;
+        mBuffer = NULL;
+
+        return OK;
     } else {
         // Whole NAL units are returned but each fragment is prefixed by
         // the start code (0x00 00 00 01).
@@ -5081,9 +5469,13 @@
     uint32_t cts = 0;
     bool isSyncSample = false;
     bool newBuffer = false;
-    if (mBuffer == NULL) {
+    if (mBuffer == NULL || mCurrentSampleIndex >= mCurrentSamples.size()) {
         newBuffer = true;
 
+        if (mBuffer != NULL) {
+            mBuffer->release();
+            mBuffer = NULL;
+        }
         if (mCurrentSampleIndex >= mCurrentSamples.size()) {
             // move to next fragment if there is one
             if (mNextMoofOffset <= mCurrentMoofOffset) {
@@ -5361,6 +5753,8 @@
 
         return OK;
     }
+
+    return OK;
 }
 
 MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
@@ -5376,7 +5770,7 @@
     return NULL;
 }
 
-static bool LegacySniffMPEG4(DataSourceBase *source, float *confidence) {
+static bool LegacySniffMPEG4(DataSourceHelper *source, float *confidence) {
     uint8_t header[8];
 
     ssize_t n = source->readAt(4, header, sizeof(header));
@@ -5441,7 +5835,7 @@
 // Also try to identify where this file's metadata ends
 // (end of the 'moov' atom) and report it to the caller as part of
 // the metadata.
-static bool BetterSniffMPEG4(DataSourceBase *source, float *confidence) {
+static bool BetterSniffMPEG4(DataSourceHelper *source, float *confidence) {
     // We scan up to 128 bytes to identify this file as an MP4.
     static const off64_t kMaxScanOffset = 128ll;
 
@@ -5548,18 +5942,19 @@
     return true;
 }
 
-static MediaExtractor* CreateExtractor(DataSourceBase *source, void *) {
-    return new MPEG4Extractor(source);
+static CMediaExtractor* CreateExtractor(CDataSource *source, void *) {
+    return wrap(new MPEG4Extractor(new DataSourceHelper(source)));
 }
 
-static MediaExtractor::CreatorFunc Sniff(
-        DataSourceBase *source, float *confidence, void **,
-        MediaExtractor::FreeMetaFunc *) {
-    if (BetterSniffMPEG4(source, confidence)) {
+static CreatorFunc Sniff(
+        CDataSource *source, float *confidence, void **,
+        FreeMetaFunc *) {
+    DataSourceHelper helper(source);
+    if (BetterSniffMPEG4(&helper, confidence)) {
         return CreateExtractor;
     }
 
-    if (LegacySniffMPEG4(source, confidence)) {
+    if (LegacySniffMPEG4(&helper, confidence)) {
         ALOGW("Identified supported mpeg4 through LegacySniffMPEG4.");
         return CreateExtractor;
     }
@@ -5570,9 +5965,9 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("27575c67-4417-4c54-8d3d-8e626985a164"),
         1, // version
         "MP4 Extractor",
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 3ea0963..ca273e0 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -20,8 +20,8 @@
 
 #include <arpa/inet.h>
 
-#include <media/DataSourceBase.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <media/stagefright/foundation/AString.h>
 #include <utils/KeyedVector.h>
@@ -31,7 +31,8 @@
 
 namespace android {
 struct AMessage;
-class DataSourceBase;
+struct CDataSource;
+class DataSourceHelper;
 struct CachedRangedDataSource;
 class SampleTable;
 class String8;
@@ -53,9 +54,9 @@
     uint32_t default_sample_flags;
 };
 
-class MPEG4Extractor : public MediaExtractor {
+class MPEG4Extractor : public MediaExtractorPluginHelper {
 public:
-    explicit MPEG4Extractor(DataSourceBase *source, const char *mime = NULL);
+    explicit MPEG4Extractor(DataSourceHelper *source, const char *mime = NULL);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -97,7 +98,7 @@
 
     Vector<Trex> mTrex;
 
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     CachedRangedDataSource *mCachedSource;
     status_t mInitCheck;
     uint32_t mHeaderTimescale;
@@ -139,17 +140,16 @@
 
     Track *findTrackByMimePrefix(const char *mimePrefix);
 
-    status_t parseAC3SampleEntry(off64_t offset);
-    status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+    status_t parseChannelCountSampleRate(
+            off64_t *offset, uint16_t *channelCount, uint16_t *sampleRate);
+    status_t parseAC3SpecificBox(off64_t offset);
+    status_t parseEAC3SpecificBox(off64_t offset);
+    status_t parseAC4SpecificBox(off64_t offset);
 
     MPEG4Extractor(const MPEG4Extractor &);
     MPEG4Extractor &operator=(const MPEG4Extractor &);
 };
 
-bool SniffMPEG4(
-        DataSourceBase *source, String8 *mimeType, float *confidence,
-        sp<AMessage> *);
-
 }  // namespace android
 
 #endif  // MPEG4_EXTRACTOR_H_
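
The 7-byte header that MPEG4Source::read() now prepends to each MP4-encapsulated AC-4 frame can be restated as a small standalone helper; this sketch reproduces the byte layout the patch writes inline (0xAC40 sync word = frame without trailing CRC, 0xFFFF as the 16-bit length escape, then the 24-bit frame size):

    #include <cstddef>
    #include <cstdint>

    // Sketch: build the AC-4 sync header used in the read path above.
    // Returns the number of header bytes written (always 7 in this form).
    static size_t writeAC4SyncHeader(uint8_t *dst, size_t frameSize) {
        size_t i = 0;
        dst[i++] = 0xAC;                                 // sync word (no-CRC variant)
        dst[i++] = 0x40;
        dst[i++] = 0xFF;                                 // 16-bit size escape ...
        dst[i++] = 0xFF;
        dst[i++] = (uint8_t)((frameSize >> 16) & 0xFF);  // ... followed by 24-bit size
        dst[i++] = (uint8_t)((frameSize >> 8) & 0xFF);
        dst[i++] = (uint8_t)(frameSize & 0xFF);
        return i;
    }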
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index 93ee7c6..1a6d306 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -328,7 +328,15 @@
         ++mTimeToSampleIndex;
     }
 
-    *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
+    // below is equivalent to:
+    // *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
+    uint32_t tmp;
+    if (__builtin_sub_overflow(sampleIndex, mTTSSampleIndex, &tmp) ||
+            __builtin_mul_overflow(mTTSDuration, tmp, &tmp) ||
+            __builtin_add_overflow(mTTSSampleTime, tmp, &tmp)) {
+        return ERROR_OUT_OF_RANGE;
+    }
+    *time = tmp;
 
     int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
     if ((offset < 0 && *time < (offset == INT32_MIN ?
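
The overflow-guarded computation added above is self-contained enough to exercise on its own; a minimal sketch using the same __builtin_*_overflow intrinsics (the function name and bool return are illustrative; the caller would map a failure to ERROR_OUT_OF_RANGE as the iterator does):

    #include <cstdint>

    // Sketch: time = sampleTime + duration * (index - baseIndex), with every
    // step checked for uint32_t overflow exactly as in SampleIterator.
    static bool checkedSampleTime(uint32_t sampleTime, uint32_t duration,
            uint32_t index, uint32_t baseIndex, uint32_t *out) {
        uint32_t tmp;
        if (__builtin_sub_overflow(index, baseIndex, &tmp) ||
                __builtin_mul_overflow(duration, tmp, &tmp) ||
                __builtin_add_overflow(sampleTime, tmp, &tmp)) {
            return false;
        }
        *out = tmp;
        return true;
    }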
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/SampleIterator.h
index 6a3fd3b..6e4f60e 100644
--- a/media/extractors/mp4/SampleIterator.h
+++ b/media/extractors/mp4/SampleIterator.h
@@ -36,6 +36,11 @@
     uint32_t getSampleTime() const { return mCurrentSampleTime; }
     uint32_t getSampleDuration() const { return mCurrentSampleDuration; }
 
+    uint32_t getLastSampleIndexInChunk() const {
+        return mCurrentSampleIndex + mSamplesPerChunk -
+                ((mCurrentSampleIndex - mFirstChunkSampleIndex) % mSamplesPerChunk) - 1;
+    }
+
     status_t getSampleSizeDirect(
             uint32_t sampleIndex, size_t *size);
 
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index 81c353e..d242798 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -25,7 +25,7 @@
 
 #include <arpa/inet.h>
 
-#include <media/DataSourceBase.h>
+#include <media/MediaExtractorPluginApi.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ByteUtils.h>
 
@@ -114,7 +114,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-SampleTable::SampleTable(DataSourceBase *source)
+SampleTable::SampleTable(DataSourceHelper *source)
     : mDataSource(source),
       mChunkOffsetOffset(-1),
       mChunkOffsetType(0),
@@ -946,6 +946,11 @@
             sampleIndex, sampleSize);
 }
 
+uint32_t SampleTable::getLastSampleIndexInChunk() {
+    Mutex::Autolock autoLock(mLock);
+    return mSampleIterator->getLastSampleIndexInChunk();
+}
+
 status_t SampleTable::getMetaDataForSample(
         uint32_t sampleIndex,
         off64_t *offset,
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/SampleTable.h
index e4e974b..d4b5dc8 100644
--- a/media/extractors/mp4/SampleTable.h
+++ b/media/extractors/mp4/SampleTable.h
@@ -21,18 +21,19 @@
 #include <sys/types.h>
 #include <stdint.h>
 
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MediaErrors.h>
 #include <utils/RefBase.h>
 #include <utils/threads.h>
 
 namespace android {
 
-class DataSourceBase;
+class DataSourceHelper;
 struct SampleIterator;
 
 class SampleTable : public RefBase {
 public:
-    explicit SampleTable(DataSourceBase *source);
+    explicit SampleTable(DataSourceHelper *source);
 
     bool isValid() const;
 
@@ -69,6 +70,9 @@
             bool *isSyncSample = NULL,
             uint32_t *sampleDuration = NULL);
 
+    // call only after getMetaDataForSample has been called successfully.
+    uint32_t getLastSampleIndexInChunk();
+
     enum {
         kFlagBefore,
         kFlagAfter,
@@ -99,7 +103,7 @@
     // Limit the total size of all internal tables to 200MiB.
     static const size_t kMaxTotalSize = 200 * (1 << 20);
 
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     Mutex mLock;
 
     off64_t mChunkOffsetOffset;
diff --git a/media/extractors/mpeg2/ExtractorBundle.cpp b/media/extractors/mpeg2/ExtractorBundle.cpp
index 8a0fa03..88c2d87 100644
--- a/media/extractors/mpeg2/ExtractorBundle.cpp
+++ b/media/extractors/mpeg2/ExtractorBundle.cpp
@@ -18,36 +18,39 @@
 #define LOG_TAG "MPEG2ExtractorBundle"
 #include <utils/Log.h>
 
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include "MPEG2PSExtractor.h"
 #include "MPEG2TSExtractor.h"
 
 namespace android {
 
+struct CDataSource;
+
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("3d1dcfeb-e40a-436d-a574-c2438a555e5f"),
         1,
         "MPEG2-PS/TS Extractor",
         [](
-                DataSourceBase *source,
+                CDataSource *source,
                 float *confidence,
                 void **,
-                MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
-            if (SniffMPEG2TS(source, confidence)) {
+                FreeMetaFunc *) -> CreatorFunc {
+            DataSourceHelper helper(source);
+            if (SniffMPEG2TS(&helper, confidence)) {
                 return [](
-                        DataSourceBase *source,
-                        void *) -> MediaExtractor* {
-                    return new MPEG2TSExtractor(source);};
-            } else if (SniffMPEG2PS(source, confidence)) {
+                        CDataSource *source,
+                        void *) -> CMediaExtractor* {
+                    return wrap(new MPEG2TSExtractor(new DataSourceHelper(source)));};
+            } else if (SniffMPEG2PS(&helper, confidence)) {
                         return [](
-                                DataSourceBase *source,
-                                void *) -> MediaExtractor* {
-                            return new MPEG2PSExtractor(source);};
+                                CDataSource *source,
+                                void *) -> CMediaExtractor* {
+                            return wrap(new MPEG2PSExtractor(new DataSourceHelper(source)));};
             }
             return NULL;
         }
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
index 6980b82..ae1e6ba 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -94,7 +94,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-MPEG2PSExtractor::MPEG2PSExtractor(DataSourceBase *source)
+MPEG2PSExtractor::MPEG2PSExtractor(DataSourceHelper *source)
     : mDataSource(source),
       mOffset(0),
       mFinalResult(OK),
@@ -120,6 +120,7 @@
 }
 
 MPEG2PSExtractor::~MPEG2PSExtractor() {
+    delete mDataSource;
 }
 
 size_t MPEG2PSExtractor::countTracks() {
@@ -754,7 +755,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 bool SniffMPEG2PS(
-        DataSourceBase *source, float *confidence) {
+        DataSourceHelper *source, float *confidence) {
     uint8_t header[5];
     if (source->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
         return false;
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/MPEG2PSExtractor.h
index 8b9dad9..7689910 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.h
@@ -19,7 +19,8 @@
 #define MPEG2_PS_EXTRACTOR_H_
 
 #include <media/stagefright/foundation/ABase.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <utils/threads.h>
 #include <utils/KeyedVector.h>
@@ -31,8 +32,8 @@
 struct Track;
 class String8;
 
-struct MPEG2PSExtractor : public MediaExtractor {
-    explicit MPEG2PSExtractor(DataSourceBase *source);
+struct MPEG2PSExtractor : public MediaExtractorPluginHelper {
+    explicit MPEG2PSExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -51,7 +52,7 @@
     struct WrappedTrack;
 
     mutable Mutex mLock;
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
 
     off64_t mOffset;
     status_t mFinalResult;
@@ -72,7 +73,7 @@
     DISALLOW_EVIL_CONSTRUCTORS(MPEG2PSExtractor);
 };
 
-bool SniffMPEG2PS(DataSourceBase *source, float *confidence);
+bool SniffMPEG2PS(DataSourceHelper *source, float *confidence);
 
 }  // namespace android
 
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index c83f7ce..cbe8556 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -123,7 +123,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-MPEG2TSExtractor::MPEG2TSExtractor(DataSourceBase *source)
+MPEG2TSExtractor::MPEG2TSExtractor(DataSourceHelper *source)
     : mDataSource(source),
       mParser(new ATSParser),
       mLastSyncEvent(0),
@@ -131,6 +131,10 @@
     init();
 }
 
+MPEG2TSExtractor::~MPEG2TSExtractor() {
+    delete mDataSource;
+}
+
 size_t MPEG2TSExtractor::countTracks() {
     return mSourceImpls.size();
 }
@@ -652,7 +656,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-bool SniffMPEG2TS(DataSourceBase *source, float *confidence) {
+bool SniffMPEG2TS(DataSourceHelper *source, float *confidence) {
     for (int i = 0; i < 5; ++i) {
         char header;
         if (source->readAt(kTSPacketSize * i, &header, 1) != 1
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/MPEG2TSExtractor.h
index cbdd3cb..cdaede3 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.h
@@ -20,7 +20,8 @@
 #define MPEG2_TS_EXTRACTOR_H_
 
 #include <media/stagefright/foundation/ABase.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/MediaTrack.h>
 #include <media/stagefright/MetaDataBase.h>
 #include <utils/threads.h>
@@ -34,12 +35,12 @@
 struct AMessage;
 struct AnotherPacketSource;
 struct ATSParser;
-class DataSourceBase;
+struct CDataSource;
 struct MPEG2TSSource;
 class String8;
 
-struct MPEG2TSExtractor : public MediaExtractor {
-    explicit MPEG2TSExtractor(DataSourceBase *source);
+struct MPEG2TSExtractor : public MediaExtractorPluginHelper {
+    explicit MPEG2TSExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -52,12 +53,15 @@
     virtual uint32_t flags() const;
     virtual const char * name() { return "MPEG2TSExtractor"; }
 
+protected:
+    virtual ~MPEG2TSExtractor();
+
 private:
     friend struct MPEG2TSSource;
 
     mutable Mutex mLock;
 
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
 
     sp<ATSParser> mParser;
 
@@ -81,9 +85,9 @@
     // Try to feed more data from source to parser.
     // |isInit| means this function is called inside init(). This is a signal to
     // save SyncEvent so that init() can add SyncPoint after it updates |mSourceImpls|.
-    // This function returns OK if expected amount of data is fed from DataSourceBase to
+    // This function returns OK if expected amount of data is fed from DataSourceHelper to
     // parser and is successfully parsed. Otherwise, various error codes could be
-    // returned, e.g., ERROR_END_OF_STREAM, or no data availalbe from DataSourceBase, or
+    // returned, e.g., ERROR_END_OF_STREAM, or no data available from DataSourceHelper, or
     // the data has syntax error during parsing, etc.
     status_t feedMore(bool isInit = false);
     status_t seek(int64_t seekTimeUs,
@@ -101,7 +105,7 @@
     DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
 };
 
-bool SniffMPEG2TS(DataSourceBase *source, float *confidence);
+bool SniffMPEG2TS(DataSourceHelper *source, float *confidence);
 
 }  // namespace android
 
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index b2fe69c..4e97921 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -71,7 +71,7 @@
 
 struct MyOggExtractor {
     MyOggExtractor(
-            DataSourceBase *source,
+            DataSourceHelper *source,
             const char *mimeType,
             size_t numHeaders,
             int64_t seekPreRollUs);
@@ -110,7 +110,7 @@
         int64_t mTimeUs;
     };
 
-    DataSourceBase *mSource;
+    DataSourceHelper *mSource;
     off64_t mOffset;
     Page mCurrentPage;
     uint64_t mCurGranulePosition;
@@ -169,7 +169,7 @@
 };
 
 struct MyVorbisExtractor : public MyOggExtractor {
-    explicit MyVorbisExtractor(DataSourceBase *source)
+    explicit MyVorbisExtractor(DataSourceHelper *source)
         : MyOggExtractor(source,
                 MEDIA_MIMETYPE_AUDIO_VORBIS,
                 /* numHeaders */ 3,
@@ -197,7 +197,7 @@
     static const int32_t kOpusSampleRate = 48000;
     static const int64_t kOpusSeekPreRollUs = 80000; // 80 ms
 
-    explicit MyOpusExtractor(DataSourceBase *source)
+    explicit MyOpusExtractor(DataSourceHelper *source)
         : MyOggExtractor(source, MEDIA_MIMETYPE_AUDIO_OPUS, /*numHeaders*/ 2, kOpusSeekPreRollUs),
           mChannelCount(0),
           mCodecDelay(0),
@@ -296,7 +296,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 MyOggExtractor::MyOggExtractor(
-        DataSourceBase *source,
+        DataSourceHelper *source,
         const char *mimeType,
         size_t numHeaders,
         int64_t seekPreRollUs)
@@ -1193,7 +1193,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-OggExtractor::OggExtractor(DataSourceBase *source)
+OggExtractor::OggExtractor(DataSourceHelper *source)
     : mDataSource(source),
       mInitCheck(NO_INIT),
       mImpl(NULL) {
@@ -1220,6 +1220,7 @@
 OggExtractor::~OggExtractor() {
     delete mImpl;
     mImpl = NULL;
+    delete mDataSource;
 }
 
 size_t OggExtractor::countTracks() {
@@ -1248,19 +1249,20 @@
     return mImpl->getFileMetaData(meta);
 }
 
-static MediaExtractor* CreateExtractor(
-        DataSourceBase *source,
+static CMediaExtractor* CreateExtractor(
+        CDataSource *source,
         void *) {
-    return new OggExtractor(source);
+    return wrap(new OggExtractor(new DataSourceHelper(source)));
 }
 
-static MediaExtractor::CreatorFunc Sniff(
-        DataSourceBase *source,
+static CreatorFunc Sniff(
+        CDataSource *source,
         float *confidence,
         void **,
-        MediaExtractor::FreeMetaFunc *) {
+        FreeMetaFunc *) {
+    DataSourceHelper helper(source);
     char tmp[4];
-    if (source->readAt(0, tmp, 4) < 4 || memcmp(tmp, "OggS", 4)) {
+    if (helper.readAt(0, tmp, 4) < 4 || memcmp(tmp, "OggS", 4)) {
         return NULL;
     }
 
@@ -1272,9 +1274,9 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("8cc5cd06-f772-495e-8a62-cba9649374e9"),
         1, // version
         "Ogg Extractor",
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/OggExtractor.h
index 9fe2944..fbd4663 100644
--- a/media/extractors/ogg/OggExtractor.h
+++ b/media/extractors/ogg/OggExtractor.h
@@ -19,19 +19,19 @@
 #define OGG_EXTRACTOR_H_
 
 #include <utils/Errors.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 
 namespace android {
 
 struct AMessage;
-class DataSourceBase;
 class String8;
 
 struct MyOggExtractor;
 struct OggSource;
 
-struct OggExtractor : public MediaExtractor {
-    explicit OggExtractor(DataSourceBase *source);
+struct OggExtractor : public MediaExtractorPluginHelper {
+    explicit OggExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -46,7 +46,7 @@
 private:
     friend struct OggSource;
 
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     status_t mInitCheck;
 
     MyOggExtractor *mImpl;
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
index f5a1b01..c739c2a 100644
--- a/media/extractors/wav/WAVExtractor.cpp
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -57,7 +57,7 @@
 
 struct WAVSource : public MediaTrack {
     WAVSource(
-            DataSourceBase *dataSource,
+            DataSourceHelper *dataSource,
             MetaDataBase &meta,
             uint16_t waveFormat,
             int32_t bitsPerSample,
@@ -78,7 +78,7 @@
 private:
     static const size_t kMaxFrameSize;
 
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     MetaDataBase &mMeta;
     uint16_t mWaveFormat;
     int32_t mSampleRate;
@@ -94,7 +94,7 @@
     WAVSource &operator=(const WAVSource &);
 };
 
-WAVExtractor::WAVExtractor(DataSourceBase *source)
+WAVExtractor::WAVExtractor(DataSourceHelper *source)
     : mDataSource(source),
       mValidFormat(false),
       mChannelMask(CHANNEL_MASK_USE_CHANNEL_ORDER) {
@@ -102,6 +102,7 @@
 }
 
 WAVExtractor::~WAVExtractor() {
+    delete mDataSource;
 }
 
 status_t WAVExtractor::getMetaData(MetaDataBase &meta) {
@@ -347,7 +348,7 @@
 const size_t WAVSource::kMaxFrameSize = 32768;
 
 WAVSource::WAVSource(
-        DataSourceBase *dataSource,
+        DataSourceHelper *dataSource,
         MetaDataBase &meta,
         uint16_t waveFormat,
         int32_t bitsPerSample,
@@ -544,27 +545,30 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-static MediaExtractor* CreateExtractor(
-        DataSourceBase *source,
+static CMediaExtractor* CreateExtractor(
+        CDataSource *source,
         void *) {
-    return new WAVExtractor(source);
+    return wrap(new WAVExtractor(new DataSourceHelper(source)));
 }
 
-static MediaExtractor::CreatorFunc Sniff(
-        DataSourceBase *source,
+static CreatorFunc Sniff(
+        CDataSource *source,
         float *confidence,
         void **,
-        MediaExtractor::FreeMetaFunc *) {
+        FreeMetaFunc *) {
+    DataSourceHelper *helper = new DataSourceHelper(source);
     char header[12];
-    if (source->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+    if (helper->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+        delete helper;
         return NULL;
     }
 
     if (memcmp(header, "RIFF", 4) || memcmp(&header[8], "WAVE", 4)) {
+        delete helper;
         return NULL;
     }
 
-    MediaExtractor *extractor = new WAVExtractor(source);
+    WAVExtractor *extractor = new WAVExtractor(helper); // extractor owns the helper
     int numTracks = extractor->countTracks();
     delete extractor;
     if (numTracks == 0) {
@@ -579,9 +583,9 @@
 extern "C" {
 // This is the only symbol that needs to be exported
 __attribute__ ((visibility ("default")))
-MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ExtractorDef GETEXTRACTORDEF() {
     return {
-        MediaExtractor::EXTRACTORDEF_VERSION,
+        EXTRACTORDEF_VERSION,
         UUID("7d613858-5837-4a38-84c5-332d1cddee27"),
         1, // version
         "WAV Extractor",
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/WAVExtractor.h
index 467d0b7..5136aa8 100644
--- a/media/extractors/wav/WAVExtractor.h
+++ b/media/extractors/wav/WAVExtractor.h
@@ -19,18 +19,19 @@
 #define WAV_EXTRACTOR_H_
 
 #include <utils/Errors.h>
-#include <media/MediaExtractor.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/MetaDataBase.h>
 
 namespace android {
 
 struct AMessage;
-class DataSourceBase;
+struct CDataSource;
 class String8;
 
-class WAVExtractor : public MediaExtractor {
+class WAVExtractor : public MediaExtractorPluginHelper {
 public:
-    explicit WAVExtractor(DataSourceBase *source);
+    explicit WAVExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
@@ -42,7 +43,7 @@
     virtual ~WAVExtractor();
 
 private:
-    DataSourceBase *mDataSource;
+    DataSourceHelper *mDataSource;
     status_t mInitCheck;
     bool mValidFormat;
     uint16_t mWaveFormat;
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
deleted file mode 100644
index a0b981c..0000000
--- a/media/libaaudio/examples/input_monitor/jni/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/input_monitor.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor_callback
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libaaudio_prebuilt
-LOCAL_SRC_FILES := libaaudio.so
-LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/jni/Application.mk b/media/libaaudio/examples/input_monitor/jni/Application.mk
deleted file mode 100644
index e74475c..0000000
--- a/media/libaaudio/examples/input_monitor/jni/Application.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-# TODO remove then when we support other architectures
-APP_ABI := arm64-v8a
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/loopback/jni/Android.mk b/media/libaaudio/examples/loopback/jni/Android.mk
deleted file mode 100644
index aebe877..0000000
--- a/media/libaaudio/examples/loopback/jni/Android.mk
+++ /dev/null
@@ -1,16 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/loopback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_STATIC_LIBRARIES := libsndfile
-LOCAL_SHARED_LIBRARIES := libaaudio libaudioutils
-LOCAL_MODULE := aaudio_loopback
-include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/loopback/jni/Application.mk b/media/libaaudio/examples/loopback/jni/Application.mk
deleted file mode 100644
index ba44f37..0000000
--- a/media/libaaudio/examples/loopback/jni/Application.mk
+++ /dev/null
@@ -1 +0,0 @@
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 91ebf73..84f9c22 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -338,7 +338,7 @@
     aaudio_sharing_mode_t requestedInputSharingMode  = AAUDIO_SHARING_MODE_SHARED;
     int                   requestedInputChannelCount = NUM_INPUT_CHANNELS;
     aaudio_format_t       requestedInputFormat       = AAUDIO_FORMAT_UNSPECIFIED;
-    int32_t               requestedInputCapacity     = -1;
+    int32_t               requestedInputCapacity     = AAUDIO_UNSPECIFIED;
     aaudio_performance_mode_t inputPerformanceLevel  = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
 
     int32_t               outputFramesPerBurst = 0;
@@ -459,15 +459,8 @@
     argParser.setPerformanceMode(inputPerformanceLevel);
     argParser.setChannelCount(requestedInputChannelCount);
     argParser.setSharingMode(requestedInputSharingMode);
-
-    // Make sure the input buffer has plenty of capacity.
-    // Extra capacity on input should not increase latency if we keep it drained.
-    int32_t inputBufferCapacity = requestedInputCapacity;
-    if (inputBufferCapacity < 0) {
-        int32_t outputBufferCapacity = AAudioStream_getBufferCapacityInFrames(outputStream);
-        inputBufferCapacity = 2 * outputBufferCapacity;
-    }
-    argParser.setBufferCapacity(inputBufferCapacity);
+    // Warning! If you change input capacity then you may not get a FAST track on Legacy path.
+    argParser.setBufferCapacity(requestedInputCapacity);
 
     result = recorder.open(argParser);
     if (result != AAUDIO_OK) {
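
The loopback test now passes AAUDIO_UNSPECIFIED straight through instead of inflating the input capacity, because an enlarged capacity can disqualify the capture stream from a FAST (low-latency) track on the legacy path. For orientation, a minimal sketch of what that means at the raw NDK AAudio level; the example's argParser is just a wrapper around calls like these, and the exact set of builder calls here is illustrative.

    // Hedged sketch (assumes <aaudio/AAudio.h>): leaving the capacity
    // unspecified lets AAudio pick a value compatible with a FAST input track.
    AAudioStreamBuilder *builder = nullptr;
    AAudio_createStreamBuilder(&builder);
    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
    AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
    AAudioStreamBuilder_setBufferCapacityInFrames(builder, AAUDIO_UNSPECIFIED);
    AAudioStream *stream = nullptr;
    aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &stream);
    AAudioStreamBuilder_delete(builder);
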
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
deleted file mode 100644
index 1a1bd43..0000000
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/write_sine.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_callback
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libaaudio_prebuilt
-LOCAL_SRC_FILES := libaaudio.so
-LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/write_sine/jni/Application.mk b/media/libaaudio/examples/write_sine/jni/Application.mk
deleted file mode 100644
index ba44f37..0000000
--- a/media/libaaudio/examples/write_sine/jni/Application.mk
+++ /dev/null
@@ -1 +0,0 @@
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index f8e34d1..214f888 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -196,7 +196,7 @@
 
 int32_t AudioEndpoint::getEmptyFramesAvailable()
 {
-    return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
+    return mDataQueue->getEmptyFramesAvailable();
 }
 
 int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
@@ -206,15 +206,15 @@
 
 int32_t AudioEndpoint::getFullFramesAvailable()
 {
-    return mDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+    return mDataQueue->getFullFramesAvailable();
 }
 
 void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
-    mDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
+    mDataQueue->advanceWriteIndex(deltaFrames);
 }
 
 void AudioEndpoint::advanceReadIndex(int32_t deltaFrames) {
-    mDataQueue->getFifoControllerBase()->advanceReadIndex(deltaFrames);
+    mDataQueue->advanceReadIndex(deltaFrames);
 }
 
 void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead)
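
AudioEndpoint no longer reaches through getFifoControllerBase(); it calls the proxy methods that FifoBuffer now forwards to its controller (added in the FifoBuffer.h hunk below). The call shape is the same, just one level shallower:

    // Condensed before/after of the call shape; names as in the hunks above.
    int32_t writable = mDataQueue->getEmptyFramesAvailable();  // was getFifoControllerBase()->getEmptyFramesAvailable()
    mDataQueue->advanceWriteIndex(deltaFrames);                // was getFifoControllerBase()->advanceWriteIndex(deltaFrames)
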
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index b09258e..f5113f2 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -23,30 +23,26 @@
 #include <utils/Log.h>
 
 #include <algorithm>
+#include <memory>
 
 #include "FifoControllerBase.h"
 #include "FifoController.h"
 #include "FifoControllerIndirect.h"
 #include "FifoBuffer.h"
 
-using namespace android; // TODO just import names needed
+using android::FifoBuffer;
+using android::fifo_frames_t;
 
 FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
-        : mFrameCapacity(capacityInFrames)
-        , mBytesPerFrame(bytesPerFrame)
-        , mStorage(nullptr)
-        , mFramesReadCount(0)
-        , mFramesUnderrunCount(0)
-        , mUnderrunCount(0)
+        : mBytesPerFrame(bytesPerFrame)
 {
-    // TODO Handle possible failures to allocate. Move out of constructor?
-    mFifo = new FifoController(capacityInFrames, capacityInFrames);
+    mFifo = std::make_unique<FifoController>(capacityInFrames, capacityInFrames);
     // allocate buffer
     int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
     mStorage = new uint8_t[bytesPerBuffer];
     mStorageOwned = true;
-    ALOGV("capacityInFrames = %d, bytesPerFrame = %d",
-          capacityInFrames, bytesPerFrame);
+    ALOGV("%s() capacityInFrames = %d, bytesPerFrame = %d",
+          __func__, capacityInFrames, bytesPerFrame);
 }
 
 FifoBuffer::FifoBuffer( int32_t   bytesPerFrame,
@@ -55,14 +51,10 @@
                         fifo_counter_t *  writeIndexAddress,
                         void *  dataStorageAddress
                         )
-        : mFrameCapacity(capacityInFrames)
-        , mBytesPerFrame(bytesPerFrame)
+        : mBytesPerFrame(bytesPerFrame)
         , mStorage(static_cast<uint8_t *>(dataStorageAddress))
-        , mFramesReadCount(0)
-        , mFramesUnderrunCount(0)
-        , mUnderrunCount(0)
 {
-    mFifo = new FifoControllerIndirect(capacityInFrames,
+    mFifo = std::make_unique<FifoControllerIndirect>(capacityInFrames,
                                        capacityInFrames,
                                        readIndexAddress,
                                        writeIndexAddress);
@@ -73,10 +65,8 @@
     if (mStorageOwned) {
         delete[] mStorage;
     }
-    delete mFifo;
 }
 
-
 int32_t FifoBuffer::convertFramesToBytes(fifo_frames_t frames) {
     return frames * mBytesPerFrame;
 }
@@ -87,11 +77,12 @@
     wrappingBuffer->data[1] = nullptr;
     wrappingBuffer->numFrames[1] = 0;
     if (framesAvailable > 0) {
+        fifo_frames_t capacity = mFifo->getCapacity();
         uint8_t *source = &mStorage[convertFramesToBytes(startIndex)];
         // Does the available data cross the end of the FIFO?
-        if ((startIndex + framesAvailable) > mFrameCapacity) {
+        if ((startIndex + framesAvailable) > capacity) {
             wrappingBuffer->data[0] = source;
-            fifo_frames_t firstFrames = mFrameCapacity - startIndex;
+            fifo_frames_t firstFrames = capacity - startIndex;
             wrappingBuffer->numFrames[0] = firstFrames;
             wrappingBuffer->data[1] = &mStorage[0];
             wrappingBuffer->numFrames[1] = framesAvailable - firstFrames;
@@ -107,7 +98,8 @@
 
 fifo_frames_t FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
     // The FIFO might be overfull so clip to capacity.
-    fifo_frames_t framesAvailable = std::min(mFifo->getFullFramesAvailable(), mFrameCapacity);
+    fifo_frames_t framesAvailable = std::min(mFifo->getFullFramesAvailable(),
+                                             mFifo->getCapacity());
     fifo_frames_t startIndex = mFifo->getReadIndex();
     fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
     return framesAvailable;
@@ -115,7 +107,8 @@
 
 fifo_frames_t FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
     // The FIFO might have underrun so clip to capacity.
-    fifo_frames_t framesAvailable = std::min(mFifo->getEmptyFramesAvailable(), mFrameCapacity);
+    fifo_frames_t framesAvailable = std::min(mFifo->getEmptyFramesAvailable(),
+                                             mFifo->getCapacity());
     fifo_frames_t startIndex = mFifo->getWriteIndex();
     fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
     return framesAvailable;
@@ -183,23 +176,6 @@
     return framesWritten;
 }
 
-fifo_frames_t FifoBuffer::readNow(void *buffer, fifo_frames_t numFrames) {
-    mLastReadSize = numFrames;
-    fifo_frames_t framesLeft = numFrames;
-    fifo_frames_t framesRead = read(buffer, numFrames);
-    framesLeft -= framesRead;
-    mFramesReadCount += framesRead;
-    mFramesUnderrunCount += framesLeft;
-    // Zero out any samples we could not set.
-    if (framesLeft > 0) {
-        mUnderrunCount++;
-        int32_t bytesToZero = convertFramesToBytes(framesLeft);
-        memset(buffer, 0, bytesToZero);
-    }
-
-    return framesRead;
-}
-
 fifo_frames_t FifoBuffer::getThreshold() {
     return mFifo->getThreshold();
 }
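
With mFifo held in a std::unique_ptr<FifoControllerBase>, the explicit `delete mFifo` in the destructor goes away; only mStorage still needs manual management because it may alias shared memory (see the comment added in FifoBuffer.h below). A condensed ownership sketch under those assumptions; class and member names are taken from the hunks, headers assumed.

    // Hedged sketch of the ownership split after this change.
    class FifoBufferSketch {
        std::unique_ptr<android::FifoControllerBase> mFifo;   // released automatically
        uint8_t *mStorage = nullptr;    // may point into shared memory, so no smart pointer
        bool     mStorageOwned = false;
    public:
        FifoBufferSketch(int32_t bytesPerFrame, int32_t capacityInFrames)
            : mFifo(std::make_unique<android::FifoController>(capacityInFrames, capacityInFrames)),
              mStorage(new uint8_t[bytesPerFrame * capacityInFrames]),
              mStorageOwned(true) {}
        ~FifoBufferSketch() {
            if (mStorageOwned) delete[] mStorage;   // mFifo cleans itself up
        }
    };
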
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index f5a9e27..0d188c4 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -17,6 +17,7 @@
 #ifndef FIFO_FIFO_BUFFER_H
 #define FIFO_FIFO_BUFFER_H
 
+#include <memory>
 #include <stdint.h>
 
 #include "FifoControllerBase.h"
@@ -77,24 +78,12 @@
      */
     fifo_frames_t getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
 
-    /**
-     * Copy data from the FIFO into the buffer.
-     * @param buffer
-     * @param numFrames
-     * @return
-     */
-    fifo_frames_t readNow(void *buffer, fifo_frames_t numFrames);
-
-    int64_t getNextReadTime(int32_t frameRate);
-
-    int32_t getUnderrunCount() const { return mUnderrunCount; }
-
-    FifoControllerBase *getFifoControllerBase() { return mFifo; }
-
     int32_t getBytesPerFrame() {
         return mBytesPerFrame;
     }
 
+    // Proxy methods for the internal FifoController
+
     fifo_counter_t getReadCounter() {
         return mFifo->getReadCounter();
     }
@@ -111,6 +100,22 @@
         mFifo->setWriteCounter(n);
     }
 
+    void advanceReadIndex(fifo_frames_t numFrames) {
+        mFifo->advanceReadIndex(numFrames);
+    }
+
+    void advanceWriteIndex(fifo_frames_t numFrames) {
+        mFifo->advanceWriteIndex(numFrames);
+    }
+
+    fifo_frames_t getEmptyFramesAvailable() {
+        return mFifo->getEmptyFramesAvailable();
+    }
+
+    fifo_frames_t getFullFramesAvailable() {
+        return mFifo->getFullFramesAvailable();
+    }
+
     /*
      * This is generally only called before or after the buffer is used.
      */
@@ -121,15 +126,12 @@
     void fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
                             int32_t framesAvailable, int32_t startIndex);
 
-    const fifo_frames_t mFrameCapacity;
-    const int32_t mBytesPerFrame;
-    uint8_t *mStorage;
-    bool mStorageOwned; // did this object allocate the storage?
-    FifoControllerBase *mFifo;
-    fifo_counter_t mFramesReadCount;
-    fifo_counter_t mFramesUnderrunCount;
-    int32_t mUnderrunCount; // need? just use frames
-    int32_t mLastReadSize;
+    const int32_t             mBytesPerFrame;
+    // We do not use a std::unique_ptr for mStorage because it is often a pointer to
+    // memory shared between processes and cannot be deleted trivially.
+    uint8_t                  *mStorage = nullptr;
+    bool                      mStorageOwned = false; // did this object allocate the storage?
+    std::unique_ptr<FifoControllerBase> mFifo{};
 };
 
 }  // android
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
index 79d98a1..057a94e 100644
--- a/media/libaaudio/src/fifo/FifoController.h
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -30,7 +30,7 @@
 class FifoController : public FifoControllerBase
 {
 public:
-    FifoController(fifo_counter_t bufferSize, fifo_counter_t threshold)
+    FifoController(fifo_frames_t bufferSize, fifo_frames_t threshold)
     : FifoControllerBase(bufferSize, threshold)
     , mReadCounter(0)
     , mWriteCounter(0)
diff --git a/media/libaaudio/tests/test_atomic_fifo.cpp b/media/libaaudio/tests/test_atomic_fifo.cpp
index 0085217..a09b74c 100644
--- a/media/libaaudio/tests/test_atomic_fifo.cpp
+++ b/media/libaaudio/tests/test_atomic_fifo.cpp
@@ -96,14 +96,14 @@
     void checkWrappingBuffer() {
         WrappingBuffer wrappingBuffer;
         fifo_frames_t framesAvailable =
-                mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+                mFifoBuffer.getEmptyFramesAvailable();
         fifo_frames_t wrapAvailable = mFifoBuffer.getEmptyRoomAvailable(&wrappingBuffer);
         EXPECT_EQ(framesAvailable, wrapAvailable);
         fifo_frames_t bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
         EXPECT_EQ(framesAvailable, bothAvailable);
 
         framesAvailable =
-                mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+                mFifoBuffer.getFullFramesAvailable();
         wrapAvailable = mFifoBuffer.getFullDataAvailable(&wrappingBuffer);
         EXPECT_EQ(framesAvailable, wrapAvailable);
         bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
@@ -113,7 +113,7 @@
     // Write data but do not overflow.
     void writeData(fifo_frames_t numFrames) {
         fifo_frames_t framesAvailable =
-                mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+                mFifoBuffer.getEmptyFramesAvailable();
         fifo_frames_t framesToWrite = std::min(framesAvailable, numFrames);
         for (int i = 0; i < framesToWrite; i++) {
             mData[i] = mNextWriteIndex++;
@@ -125,7 +125,7 @@
     // Read data but do not underflow.
     void verifyData(fifo_frames_t numFrames) {
         fifo_frames_t framesAvailable =
-                mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+                mFifoBuffer.getFullFramesAvailable();
         fifo_frames_t framesToRead = std::min(framesAvailable, numFrames);
         fifo_frames_t actual = mFifoBuffer.read(mData, framesToRead);
         ASSERT_EQ(framesToRead, actual);
@@ -178,12 +178,12 @@
     void checkRandomWriteRead() {
         for (int i = 0; i < 20; i++) {
             fifo_frames_t framesEmpty =
-                    mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+                    mFifoBuffer.getEmptyFramesAvailable();
             fifo_frames_t numFrames = (fifo_frames_t)(drand48() * framesEmpty);
             writeData(numFrames);
 
             fifo_frames_t framesFull =
-                    mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+                    mFifoBuffer.getFullFramesAvailable();
             numFrames = (fifo_frames_t)(drand48() * framesFull);
             verifyData(numFrames);
         }
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index b1cb0e7..cf11936 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -52,6 +52,7 @@
                 )
     : mStatus(NO_INIT), mOpPackageName(opPackageName)
 {
+    AutoMutex lock(mConstructLock);
     mStatus = set(type, uuid, priority, cbf, user, sessionId, io);
 }
 
@@ -85,6 +86,7 @@
         }
     }
 
+    AutoMutex lock(mConstructLock);
     mStatus = set(pType, pUuid, priority, cbf, user, sessionId, io);
 }
 
@@ -430,14 +432,15 @@
 }
 
 status_t AudioEffect::getEffectDescriptor(const effect_uuid_t *uuid,
-        effect_descriptor_t *descriptor) /*const*/
+                                          const effect_uuid_t *type,
+                                          uint32_t preferredTypeFlag,
+                                          effect_descriptor_t *descriptor)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-    return af->getEffectDescriptor(uuid, descriptor);
+    return af->getEffectDescriptor(uuid, type, preferredTypeFlag, descriptor);
 }
 
-
 status_t AudioEffect::queryDefaultPreProcessing(audio_session_t audioSession,
                                           effect_descriptor_t *descriptors,
                                           uint32_t *count)
@@ -446,6 +449,95 @@
     if (aps == 0) return PERMISSION_DENIED;
     return aps->queryDefaultPreProcessing(audioSession, descriptors, count);
 }
+
+status_t AudioEffect::newEffectUniqueId(audio_unique_id_t* id)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return PERMISSION_DENIED;
+    *id = af->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+    return NO_ERROR;
+}
+
+status_t AudioEffect::addSourceDefaultEffect(const char *typeStr,
+                                             const String16& opPackageName,
+                                             const char *uuidStr,
+                                             int32_t priority,
+                                             audio_source_t source,
+                                             audio_unique_id_t *id)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    if (typeStr == NULL && uuidStr == NULL) return BAD_VALUE;
+
+    // Convert type & uuid from string to effect_uuid_t.
+    effect_uuid_t type;
+    if (typeStr != NULL) {
+        status_t res = stringToGuid(typeStr, &type);
+        if (res != OK) return res;
+    } else {
+        type = *EFFECT_UUID_NULL;
+    }
+
+    effect_uuid_t uuid;
+    if (uuidStr != NULL) {
+        status_t res = stringToGuid(uuidStr, &uuid);
+        if (res != OK) return res;
+    } else {
+        uuid = *EFFECT_UUID_NULL;
+    }
+
+    return aps->addSourceDefaultEffect(&type, opPackageName, &uuid, priority, source, id);
+}
+
+status_t AudioEffect::addStreamDefaultEffect(const char *typeStr,
+                                             const String16& opPackageName,
+                                             const char *uuidStr,
+                                             int32_t priority,
+                                             audio_usage_t usage,
+                                             audio_unique_id_t *id)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    if (typeStr == NULL && uuidStr == NULL) return BAD_VALUE;
+
+    // Convert type & uuid from string to effect_uuid_t.
+    effect_uuid_t type;
+    if (typeStr != NULL) {
+        status_t res = stringToGuid(typeStr, &type);
+        if (res != OK) return res;
+    } else {
+        type = *EFFECT_UUID_NULL;
+    }
+
+    effect_uuid_t uuid;
+    if (uuidStr != NULL) {
+        status_t res = stringToGuid(uuidStr, &uuid);
+        if (res != OK) return res;
+    } else {
+        uuid = *EFFECT_UUID_NULL;
+    }
+
+    return aps->addStreamDefaultEffect(&type, opPackageName, &uuid, priority, usage, id);
+}
+
+status_t AudioEffect::removeSourceDefaultEffect(audio_unique_id_t id)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    return aps->removeSourceDefaultEffect(id);
+}
+
+status_t AudioEffect::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    return aps->removeStreamDefaultEffect(id);
+}
+
 // -------------------------------------------------------------------------
 
 status_t AudioEffect::stringToGuid(const char *str, effect_uuid_t *guid)
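
The new static helpers accept the effect type and implementation uuid as strings and convert them with stringToGuid() before crossing into IAudioPolicyService. A hedged usage sketch; the uuid string and package name below are placeholders, not real identifiers, and the caller is assumed to hold the required permissions.

    // Hedged usage sketch of the new default-effect helpers.
    audio_unique_id_t effectId = 0;
    status_t st = AudioEffect::addStreamDefaultEffect(
            "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",   // effect type uuid string (placeholder)
            String16("com.example.app"),              // opPackageName (placeholder)
            nullptr,                                  // uuidStr: let the type select the implementation
            0 /* priority */,
            AUDIO_USAGE_MEDIA,
            &effectId);
    if (st == NO_ERROR) {
        // ... later, when the default effect is no longer wanted:
        AudioEffect::removeStreamDefaultEffect(effectId);
    }
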
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index f9df5b1..e2de8e7 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -865,9 +865,9 @@
     } else if (waitCount == 0) {
         requested = &ClientProxy::kNonBlocking;
     } else if (waitCount > 0) {
-        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
+        time_t ms = WAIT_PERIOD_MS * (time_t) waitCount;
         timeout.tv_sec = ms / 1000;
-        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
+        timeout.tv_nsec = (long) (ms % 1000) * 1000000;
         requested = &timeout;
     } else {
         ALOGE("%s invalid waitCount %d", __func__, waitCount);
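
The wait computation now uses time_t for the millisecond product and long for tv_nsec, matching the types struct timespec actually uses and removing the old narrowing cast to int. The arithmetic itself is unchanged; a standalone restatement, taking WAIT_PERIOD_MS as 10 ms purely for illustration:

    // Hedged sketch of the same conversion (assumes <time.h>).
    static struct timespec waitCountToTimespec(int waitCount) {
        const time_t kWaitPeriodMs = 10;                 // illustrative value
        time_t ms = kWaitPeriodMs * (time_t) waitCount;
        struct timespec ts;
        ts.tv_sec  = ms / 1000;
        ts.tv_nsec = (long) (ms % 1000) * 1000000;       // e.g. waitCount=250 -> {2 s, 500000000 ns}
        return ts;
    }
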
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index c072901..e260fd8 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -878,31 +878,25 @@
                                  flags, selectedDeviceId, portId);
 }
 
-status_t AudioSystem::startOutput(audio_io_handle_t output,
-                                  audio_stream_type_t stream,
-                                  audio_session_t session)
+status_t AudioSystem::startOutput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->startOutput(output, stream, session);
+    return aps->startOutput(portId);
 }
 
-status_t AudioSystem::stopOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session)
+status_t AudioSystem::stopOutput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->stopOutput(output, stream, session);
+    return aps->stopOutput(portId);
 }
 
-void AudioSystem::releaseOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session)
+void AudioSystem::releaseOutput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return;
-    aps->releaseOutput(output, stream, session);
+    aps->releaseOutput(portId);
 }
 
 status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
@@ -1244,18 +1238,18 @@
 
 status_t AudioSystem::startAudioSource(const struct audio_port_config *source,
                                        const audio_attributes_t *attributes,
-                                       audio_patch_handle_t *handle)
+                                       audio_port_handle_t *portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->startAudioSource(source, attributes, handle);
+    return aps->startAudioSource(source, attributes, portId);
 }
 
-status_t AudioSystem::stopAudioSource(audio_patch_handle_t handle)
+status_t AudioSystem::stopAudioSource(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->stopAudioSource(handle);
+    return aps->stopAudioSource(portId);
 }
 
 status_t AudioSystem::setMasterMono(bool mono)
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index ab9efe8..76c9bfb 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -706,6 +706,13 @@
         // force refresh of remaining frames by processAudioBuffer() as last
         // write before stop could be partial.
         mRefreshRemaining = true;
+
+        // for static track, clear the old flags when starting from stopped state
+        if (mSharedBuffer != 0) {
+            android_atomic_and(
+            ~(CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
+            &mCblk->mFlags);
+        }
     }
     mNewPosition = mPosition + mUpdatePeriod;
     int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
@@ -1634,9 +1641,9 @@
     } else if (waitCount == 0) {
         requested = &ClientProxy::kNonBlocking;
     } else if (waitCount > 0) {
-        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
+        time_t ms = WAIT_PERIOD_MS * (time_t) waitCount;
         timeout.tv_sec = ms / 1000;
-        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
+        timeout.tv_nsec = (long) (ms % 1000) * 1000000;
         requested = &timeout;
     } else {
         ALOGE("%s invalid waitCount %d", __func__, waitCount);
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index a018b22..b8156c6 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -286,7 +286,8 @@
                 struct timespec after;
                 clock_gettime(CLOCK_MONOTONIC, &after);
                 total.tv_sec += after.tv_sec - before.tv_sec;
-                long deltaNs = after.tv_nsec - before.tv_nsec;
+                // Use auto instead of long to avoid the google-runtime-int warning.
+                auto deltaNs = after.tv_nsec - before.tv_nsec;
                 if (deltaNs < 0) {
                     deltaNs += 1000000000;
                     total.tv_sec--;
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 37c62a8..00678c2 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -598,14 +598,18 @@
     }
 
     virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
-            effect_descriptor_t *pDescriptor) const
+                                         const effect_uuid_t *pType,
+                                         uint32_t preferredTypeFlag,
+                                         effect_descriptor_t *pDescriptor) const
     {
-        if (pUuid == NULL || pDescriptor == NULL) {
+        if (pUuid == NULL || pType == NULL || pDescriptor == NULL) {
             return BAD_VALUE;
         }
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.write(pUuid, sizeof(effect_uuid_t));
+        data.write(pType, sizeof(effect_uuid_t));
+        data.writeUint32(preferredTypeFlag);
         status_t status = remote()->transact(GET_EFFECT_DESCRIPTOR, data, &reply);
         if (status != NO_ERROR) {
             return status;
@@ -634,10 +638,10 @@
         sp<IEffect> effect;
 
         if (pDesc == NULL) {
-            return effect;
             if (status != NULL) {
                 *status = BAD_VALUE;
             }
+            return effect;
         }
 
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -949,7 +953,8 @@
             break;
     }
 
-    TimeCheck check("IAudioFlinger");
+    std::string tag("IAudioFlinger command " + std::to_string(code));
+    TimeCheck check(tag.c_str());
 
     switch (code) {
         case CREATE_TRACK: {
@@ -1276,8 +1281,11 @@
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             effect_uuid_t uuid;
             data.read(&uuid, sizeof(effect_uuid_t));
+            effect_uuid_t type;
+            data.read(&type, sizeof(effect_uuid_t));
+            uint32_t preferredTypeFlag = data.readUint32();
             effect_descriptor_t desc = {};
-            status_t status = getEffectDescriptor(&uuid, &desc);
+            status_t status = getEffectDescriptor(&uuid, &type, preferredTypeFlag, &desc);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->write(&desc, sizeof(effect_descriptor_t));
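
The proxy and stub sides of GET_EFFECT_DESCRIPTOR have to stay byte-for-byte symmetric: the client writes uuid, then type, then the preferred-type flag, and the service reads them back in exactly that order before calling the local getEffectDescriptor(). Condensed restatement of the wire format added in this change:

    // Proxy (client) side:
    data.write(pUuid, sizeof(effect_uuid_t));     // 1. implementation uuid
    data.write(pType, sizeof(effect_uuid_t));     // 2. type uuid
    data.writeUint32(preferredTypeFlag);          // 3. preferred EFFECT_FLAG_TYPE_* value
    // Stub (service) side, same order:
    data.read(&uuid, sizeof(effect_uuid_t));
    data.read(&type, sizeof(effect_uuid_t));
    uint32_t preferredTypeFlag = data.readUint32();
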
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 316105c..32a71f3 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -81,7 +81,11 @@
     GET_MASTER_MONO,
     GET_STREAM_VOLUME_DB,
     GET_SURROUND_FORMATS,
-    SET_SURROUND_FORMAT_ENABLED
+    SET_SURROUND_FORMAT_ENABLED,
+    ADD_STREAM_DEFAULT_EFFECT,
+    REMOVE_STREAM_DEFAULT_EFFECT,
+    ADD_SOURCE_DEFAULT_EFFECT,
+    REMOVE_SOURCE_DEFAULT_EFFECT
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -244,41 +248,29 @@
             return status;
         }
 
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session)
+    virtual status_t startOutput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(output);
-        data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t) session);
+        data.writeInt32((int32_t)portId);
         remote()->transact(START_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session)
+    virtual status_t stopOutput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(output);
-        data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t) session);
+        data.writeInt32((int32_t)portId);
         remote()->transact(STOP_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session)
+    virtual void releaseOutput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(output);
-        data.writeInt32((int32_t)stream);
-        data.writeInt32((int32_t)session);
+        data.writeInt32((int32_t)portId);
         remote()->transact(RELEASE_OUTPUT, data, &reply);
     }
 
@@ -752,11 +744,11 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle)
+                                      audio_port_handle_t *portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        if (source == NULL || attributes == NULL || handle == NULL) {
+        if (source == NULL || attributes == NULL || portId == NULL) {
             return BAD_VALUE;
         }
         data.write(source, sizeof(struct audio_port_config));
@@ -769,15 +761,15 @@
         if (status != NO_ERROR) {
             return status;
         }
-        *handle = (audio_patch_handle_t)reply.readInt32();
+        *portId = (audio_port_handle_t)reply.readInt32();
         return status;
     }
 
-    virtual status_t stopAudioSource(audio_patch_handle_t handle)
+    virtual status_t stopAudioSource(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(handle);
+        data.writeInt32(portId);
         status_t status = remote()->transact(STOP_AUDIO_SOURCE, data, &reply);
         if (status != NO_ERROR) {
             return status;
@@ -878,6 +870,77 @@
         }
         return reply.readInt32();
     }
+
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_usage_t usage,
+                                            audio_unique_id_t* id)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(type, sizeof(effect_uuid_t));
+        data.writeString16(opPackageName);
+        data.write(uuid, sizeof(effect_uuid_t));
+        data.writeInt32(priority);
+        data.writeInt32((int32_t) usage);
+        status_t status = remote()->transact(ADD_STREAM_DEFAULT_EFFECT, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = static_cast <status_t> (reply.readInt32());
+        *id = reply.readInt32();
+        return status;
+    }
+
+    virtual status_t removeStreamDefaultEffect(audio_unique_id_t id)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(id);
+        status_t status = remote()->transact(REMOVE_STREAM_DEFAULT_EFFECT, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return static_cast <status_t> (reply.readInt32());
+    }
+
+    virtual status_t addSourceDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_source_t source,
+                                            audio_unique_id_t* id)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(type, sizeof(effect_uuid_t));
+        data.writeString16(opPackageName);
+        data.write(uuid, sizeof(effect_uuid_t));
+        data.writeInt32(priority);
+        data.writeInt32((int32_t) source);
+        status_t status = remote()->transact(ADD_SOURCE_DEFAULT_EFFECT, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = static_cast <status_t> (reply.readInt32());
+        *id = reply.readInt32();
+        return status;
+    }
+
+    virtual status_t removeSourceDefaultEffect(audio_unique_id_t id)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(id);
+        status_t status = remote()->transact(REMOVE_SOURCE_DEFAULT_EFFECT, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return static_cast <status_t> (reply.readInt32());
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -947,7 +1010,8 @@
             break;
     }
 
-    TimeCheck check("IAudioPolicyService");
+    std::string tag("IAudioPolicyService command " + std::to_string(code));
+    TimeCheck check(tag.c_str());
 
     switch (code) {
         case SET_DEVICE_CONNECTION_STATE: {
@@ -1074,34 +1138,22 @@
 
         case START_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            audio_stream_type_t stream =
-                                static_cast <audio_stream_type_t>(data.readInt32());
-            audio_session_t session = (audio_session_t)data.readInt32();
-            reply->writeInt32(static_cast <uint32_t>(startOutput(output,
-                                                                 stream,
-                                                                 session)));
+            const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+            reply->writeInt32(static_cast <uint32_t>(startOutput(portId)));
             return NO_ERROR;
         } break;
 
         case STOP_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            audio_stream_type_t stream =
-                                static_cast <audio_stream_type_t>(data.readInt32());
-            audio_session_t session = (audio_session_t)data.readInt32();
-            reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
-                                                                stream,
-                                                                session)));
+            const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+            reply->writeInt32(static_cast <uint32_t>(stopOutput(portId)));
             return NO_ERROR;
         } break;
 
         case RELEASE_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            audio_stream_type_t stream = (audio_stream_type_t)data.readInt32();
-            audio_session_t session = (audio_session_t)data.readInt32();
-            releaseOutput(output, stream, session);
+            const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+            releaseOutput(portId);
             return NO_ERROR;
         } break;
 
@@ -1495,17 +1547,17 @@
             audio_attributes_t attributes = {};
             data.read(&attributes, sizeof(audio_attributes_t));
             sanetizeAudioAttributes(&attributes);
-            audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
-            status_t status = startAudioSource(&source, &attributes, &handle);
+            audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+            status_t status = startAudioSource(&source, &attributes, &portId);
             reply->writeInt32(status);
-            reply->writeInt32(handle);
+            reply->writeInt32(portId);
             return NO_ERROR;
         } break;
 
         case STOP_AUDIO_SOURCE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_patch_handle_t handle = (audio_patch_handle_t) data.readInt32();
-            status_t status = stopAudioSource(handle);
+            audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
+            status_t status = stopAudioSource(portId);
             reply->writeInt32(status);
             return NO_ERROR;
         } break;
@@ -1584,6 +1636,80 @@
             return NO_ERROR;
         }
 
+        case ADD_STREAM_DEFAULT_EFFECT: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            effect_uuid_t type;
+            status_t status = data.read(&type, sizeof(effect_uuid_t));
+            if (status != NO_ERROR) {
+                return status;
+            }
+            String16 opPackageName;
+            status = data.readString16(&opPackageName);
+            if (status != NO_ERROR) {
+                return status;
+            }
+            effect_uuid_t uuid;
+            status = data.read(&uuid, sizeof(effect_uuid_t));
+            if (status != NO_ERROR) {
+                return status;
+            }
+            int32_t priority = data.readInt32();
+            audio_usage_t usage = (audio_usage_t) data.readInt32();
+            audio_unique_id_t id = 0;
+            reply->writeInt32(static_cast <int32_t>(addStreamDefaultEffect(&type,
+                                                                           opPackageName,
+                                                                           &uuid,
+                                                                           priority,
+                                                                           usage,
+                                                                           &id)));
+            reply->writeInt32(id);
+            return NO_ERROR;
+        }
+
+        case REMOVE_STREAM_DEFAULT_EFFECT: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_unique_id_t id = static_cast<audio_unique_id_t>(data.readInt32());
+            reply->writeInt32(static_cast <int32_t>(removeStreamDefaultEffect(id)));
+            return NO_ERROR;
+        }
+
+        case ADD_SOURCE_DEFAULT_EFFECT: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            effect_uuid_t type;
+            status_t status = data.read(&type, sizeof(effect_uuid_t));
+            if (status != NO_ERROR) {
+                return status;
+            }
+            String16 opPackageName;
+            status = data.readString16(&opPackageName);
+            if (status != NO_ERROR) {
+                return status;
+            }
+            effect_uuid_t uuid;
+            status = data.read(&uuid, sizeof(effect_uuid_t));
+            if (status != NO_ERROR) {
+                return status;
+            }
+            int32_t priority = data.readInt32();
+            audio_source_t source = (audio_source_t) data.readInt32();
+            audio_unique_id_t id = 0;
+            reply->writeInt32(static_cast <int32_t>(addSourceDefaultEffect(&type,
+                                                                           opPackageName,
+                                                                           &uuid,
+                                                                           priority,
+                                                                           source,
+                                                                           &id)));
+            reply->writeInt32(id);
+            return NO_ERROR;
+        }
+
+        case REMOVE_SOURCE_DEFAULT_EFFECT: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_unique_id_t id = static_cast<audio_unique_id_t>(data.readInt32());
+            reply->writeInt32(static_cast <int32_t>(removeSourceDefaultEffect(id)));
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
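
Note that the four new transaction codes are appended at the end of the enum, so existing code numbers stay stable across client and server versions. The ADD_*_DEFAULT_EFFECT handlers reply with the status followed by the generated id, and the proxies read them in the same order; the REMOVE_* replies carry only the status. Condensed restatement of the shared reply layout:

    // Proxy side of ADD_STREAM/SOURCE_DEFAULT_EFFECT, mirroring the stub's writes:
    status_t status = static_cast<status_t>(reply.readInt32());   // 1. result of add*DefaultEffect()
    *id = reply.readInt32();                                       // 2. generated audio_unique_id_t
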
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 5716727..c9263f4 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -1136,13 +1136,13 @@
             // This is needed in case of cold start of the output stream.
             if ((mStartTime.tv_sec != 0) && (clock_gettime(CLOCK_MONOTONIC, &stopTime) == 0)) {
                 time_t sec = stopTime.tv_sec - mStartTime.tv_sec;
-                long nsec = stopTime.tv_nsec - mStartTime.tv_nsec;
+                auto nsec = stopTime.tv_nsec - mStartTime.tv_nsec;
                 if (nsec < 0) {
                     --sec;
                     nsec += 1000000000;
                 }
 
-                if ((sec + 1) > ((long)(INT_MAX / mSamplingRate))) {
+                if ((sec + 1) > ((time_t)(INT_MAX / mSamplingRate))) {
                     mMaxSmp = sec * mSamplingRate;
                 } else {
                     // mSamplingRate is always > 1000
@@ -1257,8 +1257,8 @@
 
     AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
     ToneGenerator *lpToneGen = static_cast<ToneGenerator *>(user);
-    short *lpOut = buffer->i16;
-    unsigned int lNumSmp = buffer->size/sizeof(short);
+    int16_t *lpOut = buffer->i16;
+    unsigned int lNumSmp = buffer->size/sizeof(int16_t);
     const ToneDescriptor *lpToneDesc = lpToneGen->mpToneDesc;
 
     if (buffer->size == 0) return;
@@ -1329,7 +1329,7 @@
             if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
                 lWaveCmd = WaveGenerator::WAVEGEN_STOP;
                 unsigned int lFreqIdx = 0;
-                unsigned short lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+                uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
 
                 while (lFrequency != 0) {
                     WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
@@ -1379,16 +1379,18 @@
                         lWaveCmd = WaveGenerator::WAVEGEN_START;
                     }
 
-                    ALOGV("New segment %d, Next Time: %d", lpToneGen->mCurSegment,
-                            (lpToneGen->mNextSegSmp*1000)/lpToneGen->mSamplingRate);
+                    ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
+                            ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+
 
                 } else {
                     lGenSmp = 0;
                     ALOGV("End repeat, time: %d", (unsigned int)(systemTime()/1000000));
                 }
             } else {
-                ALOGV("New segment %d, Next Time: %d", lpToneGen->mCurSegment,
-                        (lpToneGen->mNextSegSmp*1000)/lpToneGen->mSamplingRate);
+                ALOGV("New segment %d, Next Time: %lld", lpToneGen->mCurSegment,
+                        ((long long)(lpToneGen->mNextSegSmp)*1000)/lpToneGen->mSamplingRate);
+
                 if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
                     // If next segment is not silent,  OFF -> ON transition : reset wave generator
                     lWaveCmd = WaveGenerator::WAVEGEN_START;
@@ -1415,7 +1417,7 @@
         if (lGenSmp) {
             // If samples must be generated, call all active wave generators and acumulate waves in lpOut
             unsigned int lFreqIdx = 0;
-            unsigned short lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
+            uint16_t lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[lFreqIdx];
 
             while (lFrequency != 0) {
                 WaveGenerator *lpWaveGen = lpToneGen->mWaveGens.valueFor(lFrequency);
@@ -1654,17 +1656,17 @@
 //
 ////////////////////////////////////////////////////////////////////////////////
 ToneGenerator::WaveGenerator::WaveGenerator(uint32_t samplingRate,
-        unsigned short frequency, float volume) {
+        uint16_t frequency, float volume) {
     double d0;
     double F_div_Fs;  // frequency / samplingRate
 
     F_div_Fs = frequency / (double)samplingRate;
     d0 = - (float)GEN_AMP * sin(2 * M_PI * F_div_Fs);
-    mS2_0 = (short)d0;
+    mS2_0 = (int16_t)d0;
     mS1 = 0;
     mS2 = mS2_0;
 
-    mAmplitude_Q15 = (short)(32767. * 32767. * volume / GEN_AMP);
+    mAmplitude_Q15 = (int16_t)(32767. * 32767. * volume / GEN_AMP);
     // take some margin for amplitude fluctuation
     if (mAmplitude_Q15 > 32500)
         mAmplitude_Q15 = 32500;
@@ -1672,7 +1674,7 @@
     d0 = 32768.0 * cos(2 * M_PI * F_div_Fs);  // Q14*2*cos()
     if (d0 > 32767)
         d0 = 32767;
-    mA1_Q14 = (short) d0;
+    mA1_Q14 = (int16_t) d0;
 
     ALOGV("WaveGenerator init, mA1_Q14: %d, mS2_0: %d, mAmplitude_Q15: %d",
             mA1_Q14, mS2_0, mAmplitude_Q15);
@@ -1710,7 +1712,7 @@
 //        none
 //
 ////////////////////////////////////////////////////////////////////////////////
-void ToneGenerator::WaveGenerator::getSamples(short *outBuffer,
+void ToneGenerator::WaveGenerator::getSamples(int16_t *outBuffer,
         unsigned int count, unsigned int command) {
     long lS1, lS2;
     long lA1, lAmplitude;
@@ -1741,7 +1743,7 @@
             lS2 = lS1;
             lS1 = Sample;
             Sample = ((lAmplitude>>16) * Sample) >> S_Q15;
-            *(outBuffer++) += (short)Sample;  // put result in buffer
+            *(outBuffer++) += (int16_t)Sample;  // put result in buffer
             lAmplitude -= dec;
         }
     } else {
@@ -1753,7 +1755,7 @@
             lS2 = lS1;
             lS1 = Sample;
             Sample = (lAmplitude * Sample) >> S_Q15;
-            *(outBuffer++) += (short)Sample;  // put result in buffer
+            *(outBuffer++) += (int16_t)Sample;  // put result in buffer
         }
     }
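
The WaveGenerator changes are type tightening only (short to int16_t, unsigned short to uint16_t); the underlying oscillator is unchanged. It is a two-tap recursive sine generator whose coefficient 2*cos(2*pi*F/Fs) is stored in Q14 (mA1_Q14) and whose amplitude is in Q15. A floating-point sketch of that recurrence for orientation; the fixed-point scaling inside getSamples() is only partly visible in this hunk, so the seeds and coefficient below are analogues, not the literal members.

    // Hedged float-domain sketch of the digital resonator behind WaveGenerator:
    // s[n] = 2*cos(w)*s[n-1] - s[n-2] generates sin(n*w) from suitable seeds,
    // one multiply and one subtract per sample.
    #include <cmath>
    #include <vector>

    std::vector<float> makeSine(double frequency, double samplingRate, unsigned count) {
        const double w  = 2.0 * M_PI * frequency / samplingRate;
        const double a1 = 2.0 * std::cos(w);   // fixed-point analogue: mA1_Q14
        double s2 = -std::sin(w);              // s[n-2] seed, analogue of mS2_0
        double s1 = 0.0;                       // s[n-1] seed, analogue of mS1
        std::vector<float> out(count);
        for (unsigned n = 0; n < count; ++n) {
            const double s = a1 * s1 - s2;     // next sine sample, amplitude 1.0
            s2 = s1;
            s1 = s;
            out[n] = static_cast<float>(s);    // equals sin((n + 1) * w)
        }
        return out;
    }
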
 
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index bfc068b..6bd4137 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -90,27 +90,34 @@
      */
     static status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor);
 
-
     /*
-     * Returns the descriptor for the specified effect uuid.
+     * Returns a descriptor for the specified effect uuid or type.
+     *
+     * Look up an effect by uuid, or, if the uuid is unspecified (EFFECT_UUID_NULL),
+     * look it up by type and preferred flags instead.
      *
      * Parameters:
      *      uuid:       pointer to effect uuid.
+     *      type:       pointer to effect type uuid.
+     *      preferredTypeFlag: if multiple effects of the given type exist,
+     *                  one with a matching type flag will be chosen over one without.
+     *                  Use EFFECT_FLAG_TYPE_MASK to indicate no preference.
      *      descriptor: address where the effect descriptor should be returned.
      *
      * Returned status (from utils/Errors.h) can be:
      *      NO_ERROR        successful operation.
      *      PERMISSION_DENIED could not get AudioFlinger interface
      *      NO_INIT         effect library failed to initialize
-     *      BAD_VALUE       invalid uuid or descriptor pointers
+     *      BAD_VALUE       invalid type or descriptor pointers
      *      NAME_NOT_FOUND  no effect with this uuid found
      *
      * Returned value
      *   *descriptor updated with effect descriptor
      */
     static status_t getEffectDescriptor(const effect_uuid_t *uuid,
-                                        effect_descriptor_t *descriptor) /*const*/;
-
+                                        const effect_uuid_t *type,
+                                        uint32_t preferredTypeFlag,
+                                        effect_descriptor_t *descriptor);
 
     /*
      * Returns a list of descriptors corresponding to the pre processings enabled by default
@@ -144,6 +151,132 @@
                                               uint32_t *count);
 
     /*
+     * Gets a new system-wide unique effect id.
+     *
+     * Parameters:
+     *      id: The address to return the generated id.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     * Returned value
+     *   *id:  The new unique system-wide effect id.
+     */
+    static status_t newEffectUniqueId(audio_unique_id_t* id);
+
+    /*
+     * Static methods for adding/removing system-wide effects.
+     */
+
+    /*
+     * Adds an effect to the list of default effects for a given source type.
+     *
+     * If the effect is no longer available when a source of the given type
+     * is created, the system will continue without adding it.
+     *
+     * Parameters:
+     *   typeStr:  Type uuid of effect to be a default: can be null if uuidStr is specified.
+     *             This may correspond to the OpenSL ES interface implemented by this effect,
+     *             or could be some vendor-defined type.
+     *   opPackageName: The package name used for app op checks.
+     *   uuidStr:  Uuid of effect to be a default: can be null if type is specified.
+     *             This uuid corresponds to a particular implementation of an effect type.
+     *             Note if both uuidStr and typeStr are specified, typeStr is ignored.
+     *   priority: Requested priority for effect control: negative values indicate
+     *             lower priorities, positive values indicate higher priorities,
+     *             and 0 is the normal priority.
+     *   source:   The source this effect should be a default for.
+     *   id:       Address where the system-wide unique id of the default effect should be returned.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     *      NO_INIT         effect library failed to initialize.
+     *      BAD_VALUE       invalid source, type uuid or implementation uuid.
+     *      NAME_NOT_FOUND  no effect with this uuid or type found.
+     *
+     * Returned value
+     *   *id:  The system-wide unique id of the added default effect.
+     */
+    static status_t addSourceDefaultEffect(const char* typeStr,
+                                           const String16& opPackageName,
+                                           const char* uuidStr,
+                                           int32_t priority,
+                                           audio_source_t source,
+                                           audio_unique_id_t* id);
+
+    /*
+     * Adds an effect to the list of default output effects for a given stream type.
+     *
+     * If the effect is no longer available when a stream of the given type
+     * is created, the system will continue without adding it.
+     *
+     * Parameters:
+     *   typeStr:  Type uuid of effect to be a default: can be null if uuidStr is specified.
+     *             This may correspond to the OpenSL ES interface implemented by this effect,
+     *             or could be some vendor-defined type.
+     *   opPackageName: The package name used for app op checks.
+     *   uuidStr:  Uuid of effect to be a default: can be null if type is specified.
+     *             This uuid corresponds to a particular implementation of an effect type.
+     *             Note if both uuidStr and typeStr are specified, typeStr is ignored.
+     *   priority: Requested priority for effect control: negative values indicate
+     *             lower priorities, positive values indicate higher priorities,
+     *             and 0 is the normal priority.
+     *   usage:    The usage this effect should be a default for. Unrecognized values will be
+     *             treated as AUDIO_USAGE_UNKNOWN.
+     *   id:       Address where the system-wide unique id of the default effect should be returned.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     *      NO_INIT         effect library failed to initialize.
+     *      BAD_VALUE       invalid type uuid or implementation uuid.
+     *      NAME_NOT_FOUND  no effect with this uuid or type found.
+     *
+     * Returned value
+     *   *id:  The system-wide unique id of the added default effect.
+     */
+    static status_t addStreamDefaultEffect(const char* typeStr,
+                                           const String16& opPackageName,
+                                           const char* uuidStr,
+                                           int32_t priority,
+                                           audio_usage_t usage,
+                                           audio_unique_id_t* id);
+
+    /*
+     * Removes an effect from the list of default effects for a given source type.
+     *
+     * Parameters:
+     *      id: The system-wide unique id of the effect that should no longer be a default.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     *      NO_INIT         effect library failed to initialize.
+     *      BAD_VALUE       invalid id.
+     */
+    static status_t removeSourceDefaultEffect(audio_unique_id_t id);
+
+    /*
+     * Removes an effect from the list of default output effects for a given stream type.
+     *
+     * Parameters:
+     *      id: The system-wide unique id of the effect that should no longer be a default.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     *      NO_INIT         effect library failed to initialize.
+     *      BAD_VALUE       invalid id.
+     */
+    static status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
+    /*
      * Events used by callback function (effect_callback_t).
      */
     enum event_type {
@@ -414,6 +547,7 @@
      effect_descriptor_t     mDescriptor;        // effect descriptor
      int32_t                 mId;                // system wide unique effect engine instance ID
      Mutex                   mLock;              // Mutex for mEnabled access
+     Mutex                   mConstructLock;     // Mutex held until construction completes
 
      String16                mOpPackageName;     // The package name used for app op checks.
 
@@ -440,12 +574,22 @@
         virtual void controlStatusChanged(bool controlGranted) {
             sp<AudioEffect> effect = mEffect.promote();
             if (effect != 0) {
+                {
+                    // Acquiring mConstructLock means the AudioEffect constructor has
+                    // finished, so release the lock immediately.
+                    AutoMutex lock(effect->mConstructLock);
+                }
                 effect->controlStatusChanged(controlGranted);
             }
         }
         virtual void enableStatusChanged(bool enabled) {
             sp<AudioEffect> effect = mEffect.promote();
             if (effect != 0) {
+                {
+                    // Acquiring mConstructLock means the AudioEffect constructor has
+                    // finished, so release the lock immediately.
+                    AutoMutex lock(effect->mConstructLock);
+                }
                 effect->enableStatusChanged(enabled);
             }
         }
@@ -456,6 +600,11 @@
                                      void *pReplyData) {
             sp<AudioEffect> effect = mEffect.promote();
             if (effect != 0) {
+                {
+                    // Acquiring mConstructLock means the AudioEffect constructor has
+                    // finished, so release the lock immediately.
+                    AutoMutex lock(effect->mConstructLock);
+                }
                 effect->commandExecuted(
                     cmdCode, cmdSize, pCmdData, replySize, pReplyData);
             }
@@ -465,6 +614,11 @@
         virtual void binderDied(const wp<IBinder>& /*who*/) {
             sp<AudioEffect> effect = mEffect.promote();
             if (effect != 0) {
+                {
+                    // Acquiring mConstructLock means the AudioEffect constructor has
+                    // finished, so release the lock immediately.
+                    AutoMutex lock(effect->mConstructLock);
+                }
                 effect->binderDied();
             }
         }
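
A hedged usage sketch of the static default-effect API declared above, assuming the AOSP libaudioclient headers and libraries; the package name and implementation uuid below are placeholders:

#include <media/AudioEffect.h>  // AOSP libaudioclient

// Register a placeholder implementation uuid as a default effect for media
// playback, then remove it again; error handling trimmed for brevity.
void exampleDefaultStreamEffect() {
    audio_unique_id_t id;  // output parameter
    android::status_t status = android::AudioEffect::addStreamDefaultEffect(
            nullptr,                                 // typeStr, unused when uuidStr is given
            android::String16("com.example.app"),    // opPackageName (placeholder)
            "01234567-89ab-cdef-0123-456789abcdef",  // uuidStr (placeholder)
            0,                                       // priority (normal)
            AUDIO_USAGE_MEDIA,
            &id);
    if (status == android::NO_ERROR) {
        android::AudioEffect::removeStreamDefaultEffect(id);
    }
}
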
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
index 73ee0a7..35d2e85 100644
--- a/media/libaudioclient/include/media/AudioPolicyHelper.h
+++ b/media/libaudioclient/include/media/AudioPolicyHelper.h
@@ -19,6 +19,43 @@
 #include <system/audio.h>
 
 static inline
+audio_stream_type_t audio_usage_to_stream_type(const audio_usage_t usage)
+{
+    switch(usage) {
+        case AUDIO_USAGE_MEDIA:
+        case AUDIO_USAGE_GAME:
+        case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        case AUDIO_USAGE_ASSISTANT:
+            return AUDIO_STREAM_MUSIC;
+        case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+            return AUDIO_STREAM_ACCESSIBILITY;
+        case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+            return AUDIO_STREAM_SYSTEM;
+        case AUDIO_USAGE_VOICE_COMMUNICATION:
+            return AUDIO_STREAM_VOICE_CALL;
+
+        case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+            return AUDIO_STREAM_DTMF;
+
+        case AUDIO_USAGE_ALARM:
+            return AUDIO_STREAM_ALARM;
+        case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+            return AUDIO_STREAM_RING;
+
+        case AUDIO_USAGE_NOTIFICATION:
+        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+        case AUDIO_USAGE_NOTIFICATION_EVENT:
+            return AUDIO_STREAM_NOTIFICATION;
+
+        case AUDIO_USAGE_UNKNOWN:
+        default:
+            return AUDIO_STREAM_MUSIC;
+    }
+}
+
+static inline
 audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
 {
     // flags to stream type mapping
@@ -30,38 +67,7 @@
     }
 
     // usage to stream type mapping
-    switch (attr->usage) {
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-    case AUDIO_USAGE_ASSISTANT:
-        return AUDIO_STREAM_MUSIC;
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        return AUDIO_STREAM_ACCESSIBILITY;
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return AUDIO_STREAM_SYSTEM;
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return AUDIO_STREAM_VOICE_CALL;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return AUDIO_STREAM_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-        return AUDIO_STREAM_ALARM;
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return AUDIO_STREAM_RING;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return AUDIO_STREAM_NOTIFICATION;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return AUDIO_STREAM_MUSIC;
-    }
+    return audio_usage_to_stream_type(attr->usage);
 }
 
 static inline
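
The refactor above only extracts the usage switch into audio_usage_to_stream_type() so it can be reused without a full audio_attributes_t; behavior is unchanged. A small sketch of how the two helpers relate, assuming the header changed above:

#include <media/AudioPolicyHelper.h>

void exampleStreamTypeMapping() {
    // Map a bare usage.
    audio_stream_type_t s1 = audio_usage_to_stream_type(AUDIO_USAGE_GAME);
    // s1 == AUDIO_STREAM_MUSIC

    // Map full attributes; with no special flags set this defers to the usage mapping.
    audio_attributes_t attr = {};
    attr.usage = AUDIO_USAGE_ALARM;
    audio_stream_type_t s2 = audio_attributes_to_stream_type(&attr);
    // s2 == AUDIO_STREAM_ALARM
    (void)s1; (void)s2;
}
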
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index cf446a5..806280a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -85,7 +85,7 @@
 
         union {
             void*       raw;
-            short*      i16;        // signed 16-bit
+            int16_t*    i16;        // signed 16-bit
             int8_t*     i8;         // unsigned 8-bit, offset by 0x80
                                     // input to obtainBuffer(): unused, output: pointer to buffer
         };
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 4c0f796..adfee8b 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -224,15 +224,9 @@
                                      audio_output_flags_t flags,
                                      audio_port_handle_t *selectedDeviceId,
                                      audio_port_handle_t *portId);
-    static status_t startOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session);
-    static status_t stopOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session);
-    static void releaseOutput(audio_io_handle_t output,
-                              audio_stream_type_t stream,
-                              audio_session_t session);
+    static status_t startOutput(audio_port_handle_t portId);
+    static status_t stopOutput(audio_port_handle_t portId);
+    static void releaseOutput(audio_port_handle_t portId);
 
     // Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
     // or release it with releaseInput().
@@ -328,9 +322,9 @@
     static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
 
     static status_t startAudioSource(const struct audio_port_config *source,
-                                      const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle);
-    static status_t stopAudioSource(audio_patch_handle_t handle);
+                                     const audio_attributes_t *attributes,
+                                     audio_port_handle_t *portId);
+    static status_t stopAudioSource(audio_port_handle_t portId);
 
     static status_t setMasterMono(bool mono);
     static status_t getMasterMono(bool *mono);
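
With the change above, the output lifecycle is keyed solely by the audio_port_handle_t filled in by getOutputForAttr(), rather than the (output, stream, session) triple. A hedged sketch of the new call pattern; obtaining the port handle is assumed to have happened elsewhere:

#include <media/AudioSystem.h>  // AOSP libaudioclient

// Sketch: the playback lifecycle is now driven entirely by the port handle that
// AudioSystem::getOutputForAttr() returned (that call is not shown here).
void examplePlayWithPortId(audio_port_handle_t portId) {
    if (android::AudioSystem::startOutput(portId) == android::NO_ERROR) {
        // ... write audio to the output ...
        android::AudioSystem::stopOutput(portId);
    }
    android::AudioSystem::releaseOutput(portId);
}
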
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 3eb627d..59c6f4c 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -102,7 +102,7 @@
 
         union {
             void*       raw;
-            short*      i16;      // signed 16-bit
+            int16_t*    i16;      // signed 16-bit
             int8_t*     i8;       // unsigned 8-bit, offset by 0x80
         };                        // input to obtainBuffer(): unused, output: pointer to buffer
     };
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index e6bf72f..31326ab 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -428,7 +428,9 @@
     virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
 
     virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID,
-                                        effect_descriptor_t *pDescriptor) const = 0;
+                                         const effect_uuid_t *pTypeUUID,
+                                         uint32_t preferredTypeFlag,
+                                         effect_descriptor_t *pDescriptor) const = 0;
 
     virtual sp<IEffect> createEffect(
                                     effect_descriptor_t *pDesc,
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index c3876af..fdd8d57 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -66,15 +66,9 @@
                                       audio_output_flags_t flags,
                                       audio_port_handle_t *selectedDeviceId,
                                       audio_port_handle_t *portId) = 0;
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session) = 0;
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session) = 0;
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session) = 0;
+    virtual status_t startOutput(audio_port_handle_t portId) = 0;
+    virtual status_t stopOutput(audio_port_handle_t portId) = 0;
+    virtual void releaseOutput(audio_port_handle_t portId) = 0;
     virtual status_t  getInputForAttr(const audio_attributes_t *attr,
                               audio_io_handle_t *input,
                               audio_session_t session,
@@ -115,6 +109,20 @@
     virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count) = 0;
+    virtual status_t addSourceDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_source_t source,
+                                            audio_unique_id_t* id) = 0;
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_usage_t usage,
+                                            audio_unique_id_t* id) = 0;
+    virtual status_t removeSourceDefaultEffect(audio_unique_id_t id) = 0;
+    virtual status_t removeStreamDefaultEffect(audio_unique_id_t id) = 0;
    // Check if offload is possible for given format, stream type, sample rate,
     // bit rate, duration, video and streaming or offload property is enabled
     virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
@@ -159,8 +167,8 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle) = 0;
-    virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
+                                      audio_port_handle_t *portId) = 0;
+    virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
diff --git a/media/libaudioclient/include/media/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
index 247703f..e0e3bb1 100644
--- a/media/libaudioclient/include/media/ToneGenerator.h
+++ b/media/libaudioclient/include/media/ToneGenerator.h
@@ -256,9 +256,9 @@
     class ToneSegment {
     public:
         unsigned int duration;
-        unsigned short waveFreq[TONEGEN_MAX_WAVES+1];
-        unsigned short loopCnt;
-        unsigned short loopIndx;
+        uint16_t waveFreq[TONEGEN_MAX_WAVES+1];
+        uint16_t loopCnt;
+        uint16_t loopIndx;
     };
 
     class ToneDescriptor {
@@ -279,14 +279,14 @@
     unsigned int mMaxSmp;  // Maximum number of audio samples played (maximum tone duration)
     int mDurationMs;  // Maximum tone duration in ms
 
-    unsigned short mCurSegment;  // Current segment index in ToneDescriptor segments[]
-    unsigned short mCurCount;  // Current sequence repeat count
-    volatile unsigned short mState;  // ToneGenerator state (tone_state)
-    unsigned short mRegion;
+    uint16_t mCurSegment;  // Current segment index in ToneDescriptor segments[]
+    uint16_t mCurCount;  // Current sequence repeat count
+    volatile uint16_t mState;  // ToneGenerator state (tone_state)
+    uint16_t mRegion;
     const ToneDescriptor *mpToneDesc;  // pointer to active tone descriptor
     const ToneDescriptor *mpNewToneDesc;  // pointer to next active tone descriptor
 
-    unsigned short mLoopCounter; // Current tone loopback count
+    uint16_t mLoopCounter; // Current tone loopback count
 
     uint32_t mSamplingRate;  // AudioFlinger Sampling rate
     sp<AudioTrack> mpAudioTrack;  // Pointer to audio track used for playback
@@ -314,26 +314,26 @@
             WAVEGEN_STOP  // Stop wave on zero crossing
         };
 
-        WaveGenerator(uint32_t samplingRate, unsigned short frequency,
+        WaveGenerator(uint32_t samplingRate, uint16_t frequency,
                 float volume);
         ~WaveGenerator();
 
-        void getSamples(short *outBuffer, unsigned int count,
+        void getSamples(int16_t *outBuffer, unsigned int count,
                 unsigned int command);
 
     private:
-        static const short GEN_AMP = 32000;  // amplitude of generator
-        static const short S_Q14 = 14;  // shift for Q14
-        static const short S_Q15 = 15;  // shift for Q15
+        static const int16_t GEN_AMP = 32000;  // amplitude of generator
+        static const int16_t S_Q14 = 14;  // shift for Q14
+        static const int16_t S_Q15 = 15;  // shift for Q15
 
-        short mA1_Q14;  // Q14 coefficient
+        int16_t mA1_Q14;  // Q14 coefficient
         // delay line of full amplitude generator
         long mS1, mS2;  // delay line S2 oldest
-        short mS2_0;  // saved value for reinitialisation
-        short mAmplitude_Q15;  // Q15 amplitude
+        int16_t mS2_0;  // saved value for reinitialisation
+        int16_t mAmplitude_Q15;  // Q15 amplitude
     };
 
-    KeyedVector<unsigned short, WaveGenerator *> mWaveGens;  // list of active wave generators.
+    KeyedVector<uint16_t, WaveGenerator *> mWaveGens;  // list of active wave generators.
 };
 
 }
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index b23e018..bfa80e8 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -192,7 +192,17 @@
                     const native_handle *handle = hidlInfo.sharedMemory.handle();
                     if (handle->numFds > 0) {
                         info->shared_memory_fd = handle->data[0];
+#if MAJOR_VERSION == 4
+                        info->flags = audio_mmap_buffer_flag(hidlInfo.flags);
+#endif
                         info->buffer_size_frames = hidlInfo.bufferSizeFrames;
+                        // A negative buffer size in frames was a hack in O and P to
+                        // indicate that the buffer is shareable to applications.
+                        if (info->buffer_size_frames < 0) {
+                            info->buffer_size_frames *= -1;
+                            info->flags = audio_mmap_buffer_flag(
+                                    info->flags | AUDIO_MMAP_APPLICATION_SHAREABLE);
+                        }
                         info->burst_size_frames = hidlInfo.burstSizeFrames;
                         // info->shared_memory_address is not needed in HIDL context
                         info->shared_memory_address = NULL;
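
A standalone model of the normalization added above: legacy O/P HALs signaled an application-shareable buffer by returning a negative buffer_size_frames, which is now folded into a flag. The struct and flag below are simplified stand-ins, not the real audio_mmap_buffer_info:

#include <cstdint>
#include <cstdio>

// Simplified stand-ins for audio_mmap_buffer_info and its flag (illustrative only).
enum : uint32_t { MMAP_NONE = 0, MMAP_APPLICATION_SHAREABLE = 1u << 0 };
struct MmapInfo {
    int32_t  bufferSizeFrames;
    uint32_t flags;
};

// Fold the legacy "negative size means shareable" convention into a flag.
static void normalize(MmapInfo* info) {
    if (info->bufferSizeFrames < 0) {
        info->bufferSizeFrames *= -1;
        info->flags |= MMAP_APPLICATION_SHAREABLE;
    }
}

int main() {
    MmapInfo info{-2048, MMAP_NONE};
    normalize(&info);
    std::printf("frames=%d shareable=%d\n", info.bufferSizeFrames,
                (info.flags & MMAP_APPLICATION_SHAREABLE) != 0);
    return 0;
}
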
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index f6f817a..f3ea826 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -875,7 +875,7 @@
                 t->hook = Track::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
                         t->mMixerInFormat, t->mMixerFormat);
                 ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                        "Track %d needs downmix + resample", i);
+                        "Track %d needs downmix + resample", name);
             } else {
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
                     t->hook = Track::getTrackHook(
@@ -890,7 +890,7 @@
                     t->hook = Track::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
                             t->mMixerInFormat, t->mMixerFormat);
                     ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                            "Track %d needs downmix", i);
+                            "Track %d needs downmix", name);
                 }
             }
         }
diff --git a/media/libaudioprocessing/AudioResamplerDyn.h b/media/libaudioprocessing/AudioResamplerDyn.h
index 92144d0..479142e 100644
--- a/media/libaudioprocessing/AudioResamplerDyn.h
+++ b/media/libaudioprocessing/AudioResamplerDyn.h
@@ -55,6 +55,11 @@
     virtual size_t resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
 
+    void reset() override {
+        AudioResampler::reset();
+        mInBuffer.reset();
+    }
+
     // Make available key design criteria for testing
     int getHalfLength() const {
         return mConstants.mHalfNumCoefs;
diff --git a/media/libaudioprocessing/include/media/AudioResamplerPublic.h b/media/libaudioprocessing/include/media/AudioResamplerPublic.h
index 055f724..50ca33d 100644
--- a/media/libaudioprocessing/include/media/AudioResamplerPublic.h
+++ b/media/libaudioprocessing/include/media/AudioResamplerPublic.h
@@ -104,8 +104,8 @@
         const AudioPlaybackRate &pr2) {
     return fabs(pr1.mSpeed - pr2.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA &&
            fabs(pr1.mPitch - pr2.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA &&
-           pr2.mStretchMode == pr2.mStretchMode &&
-           pr2.mFallbackMode == pr2.mFallbackMode;
+           pr1.mStretchMode == pr2.mStretchMode &&
+           pr1.mFallbackMode == pr2.mFallbackMode;
 }
 
 static inline bool isAudioPlaybackRateValid(const AudioPlaybackRate &playbackRate) {
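
The fix above matters because pr2.mStretchMode == pr2.mStretchMode is always true, so two rates differing only in stretch or fallback mode compared as equal. A standalone illustration with a simplified stand-in struct (not the real AudioPlaybackRate; the 1e-4f deltas are placeholders for the MIN_DELTA constants):

#include <cassert>
#include <cmath>

struct Rate { float speed; float pitch; int stretchMode; int fallbackMode; };

static bool equalBuggy(const Rate& a, const Rate& b) {
    return std::fabs(a.speed - b.speed) < 1e-4f && std::fabs(a.pitch - b.pitch) < 1e-4f &&
           b.stretchMode == b.stretchMode &&   // self-comparison: always true
           b.fallbackMode == b.fallbackMode;   // self-comparison: always true
}

static bool equalFixed(const Rate& a, const Rate& b) {
    return std::fabs(a.speed - b.speed) < 1e-4f && std::fabs(a.pitch - b.pitch) < 1e-4f &&
           a.stretchMode == b.stretchMode &&
           a.fallbackMode == b.fallbackMode;
}

int main() {
    Rate r1{1.f, 1.f, /*stretchMode=*/0, /*fallbackMode=*/0};
    Rate r2{1.f, 1.f, /*stretchMode=*/1, /*fallbackMode=*/0};
    assert(equalBuggy(r1, r2));   // wrongly reports equal
    assert(!equalFixed(r1, r2));  // the differing stretch mode is now detected
    return 0;
}
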
diff --git a/media/libeffects/config/include/media/EffectsConfig.h b/media/libeffects/config/include/media/EffectsConfig.h
index 55b946f..fa0415b 100644
--- a/media/libeffects/config/include/media/EffectsConfig.h
+++ b/media/libeffects/config/include/media/EffectsConfig.h
@@ -96,7 +96,7 @@
     /** Parsed config, nullptr if the xml lib could not load the file */
     std::unique_ptr<Config> parsedConfig;
     size_t nbSkippedElement; //< Number of skipped invalid library, effect or processing chain
-    const char* configPath; //< Path to the loaded configuration
+    const std::string configPath; //< Path to the loaded configuration
 };
 
 /** Parses the provided effect configuration.
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index d79501f..351b1ee 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -250,14 +250,14 @@
     return true;
 }
 
-/** Internal version of the public parse(const char* path) with precondition `path != nullptr`. */
-ParsingResult parseWithPath(const char* path) {
+/** Internal version of the public parse(const char* path), where a path is always provided. */
+ParsingResult parseWithPath(std::string&& path) {
     XMLDocument doc;
-    doc.LoadFile(path);
+    doc.LoadFile(path.c_str());
     if (doc.Error()) {
-        ALOGE("Failed to parse %s: Tinyxml2 error (%d): %s", path,
+        ALOGE("Failed to parse %s: Tinyxml2 error (%d): %s", path.c_str(),
               doc.ErrorID(), doc.ErrorStr());
-        return {nullptr, 0, path};
+        return {nullptr, 0, std::move(path)};
     }
 
     auto config = std::make_unique<Config>();
@@ -295,7 +295,7 @@
             }
         }
     }
-    return {std::move(config), nbSkippedElements, path};
+    return {std::move(config), nbSkippedElements, std::move(path)};
 }
 
 }; // namespace
@@ -310,14 +310,14 @@
         if (access(defaultPath.c_str(), R_OK) != 0) {
             continue;
         }
-        auto result = parseWithPath(defaultPath.c_str());
+        auto result = parseWithPath(std::move(defaultPath));
         if (result.parsedConfig != nullptr) {
             return result;
         }
     }
 
     ALOGE("Could not parse effect configuration in any of the default locations.");
-    return {nullptr, 0, nullptr};
+    return {nullptr, 0, ""};
 }
 
 } // namespace effectsConfig
diff --git a/media/libeffects/factory/EffectsXmlConfigLoader.cpp b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
index 7a7d431..052a88b 100644
--- a/media/libeffects/factory/EffectsXmlConfigLoader.cpp
+++ b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
@@ -327,7 +327,8 @@
                                            &gSkippedEffects, &gSubEffectList);
 
     ALOGE_IF(result.nbSkippedElement != 0, "%zu errors during loading of configuration: %s",
-             result.nbSkippedElement, result.configPath ?: "No config file found");
+             result.nbSkippedElement,
+             result.configPath.empty() ? "No config file found" : result.configPath.c_str());
 
     return result.nbSkippedElement;
 }
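
With configPath now an owned std::string instead of a borrowed const char*, "no config" is signaled by an empty string rather than a null pointer, which is what the logging change above tests. A minimal sketch of the ownership pattern with simplified stand-in types (not the real ParsingResult):

#include <cstdio>
#include <memory>
#include <string>

struct Result {
    std::unique_ptr<int> parsedConfig;  // stands in for the parsed Config
    const std::string configPath;       // owned copy; empty means "no config file found"
};

static Result parseWithPath(std::string&& path) {
    // ... load and parse `path` here ...
    return {std::make_unique<int>(42), std::move(path)};  // move the path, do not copy it
}

int main() {
    Result r = parseWithPath(std::string("/vendor/etc/audio_effects.xml"));
    std::printf("%s\n", r.configPath.empty() ? "No config file found" : r.configPath.c_str());
    return 0;
}
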
diff --git a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
index 9d29cf1..d61efd3 100644
--- a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
+++ b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
@@ -30,6 +30,26 @@
 #include <audio_effects/effect_loudnessenhancer.h>
 #include "dsp/core/dynamic_range_compression.h"
 
+// BUILD_FLOAT targets building a float effect instead of the legacy int16_t effect.
+#define BUILD_FLOAT
+
+#ifdef BUILD_FLOAT
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_FLOAT;
+
+#else
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+static inline int16_t clamp16(int32_t sample)
+{
+    if ((sample>>15) ^ (sample>>31))
+        sample = 0x7FFF ^ (sample>>31);
+    return sample;
+}
+
+#endif // BUILD_FLOAT
+
 extern "C" {
 
 // effect_handle_t interface implementation for LE effect
@@ -80,13 +100,6 @@
     }
 }
 
-static inline int16_t clamp16(int32_t sample)
-{
-    if ((sample>>15) ^ (sample>>31))
-        sample = 0x7FFF ^ (sample>>31);
-    return sample;
-}
-
 //----------------------------------------------------------------------------
 // LE_setConfig()
 //----------------------------------------------------------------------------
@@ -111,7 +124,7 @@
     if (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) return -EINVAL;
     if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
             pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
-    if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) return -EINVAL;
+    if (pConfig->inputCfg.format != kProcessFormat) return -EINVAL;
 
     pContext->mConfig = *pConfig;
 
@@ -159,7 +172,7 @@
 
     pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
     pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.inputCfg.format = kProcessFormat;
     pContext->mConfig.inputCfg.samplingRate = 44100;
     pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -167,7 +180,7 @@
     pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
     pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
     pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.outputCfg.format = kProcessFormat;
     pContext->mConfig.outputCfg.samplingRate = 44100;
     pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -284,18 +297,41 @@
 
     //ALOGV("LE about to process %d samples", inBuffer->frameCount);
     uint16_t inIdx;
+#ifdef BUILD_FLOAT
+    constexpr float scale = 1 << 15; // a power of 2 makes conversion to the int16_t range lossless
+    constexpr float inverseScale = 1.f / scale;
+    const float inputAmp = pow(10, pContext->mTargetGainmB/2000.0f) * scale;
+#else
     float inputAmp = pow(10, pContext->mTargetGainmB/2000.0f);
+#endif
     float leftSample, rightSample;
     for (inIdx = 0 ; inIdx < inBuffer->frameCount ; inIdx++) {
         // makeup gain is applied on the input of the compressor
+#ifdef BUILD_FLOAT
+        leftSample  = inputAmp * inBuffer->f32[2*inIdx];
+        rightSample = inputAmp * inBuffer->f32[2*inIdx +1];
+        pContext->mCompressor->Compress(&leftSample, &rightSample);
+        inBuffer->f32[2*inIdx]    = leftSample * inverseScale;
+        inBuffer->f32[2*inIdx +1] = rightSample * inverseScale;
+#else
         leftSample  = inputAmp * (float)inBuffer->s16[2*inIdx];
         rightSample = inputAmp * (float)inBuffer->s16[2*inIdx +1];
         pContext->mCompressor->Compress(&leftSample, &rightSample);
         inBuffer->s16[2*inIdx]    = (int16_t) leftSample;
         inBuffer->s16[2*inIdx +1] = (int16_t) rightSample;
+#endif // BUILD_FLOAT
     }
 
     if (inBuffer->raw != outBuffer->raw) {
+#ifdef BUILD_FLOAT
+        if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+            for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
+                outBuffer->f32[i] += inBuffer->f32[i];
+            }
+        } else {
+            memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(float));
+        }
+#else
         if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
             for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
                 outBuffer->s16[i] = clamp16(outBuffer->s16[i] + inBuffer->s16[i]);
@@ -303,6 +339,7 @@
         } else {
             memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(int16_t));
         }
+#endif // BUILD_FLOAT
     }
     if (pContext->mState != LOUDNESS_ENHANCER_STATE_ACTIVE) {
         return -ENODATA;
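
In the float path above, the makeup gain is still specified in millibels and converted with pow(10, mB/2000); the scale/inverseScale pair maps float samples into the int16_t-like range the compressor expects and back. A small standalone check of that arithmetic (the 600 mB figure is only an example):

#include <cmath>
#include <cstdio>

int main() {
    const int targetGainmB = 600;  // e.g. a +6 dB request (100 mB == 1 dB)
    const float linearGain = std::pow(10.f, targetGainmB / 2000.f);
    // 10^(600/2000) = 10^0.3 ~= 1.995, i.e. roughly a doubling of amplitude.

    constexpr float scale = 1 << 15;            // lossless power-of-two scaling
    constexpr float inverseScale = 1.f / scale;
    const float inputAmp = linearGain * scale;  // as computed in the float LE_process path

    const float sample = 0.25f;                   // arbitrary input sample in [-1, 1)
    const float compressed = inputAmp * sample;   // value handed to the compressor
    const float out = compressed * inverseScale;  // back to the [-1, 1) float range
    std::printf("gain=%.3f scaled=%.1f out=%.3f\n", linearGain, compressed, out);
    return 0;
}
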
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 04c2692..53d266a 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -1198,13 +1198,7 @@
     for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
         if (MemTab.Region[i].Size != 0){
             if (MemTab.Region[i].pBaseAddress != NULL){
-                ALOGV("\tLvmEffect_free - START freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
-
                 free(MemTab.Region[i].pBaseAddress);
-
-                ALOGV("\tLvmEffect_free - END   freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }else{
                 ALOGV("\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer %" PRIu32
                         " bytes for region %u at %p ERROR\n",
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index e1c03f9..686ec4c 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -612,13 +612,7 @@
     for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
         if (MemTab.Region[i].Size != 0){
             if (MemTab.Region[i].pBaseAddress != NULL){
-                ALOGV("\tfree() - START freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
-
                 free(MemTab.Region[i].pBaseAddress);
-
-                ALOGV("\tfree() - END   freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }else{
                 ALOGV("\tLVM_ERROR : free() - trying to free with NULL pointer %" PRIu32 " bytes "
                         "for region %u at %p ERROR\n",
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index f2844ed..b914f4b 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -889,7 +889,7 @@
         delete session->procFrame;
         session->procFrame = NULL;
         delete session->apm;
-        session->apm = NULL;
+        session->apm = NULL; // NOLINT(clang-analyzer-cplusplus.NewDelete)
     }
     return status;
 }
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index 70409de..3534149 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -19,7 +19,8 @@
 LOCAL_MODULE:= libvisualizer
 
 LOCAL_C_INCLUDES := \
-	$(call include-path-for, audio-effects)
+	$(call include-path-for, audio-effects) \
+	$(call include-path-for, audio-utils)
 
 
 LOCAL_HEADER_LIBRARIES += libhardware_headers
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 807f24d..e2ccfb7 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -24,11 +24,25 @@
 #include <string.h>
 #include <time.h>
 
+#include <algorithm> // max
 #include <new>
 
 #include <log/log.h>
 
 #include <audio_effects/effect_visualizer.h>
+#include <audio_utils/primitives.h>
+
+#define BUILD_FLOAT
+
+#ifdef BUILD_FLOAT
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_FLOAT;
+
+#else
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+#endif // BUILD_FLOAT
 
 extern "C" {
 
@@ -146,7 +160,7 @@
     if (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) return -EINVAL;
     if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
             pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
-    if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) return -EINVAL;
+    if (pConfig->inputCfg.format != kProcessFormat) return -EINVAL;
 
     pContext->mConfig = *pConfig;
 
@@ -192,7 +206,7 @@
 {
     pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
     pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.inputCfg.format = kProcessFormat;
     pContext->mConfig.inputCfg.samplingRate = 44100;
     pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -200,7 +214,7 @@
     pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
     pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
     pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.outputCfg.format = kProcessFormat;
     pContext->mConfig.outputCfg.samplingRate = 44100;
     pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -301,15 +315,8 @@
 //--- Effect Control Interface Implementation
 //
 
-static inline int16_t clamp16(int32_t sample)
-{
-    if ((sample>>15) ^ (sample>>31))
-        sample = 0x7FFF ^ (sample>>31);
-    return sample;
-}
-
 int Visualizer_process(
-        effect_handle_t self,audio_buffer_t *inBuffer, audio_buffer_t *outBuffer)
+        effect_handle_t self, audio_buffer_t *inBuffer, audio_buffer_t *outBuffer)
 {
     VisualizerContext * pContext = (VisualizerContext *)self;
 
@@ -324,20 +331,28 @@
         return -EINVAL;
     }
 
+    const size_t sampleLen = inBuffer->frameCount * pContext->mChannelCount;
+
     // perform measurements if needed
     if (pContext->mMeasurementMode & MEASUREMENT_MODE_PEAK_RMS) {
         // find the peak and RMS squared for the new buffer
-        uint32_t inIdx;
-        int16_t maxSample = 0;
         float rmsSqAcc = 0;
-        for (inIdx = 0 ; inIdx < inBuffer->frameCount * pContext->mChannelCount ; inIdx++) {
-            if (inBuffer->s16[inIdx] > maxSample) {
-                maxSample = inBuffer->s16[inIdx];
-            } else if (-inBuffer->s16[inIdx] > maxSample) {
-                maxSample = -inBuffer->s16[inIdx];
-            }
-            rmsSqAcc += (inBuffer->s16[inIdx] * inBuffer->s16[inIdx]);
+
+#ifdef BUILD_FLOAT
+        float maxSample = 0.f;
+        for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+            maxSample = fmax(maxSample, fabs(inBuffer->f32[inIdx]));
+            rmsSqAcc += inBuffer->f32[inIdx] * inBuffer->f32[inIdx];
         }
+        maxSample *= 1 << 15; // scale to the int16_t range; exactly 1 << 15 represents the maximum positive value.
+        rmsSqAcc *= 1 << 30; // scale to the int16_t range squared
+#else
+        int maxSample = 0;
+        for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+            maxSample = std::max(maxSample, std::abs(int32_t(inBuffer->s16[inIdx])));
+            rmsSqAcc += inBuffer->s16[inIdx] * inBuffer->s16[inIdx];
+        }
+#endif
         // store the measurement
         pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mPeakU16 = (uint16_t)maxSample;
         pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mRmsSquared =
@@ -348,32 +363,59 @@
         }
     }
 
-    // all code below assumes stereo 16 bit PCM output and input
+#ifdef BUILD_FLOAT
+    float fscale; // multiplicative scale
+#else
     int32_t shift;
+#endif // BUILD_FLOAT
 
     if (pContext->mScalingMode == VISUALIZER_SCALING_MODE_NORMALIZED) {
         // derive capture scaling factor from peak value in current buffer
         // this gives more interesting captures for display.
-        shift = 32;
-        int len = inBuffer->frameCount * 2;
-        for (int i = 0; i < len; i++) {
+
+#ifdef BUILD_FLOAT
+        float maxSample = 0.f;
+        for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+            maxSample = fmax(maxSample, fabs(inBuffer->f32[inIdx]));
+        }
+        if (maxSample > 0.f) {
+            constexpr float halfish = 127.f / 256.f;
+            fscale = halfish / maxSample;
+            int exp; // unused
+            const float significand = frexp(fscale, &exp);
+            if (significand == 0.5f) {
+                fscale *= 255.f / 256.f; // avoid returning unaltered PCM signal
+            }
+        } else {
+            // scale doesn't matter, the values are all 0.
+            fscale = 1.f;
+        }
+#else
+        int32_t orAccum = 0;
+        for (size_t i = 0; i < sampleLen; ++i) {
             int32_t smp = inBuffer->s16[i];
             if (smp < 0) smp = -smp - 1; // take care to keep the max negative in range
-            int32_t clz = __builtin_clz(smp);
-            if (shift > clz) shift = clz;
+            orAccum |= smp;
         }
+
         // A maximum amplitude signal will have 17 leading zeros, which we want to
         // translate to a shift of 8 (for converting 16 bit to 8 bit)
-        shift = 25 - shift;
+        shift = 25 - __builtin_clz(orAccum);
+
         // Never scale by less than 8 to avoid returning unaltered PCM signal.
         if (shift < 3) {
             shift = 3;
         }
         // add one to combine the division by 2 needed after summing left and right channels below
         shift++;
+#endif // BUILD_FLOAT
     } else {
         assert(pContext->mScalingMode == VISUALIZER_SCALING_MODE_AS_PLAYED);
+#ifdef BUILD_FLOAT
+        fscale = 0.5f;  // default divide by 2 to account for sum of L + R.
+#else
         shift = 9;
+#endif // BUILD_FLOAT
     }
 
     uint32_t captIdx;
@@ -386,9 +428,13 @@
             // wrap around
             captIdx = 0;
         }
-        int32_t smp = inBuffer->s16[2 * inIdx] + inBuffer->s16[2 * inIdx + 1];
-        smp = smp >> shift;
+#ifdef BUILD_FLOAT
+        const float smp = (inBuffer->f32[2 * inIdx] + inBuffer->f32[2 * inIdx + 1]) * fscale;
+        buf[captIdx] = clamp8_from_float(smp);
+#else
+        const int32_t smp = (inBuffer->s16[2 * inIdx] + inBuffer->s16[2 * inIdx + 1]) >> shift;
         buf[captIdx] = ((uint8_t)smp)^0x80;
+#endif // BUILD_FLOAT
     }
 
     // XXX the following two should really be atomic, though it probably doesn't
@@ -400,6 +446,15 @@
     }
 
     if (inBuffer->raw != outBuffer->raw) {
+#ifdef BUILD_FLOAT
+        if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+            for (size_t i = 0; i < sampleLen; ++i) {
+                outBuffer->f32[i] += inBuffer->f32[i];
+            }
+        } else {
+            memcpy(outBuffer->raw, inBuffer->raw, sampleLen * sizeof(float));
+        }
+#else
         if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
             for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
                 outBuffer->s16[i] = clamp16(outBuffer->s16[i] + inBuffer->s16[i]);
@@ -407,6 +462,7 @@
         } else {
             memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(int16_t));
         }
+#endif // BUILD_FLOAT
     }
     if (pContext->mState != VISUALIZER_STATE_ACTIVE) {
         return -ENODATA;
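
The float normalization above mirrors the old integer shift logic: the peak is scaled toward roughly half of full scale (127/256), and when the scale lands exactly on a power of two (frexp significand == 0.5) it is nudged by 255/256 so the capture never returns the PCM signal unaltered. A standalone sketch of that scaling; the final clamp below is a plain stand-in for clamp8_from_float():

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Convert a stereo float buffer to the 8-bit unsigned capture format using the
// same normalization rule as the float Visualizer path (simplified sketch).
static void captureNormalized(const float* in, size_t frames, uint8_t* out) {
    float maxSample = 0.f;
    for (size_t i = 0; i < frames * 2; ++i) {
        maxSample = std::fmax(maxSample, std::fabs(in[i]));
    }
    float fscale = 1.f;  // scale does not matter if the buffer is all zeros
    if (maxSample > 0.f) {
        constexpr float halfish = 127.f / 256.f;
        fscale = halfish / maxSample;
        int exp;  // unused
        if (std::frexp(fscale, &exp) == 0.5f) {
            fscale *= 255.f / 256.f;  // avoid returning the PCM signal unaltered
        }
    }
    for (size_t i = 0; i < frames; ++i) {
        const float smp = (in[2 * i] + in[2 * i + 1]) * fscale;  // mix L+R, then scale
        const float c = std::fmin(std::fmax(smp, -1.f), 127.f / 128.f);
        out[i] = (uint8_t)(int8_t)std::lrint(c * 128.f) ^ 0x80;  // bias to unsigned 8-bit
    }
}

int main() {
    const float in[8] = {0.1f, -0.1f, 0.2f, 0.2f, -0.3f, 0.1f, 0.05f, 0.f};
    uint8_t buf[4];
    captureNormalized(in, 4, buf);
    for (uint8_t b : buf) std::printf("%d ", b);
    std::printf("\n");
    return 0;
}
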
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 01f014f..a977300 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -86,7 +86,7 @@
     sp<IMemory> mMemory;
     std::unique_ptr<HeifStream> mStream;
     bool mEOS;
-    std::unique_ptr<uint8_t> mCache;
+    std::unique_ptr<uint8_t[]> mCache;
     off64_t mCachedOffset;
     size_t mCachedSize;
     size_t mCacheBufferSize;
@@ -165,7 +165,7 @@
         // it's reaching max cache buffer size, need to roll window, and possibly
         // expand the cache buffer.
         size_t newCacheBufferSize = mCacheBufferSize;
-        std::unique_ptr<uint8_t> newCache;
+        std::unique_ptr<uint8_t[]> newCache;
         uint8_t* dst = mCache.get();
         if (newCacheBufferSize < kMaxCacheBufferSize) {
             newCacheBufferSize = kMaxCacheBufferSize;
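
The switch above matters because std::unique_ptr<uint8_t> destroys memory obtained from new[] with delete rather than delete[], which is undefined behavior; the array form also provides operator[]. A minimal illustration:

#include <cstdint>
#include <memory>

int main() {
    // Wrong for arrays: std::unique_ptr<uint8_t> would call `delete`, not `delete[]`.
    // std::unique_ptr<uint8_t> bad(new uint8_t[1024]);  // undefined behavior on destruction

    // Correct: the array specialization calls `delete[]` and provides operator[].
    std::unique_ptr<uint8_t[]> cache(new uint8_t[1024]);
    cache[0] = 0x42;
    return cache[0] == 0x42 ? 0 : 1;
}
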
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index a22819a..e6d6b3e 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -20,7 +20,7 @@
     vndk: {
         enabled: true,
     },
-    srcs: ["AudioParameter.cpp", "TypeConverter.cpp", "TimeCheck.cpp"],
+    srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
     cflags: [
         "-Werror",
         "-Wno-error=deprecated-declarations",
diff --git a/media/libmedia/IMediaExtractorService.cpp b/media/libmedia/IMediaExtractorService.cpp
index d7533ca..0295abc 100644
--- a/media/libmedia/IMediaExtractorService.cpp
+++ b/media/libmedia/IMediaExtractorService.cpp
@@ -23,7 +23,6 @@
 #include <sys/types.h>
 #include <binder/Parcel.h>
 #include <media/IMediaExtractorService.h>
-#include <media/MediaExtractor.h>
 
 namespace android {
 
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index aade69a..e0f5a40 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -546,8 +546,8 @@
 
             if (info->mHasRefProfile) {
 
-                CamcorderProfile *profile =
-                    new CamcorderProfile(
+                std::unique_ptr<CamcorderProfile> profile =
+                    std::make_unique<CamcorderProfile>(
                             *mCamcorderProfiles[info->mRefProfileIndex]);
 
                 // Overwrite the quality
@@ -581,7 +581,7 @@
                         mCamcorderProfiles[info->mRefProfileIndex]->mQuality,
                         profile->mQuality, cameraId);
 
-                mCamcorderProfiles.add(profile);
+                mCamcorderProfiles.add(profile.release());
             }
         }
     }
diff --git a/media/libmedia/MediaUtils.cpp b/media/libmedia/MediaUtils.cpp
index bcdc3bd..320c7a9 100644
--- a/media/libmedia/MediaUtils.cpp
+++ b/media/libmedia/MediaUtils.cpp
@@ -34,7 +34,7 @@
     size_t percentageOfTotalMem) {
 
     if (running_with_asan()) {
-        ALOGW("Running with ASan, skip enforcing memory limitations.");
+        ALOGW("Running with (HW)ASan, skip enforcing memory limitations.");
         return;
     }
 
diff --git a/media/libmedia/MediaUtils.h b/media/libmedia/MediaUtils.h
index a678bcc..26075c4 100644
--- a/media/libmedia/MediaUtils.h
+++ b/media/libmedia/MediaUtils.h
@@ -20,9 +20,10 @@
 namespace android {
 
 extern "C" void __asan_init(void) __attribute__((weak));
+extern "C" void __hwasan_init(void) __attribute__((weak));
 
 static inline int running_with_asan() {
-    return &__asan_init != 0;
+    return &__asan_init != 0 || &__hwasan_init != 0;
 }
 
 /**
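
The weak-symbol probe above works because a weak, undefined symbol resolves to a null address when no sanitizer runtime is linked in, so taking its address is a link-time feature test. A standalone sketch of the same pattern:

#include <cstdio>

// Weakly declare the sanitizer entry points; if neither runtime is linked in,
// the symbols resolve to null addresses instead of causing a link error.
extern "C" void __asan_init(void) __attribute__((weak));
extern "C" void __hwasan_init(void) __attribute__((weak));

static bool runningWithSanitizer() {
    return &__asan_init != nullptr || &__hwasan_init != nullptr;
}

int main() {
    std::printf("sanitizer runtime present: %s\n", runningWithSanitizer() ? "yes" : "no");
    return 0;
}
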
diff --git a/media/libmedia/MidiIoWrapper.cpp b/media/libmedia/MidiIoWrapper.cpp
index 5ca3b48..1150d61 100644
--- a/media/libmedia/MidiIoWrapper.cpp
+++ b/media/libmedia/MidiIoWrapper.cpp
@@ -23,6 +23,7 @@
 #include <fcntl.h>
 
 #include <media/MidiIoWrapper.h>
+#include <media/MediaExtractorPluginApi.h>
 
 static int readAt(void *handle, void *buffer, int pos, int size) {
     return ((android::MidiIoWrapper*)handle)->readAt(buffer, pos, size);
@@ -61,6 +62,51 @@
     }
 }
 
+class DataSourceUnwrapper : public DataSourceBase {
+
+public:
+    explicit DataSourceUnwrapper(CDataSource *csource) {
+        mSource = csource;
+    }
+    virtual status_t initCheck() const { return OK; }
+
+    // Returns the number of bytes read, or -1 on failure. It's not an error if
+    // this returns zero; it just means the given offset is equal to, or
+    // beyond, the end of the source.
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
+        return mSource->readAt(mSource->handle, offset, data, size);
+    }
+
+    // May return ERROR_UNSUPPORTED.
+    virtual status_t getSize(off64_t *size) {
+        return mSource->getSize(mSource->handle, size);
+    }
+
+    virtual bool getUri(char * /*uriString*/, size_t /*bufferSize*/) {
+        return false;
+    }
+
+    virtual uint32_t flags() {
+        return 0;
+    }
+
+    virtual void close() {};
+private:
+    CDataSource *mSource;
+};
+
+MidiIoWrapper::MidiIoWrapper(CDataSource *csource) {
+    ALOGV("MidiIoWrapper(CDataSource)");
+    mFd = -1;
+    mDataSource = new DataSourceUnwrapper(csource);
+    off64_t l;
+    if (mDataSource->getSize(&l) == OK) {
+        mLength = l;
+    } else {
+        mLength = 0;
+    }
+}
+
 MidiIoWrapper::~MidiIoWrapper() {
     ALOGV("~MidiIoWrapper");
     if (mFd >= 0) {
diff --git a/media/libmedia/include/media/MidiIoWrapper.h b/media/libmedia/include/media/MidiIoWrapper.h
index b5e565e..6309dda 100644
--- a/media/libmedia/include/media/MidiIoWrapper.h
+++ b/media/libmedia/include/media/MidiIoWrapper.h
@@ -23,11 +23,14 @@
 
 namespace android {
 
+struct CDataSource;
+
 class MidiIoWrapper {
 public:
     explicit MidiIoWrapper(const char *path);
     explicit MidiIoWrapper(int fd, off64_t offset, int64_t size);
     explicit MidiIoWrapper(DataSourceBase *source);
+    explicit MidiIoWrapper(CDataSource *csource);
 
     ~MidiIoWrapper();
 
diff --git a/media/libmediaextractor/Android.bp b/media/libmediaextractor/Android.bp
index b9b47cd..0208ad4 100644
--- a/media/libmediaextractor/Android.bp
+++ b/media/libmediaextractor/Android.bp
@@ -29,7 +29,6 @@
         "MediaBufferGroup.cpp",
         "MediaSource.cpp",
         "MediaTrack.cpp",
-        "MediaExtractor.cpp",
         "MetaData.cpp",
         "MetaDataBase.cpp",
         "VorbisComment.cpp",
diff --git a/media/libmediaextractor/MediaExtractor.cpp b/media/libmediaextractor/MediaExtractor.cpp
deleted file mode 100644
index a6b3dc9..0000000
--- a/media/libmediaextractor/MediaExtractor.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaExtractor"
-#include <utils/Log.h>
-#include <pwd.h>
-
-#include <media/MediaExtractor.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-MediaExtractor::MediaExtractor() {
-    if (!LOG_NDEBUG) {
-        uid_t uid = getuid();
-        struct passwd *pw = getpwuid(uid);
-        ALOGV("extractor created in uid: %d (%s)", getuid(), pw->pw_name);
-    }
-}
-
-MediaExtractor::~MediaExtractor() {}
-
-uint32_t MediaExtractor::flags() const {
-    return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_PAUSE | CAN_SEEK;
-}
-
-}  // namespace android
diff --git a/media/libmediaextractor/include/media/DataSource.h b/media/libmediaextractor/include/media/DataSource.h
index 0e59f39..cb96ff5 100644
--- a/media/libmediaextractor/include/media/DataSource.h
+++ b/media/libmediaextractor/include/media/DataSource.h
@@ -22,6 +22,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/DataSourceBase.h>
 #include <media/IDataSource.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <utils/threads.h>
@@ -34,7 +35,7 @@
 
 class DataSource : public DataSourceBase, public virtual RefBase {
 public:
-    DataSource() {}
+    DataSource() : mWrapper(NULL) {}
 
     // returns a pointer to IDataSource if it is wrapped.
     virtual sp<IDataSource> getIDataSource() const {
@@ -69,10 +70,35 @@
         return String8("application/octet-stream");
     }
 
+    CDataSource *wrap() {
+        if (mWrapper) {
+            return mWrapper;
+        }
+        mWrapper = new CDataSource();
+        mWrapper->handle = this;
+
+        mWrapper->readAt = [](void *handle, off64_t offset, void *data, size_t size) -> ssize_t {
+            return ((DataSource*)handle)->readAt(offset, data, size);
+        };
+        mWrapper->getSize = [](void *handle, off64_t *size) -> status_t {
+            return ((DataSource*)handle)->getSize(size);
+        };
+        mWrapper->flags = [](void *handle) -> uint32_t {
+            return ((DataSource*)handle)->flags();
+        };
+        mWrapper->getUri = [](void *handle, char *uriString, size_t bufferSize) -> bool {
+            return ((DataSource*)handle)->getUri(uriString, bufferSize);
+        };
+        return mWrapper;
+    }
+
 protected:
-    virtual ~DataSource() {}
+    virtual ~DataSource() {
+        delete mWrapper;
+    }
 
 private:
+    CDataSource *mWrapper;
     DataSource(const DataSource &);
     DataSource &operator=(const DataSource &);
 };
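
The wrap() method added above is the C++-to-C bridge: captureless lambdas convert to plain function pointers, so a C struct of callbacks can forward into the object whose address rides in the opaque handle. A self-contained sketch of that pattern with made-up names, in case the idiom is unfamiliar:

    // Generic form of the DataSource::wrap() pattern; CCounter/Counter are
    // illustrative names, not part of the patch.
    struct CCounter {                 // plain C-style interface
        void *handle;
        int (*increment)(void *handle, int by);
    };

    class Counter {
    public:
        int increment(int by) { return mValue += by; }

        CCounter *wrap() {
            if (mWrapper == nullptr) {
                mWrapper = new CCounter();
                mWrapper->handle = this;
                // A lambda with an empty capture list decays to int(*)(void*, int),
                // so it can be stored in the C struct; 'this' travels via handle.
                mWrapper->increment = [](void *handle, int by) -> int {
                    return static_cast<Counter *>(handle)->increment(by);
                };
            }
            return mWrapper;
        }

        ~Counter() { delete mWrapper; }

    private:
        int mValue = 0;
        CCounter *mWrapper = nullptr;
    };

As in the patch, the wrapper is created lazily and owned by the C++ object, which is why ~DataSource() now deletes mWrapper.
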
diff --git a/media/libmediaextractor/include/media/MediaExtractor.h b/media/libmediaextractor/include/media/MediaExtractor.h
deleted file mode 100644
index 4ba98da..0000000
--- a/media/libmediaextractor/include/media/MediaExtractor.h
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_EXTRACTOR_H_
-
-#define MEDIA_EXTRACTOR_H_
-
-#include <stdio.h>
-#include <vector>
-
-#include <utils/Errors.h>
-#include <utils/Log.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-class DataSourceBase;
-class MetaDataBase;
-struct MediaTrack;
-
-
-class ExtractorAllocTracker {
-public:
-    ExtractorAllocTracker() {
-        ALOGD("extractor allocated: %p", this);
-    }
-    virtual ~ExtractorAllocTracker() {
-        ALOGD("extractor freed: %p", this);
-    }
-};
-
-
-class MediaExtractor
-// : public ExtractorAllocTracker
-{
-public:
-    virtual ~MediaExtractor();
-    virtual size_t countTracks() = 0;
-    virtual MediaTrack *getTrack(size_t index) = 0;
-
-    enum GetTrackMetaDataFlags {
-        kIncludeExtensiveMetaData = 1
-    };
-    virtual status_t getTrackMetaData(
-            MetaDataBase& meta,
-            size_t index, uint32_t flags = 0) = 0;
-
-    // Return container specific meta-data. The default implementation
-    // returns an empty metadata object.
-    virtual status_t getMetaData(MetaDataBase& meta) = 0;
-
-    enum Flags {
-        CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
-        CAN_SEEK_FORWARD   = 2,  // the "seek 10secs forward button"
-        CAN_PAUSE          = 4,
-        CAN_SEEK           = 8,  // the "seek bar"
-    };
-
-    // If subclasses do _not_ override this, the default is
-    // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
-    virtual uint32_t flags() const;
-
-    virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
-        return INVALID_OPERATION;
-    }
-
-    virtual const char * name() { return "<unspecified>"; }
-
-    typedef MediaExtractor* (*CreatorFunc)(
-            DataSourceBase *source, void *meta);
-    typedef void (*FreeMetaFunc)(void *meta);
-
-    // The sniffer can optionally fill in an opaque object, "meta", that helps
-    // the corresponding extractor initialize its state without duplicating
-    // effort already exerted by the sniffer. If "freeMeta" is given, it will be
-    // called against the opaque object when it is no longer used.
-    typedef CreatorFunc (*SnifferFunc)(
-            DataSourceBase *source, float *confidence,
-            void **meta, FreeMetaFunc *freeMeta);
-
-    typedef struct {
-        const uint8_t b[16];
-    } uuid_t;
-
-    typedef struct {
-        // version number of this structure
-        const uint32_t def_version;
-
-        // A unique identifier for this extractor.
-        // See below for a convenience macro to create this from a string.
-        uuid_t extractor_uuid;
-
-        // Version number of this extractor. When two extractors with the same
-        // uuid are encountered, the one with the largest version number will
-        // be used.
-        const uint32_t extractor_version;
-
-        // a human readable name
-        const char *extractor_name;
-
-        // the sniffer function
-        const SnifferFunc sniff;
-    } ExtractorDef;
-
-    static const uint32_t EXTRACTORDEF_VERSION = 1;
-
-    typedef ExtractorDef (*GetExtractorDef)();
-
-protected:
-    MediaExtractor();
-
-private:
-    MediaExtractor(const MediaExtractor &);
-    MediaExtractor &operator=(const MediaExtractor &);
-};
-
-// purposely not defined anywhere so that this will fail to link if
-// expressions below are not evaluated at compile time
-int invalid_uuid_string(const char *);
-
-template <typename T, size_t N>
-constexpr uint8_t _digitAt_(const T (&s)[N], const size_t n) {
-    return s[n] >= '0' && s[n] <= '9' ? s[n] - '0'
-            : s[n] >= 'a' && s[n] <= 'f' ? s[n] - 'a' + 10
-                    : s[n] >= 'A' && s[n] <= 'F' ? s[n] - 'A' + 10
-                            : invalid_uuid_string("uuid: bad digits");
-}
-
-template <typename T, size_t N>
-constexpr uint8_t _hexByteAt_(const T (&s)[N], size_t n) {
-    return (_digitAt_(s, n) << 4) + _digitAt_(s, n + 1);
-}
-
-constexpr bool _assertIsDash_(char c) {
-    return c == '-' ? true : invalid_uuid_string("Wrong format");
-}
-
-template <size_t N>
-constexpr MediaExtractor::uuid_t constUUID(const char (&s) [N]) {
-    static_assert(N == 37, "uuid: wrong length");
-    return
-            _assertIsDash_(s[8]),
-            _assertIsDash_(s[13]),
-            _assertIsDash_(s[18]),
-            _assertIsDash_(s[23]),
-            MediaExtractor::uuid_t {{
-                _hexByteAt_(s, 0),
-                _hexByteAt_(s, 2),
-                _hexByteAt_(s, 4),
-                _hexByteAt_(s, 6),
-                _hexByteAt_(s, 9),
-                _hexByteAt_(s, 11),
-                _hexByteAt_(s, 14),
-                _hexByteAt_(s, 16),
-                _hexByteAt_(s, 19),
-                _hexByteAt_(s, 21),
-                _hexByteAt_(s, 24),
-                _hexByteAt_(s, 26),
-                _hexByteAt_(s, 28),
-                _hexByteAt_(s, 30),
-                _hexByteAt_(s, 32),
-                _hexByteAt_(s, 34),
-            }};
-}
-// Convenience macro to create a uuid_t from a string literal, which should
-// be formatted as "12345678-1234-1234-1234-123456789abc", as generated by
-// e.g. https://www.uuidgenerator.net/ or the 'uuidgen' linux command.
-// Hex digits may be upper or lower case.
-//
-// The macro call is otherwise equivalent to specifying the structure directly
-// (e.g. UUID("7d613858-5837-4a38-84c5-332d1cddee27") is the same as
-//       {{0x7d, 0x61, 0x38, 0x58, 0x58, 0x37, 0x4a, 0x38,
-//         0x84, 0xc5, 0x33, 0x2d, 0x1c, 0xdd, 0xee, 0x27}})
-
-#define UUID(str) []{ constexpr MediaExtractor::uuid_t uuid = constUUID(str); return uuid; }()
-
-
-
-}  // namespace android
-
-#endif  // MEDIA_EXTRACTOR_H_
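
The header deleted above also carried the compile-time UUID parser (presumably relocated alongside the new extractor plugin headers). It relies on two tricks: chained ternaries do the hex parsing in a single constexpr return, and a deliberately undefined function poisons constant evaluation whenever a digit is out of range. A stand-alone sketch of just those tricks, with illustrative names:

    // Illustrative reduction of the constUUID() machinery removed above.
    #include <stddef.h>
    #include <stdint.h>

    int bad_hex_char(const char *);   // purposely never defined: calling it in a
                                      // constant expression is a compile error

    template <typename T, size_t N>
    constexpr uint8_t hexDigit(const T (&s)[N], size_t n) {
        return s[n] >= '0' && s[n] <= '9' ? s[n] - '0'
                : s[n] >= 'a' && s[n] <= 'f' ? s[n] - 'a' + 10
                        : s[n] >= 'A' && s[n] <= 'F' ? s[n] - 'A' + 10
                                : bad_hex_char("bad hex digit");
    }

    template <typename T, size_t N>
    constexpr uint8_t hexByte(const T (&s)[N], size_t n) {
        return (hexDigit(s, n) << 4) + hexDigit(s, n + 1);
    }

    static_assert(hexByte("7d61", 0) == 0x7d, "parsed at compile time");
    // static_assert(hexByte("7x61", 0) == 0x7d, "");  // rejected: non-constant
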
diff --git a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
index dfe34e8..2e9aede 100644
--- a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
@@ -53,6 +53,7 @@
     kKeyFrameRate         = 'frmR',  // int32_t (video frame rate fps)
     kKeyBitRate           = 'brte',  // int32_t (bps)
     kKeyMaxBitRate        = 'mxBr',  // int32_t (bps)
+    kKeyBitsPerSample     = 'bits',  // int32_t (bits per sample)
     kKeyStreamHeader      = 'stHd',  // raw data
     kKeyESDS              = 'esds',  // raw data
     kKeyAACProfile        = 'aacp',  // int32_t
@@ -225,6 +226,7 @@
     kKeyExifOffset       = 'exof', // int64_t, Exif data offset
     kKeyExifSize         = 'exsz', // int64_t, Exif data size
     kKeyIsExif           = 'exif', // bool (int32_t) buffer contains exif data block
+    kKeyPcmBigEndian     = 'pcmb', // bool (int32_t)
 };
 
 enum {
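
The two keys added in this hunk follow the usual FourCC convention for MetaDataBase keys, with the boolean carried as an int32_t. A hedged usage example (assuming MetaDataBase's customary setInt32/findInt32 accessors, which are not shown in this patch):

    // Hedged example: publish and read back the new PCM keys.
    void publishPcmFormat(MetaDataBase &meta, int32_t bitsPerSample, bool bigEndian) {
        meta.setInt32(kKeyBitsPerSample, bitsPerSample);     // e.g. 16 or 24
        meta.setInt32(kKeyPcmBigEndian, bigEndian ? 1 : 0);  // bool as int32_t
    }

    bool isBigEndianPcm(MetaDataBase &meta) {
        int32_t bigEndian = 0;
        return meta.findInt32(kKeyPcmBigEndian, &bigEndian) && bigEndian != 0;
    }
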
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 1fa8789..0fb5abc 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -9,6 +9,7 @@
 
     srcs: [
         "JAudioTrack.cpp",
+        "JavaVMHelper.cpp",
         "MediaPlayer2AudioOutput.cpp",
         "mediaplayer2.cpp",
     ],
@@ -49,6 +50,10 @@
         "media_plugin_headers",
     ],
 
+    include_dirs: [
+        "frameworks/base/core/jni",
+    ],
+
     static_libs: [
         "libmedia_helper",
         "libstagefright_nuplayer2",
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index ac0cc57..778ae1b 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -21,7 +21,7 @@
 #include "mediaplayer2/JAudioTrack.h"
 
 #include <android_media_AudioErrors.h>
-#include <android_runtime/AndroidRuntime.h>
+#include <mediaplayer2/JavaVMHelper.h>
 
 namespace android {
 
@@ -39,7 +39,7 @@
         const audio_attributes_t* pAttributes,        // AudioAttributes
         float maxRequiredSpeed) {                     // bufferSizeInBytes
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
     mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
 
@@ -116,19 +116,19 @@
 }
 
 JAudioTrack::~JAudioTrack() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     env->DeleteGlobalRef(mAudioTrackCls);
 }
 
 size_t JAudioTrack::frameCount() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetBufferSizeInFrames = env->GetMethodID(
             mAudioTrackCls, "getBufferSizeInFrames", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
 }
 
 size_t JAudioTrack::channelCount() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
 }
@@ -143,7 +143,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
             mAudioTrackCls, "getPlaybackHeadPosition", "()I");
     *position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
@@ -152,7 +152,7 @@
 }
 
 bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
     jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
@@ -189,7 +189,7 @@
 status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
     // TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
     // Should we do the same thing?
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
     jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
@@ -224,7 +224,7 @@
 }
 
 const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jmethodID jGetPlaybackParams = env->GetMethodID(
             mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
@@ -266,7 +266,7 @@
         return media::VolumeShaper::Status(BAD_VALUE);
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
             "(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
@@ -282,7 +282,7 @@
 }
 
 status_t JAudioTrack::setAuxEffectSendLevel(float level) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
             mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
@@ -290,14 +290,14 @@
 }
 
 status_t JAudioTrack::attachAuxEffect(int effectId) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
     int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
     return javaToNativeStatus(result);
 }
 
 status_t JAudioTrack::setVolume(float left, float right) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     // TODO: Java setStereoVolume is deprecated. Do we really need this method?
     jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
@@ -305,14 +305,14 @@
 }
 
 status_t JAudioTrack::setVolume(float volume) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
     return javaToNativeStatus(result);
 }
 
 status_t JAudioTrack::start() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
     // TODO: Should we catch the Java IllegalStateException from play()?
     env->CallVoidMethod(mAudioTrackObj, jPlay);
@@ -324,7 +324,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jbyteArray jAudioData = env->NewByteArray(size);
     env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
 
@@ -353,7 +353,7 @@
 }
 
 void JAudioTrack::stop() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
     env->CallVoidMethod(mAudioTrackObj, jStop);
     // TODO: Should we catch IllegalStateException?
@@ -365,20 +365,20 @@
 }
 
 void JAudioTrack::flush() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
     env->CallVoidMethod(mAudioTrackObj, jFlush);
 }
 
 void JAudioTrack::pause() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
     env->CallVoidMethod(mAudioTrackObj, jPause);
     // TODO: Should we catch IllegalStateException?
 }
 
 bool JAudioTrack::isPlaying() const {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
     int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
 
@@ -393,7 +393,7 @@
 }
 
 uint32_t JAudioTrack::getSampleRate() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
 }
@@ -403,7 +403,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetBufferSizeInFrames = env->GetMethodID(
             mAudioTrackCls, "getBufferSizeInFrames", "()I");
     int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
@@ -417,7 +417,7 @@
 }
 
 audio_format_t JAudioTrack::format() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
     int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
     return audioFormatToNative(javaFormat);
@@ -454,7 +454,7 @@
 }
 
 audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
             "()Landroid/media/AudioDeviceInfo;");
     jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
@@ -469,14 +469,14 @@
 }
 
 audio_session_t JAudioTrack::getAudioSessionId() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
     jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
     return (audio_session_t) sessionId;
 }
 
 status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
     jmethodID jSetAudioOutputDeviceById = env->GetMethodID(
             jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
@@ -550,7 +550,7 @@
         return NULL;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     // Referenced "android_media_VolumeShaper.h".
     jfloatArray xarray = nullptr;
@@ -595,7 +595,7 @@
 jobject JAudioTrack::createVolumeShaperOperationObj(
         const sp<media::VolumeShaper::Operation>& operation) {
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
     jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
@@ -647,7 +647,7 @@
 }
 
 jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2Impl$StreamEventCallback");
     jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
     jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor, this, cbf, user);
@@ -655,7 +655,7 @@
 }
 
 jobject JAudioTrack::createCallbackExecutor() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
     jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
             "newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
diff --git a/media/libmediaplayer2/JavaVMHelper.cpp b/media/libmediaplayer2/JavaVMHelper.cpp
new file mode 100644
index 0000000..90aaa7f
--- /dev/null
+++ b/media/libmediaplayer2/JavaVMHelper.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JavaVMHelper"
+
+#include "mediaplayer2/JavaVMHelper.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <stdlib.h>
+
+namespace android {
+
+// static
+std::atomic<JavaVM *> JavaVMHelper::sJavaVM(NULL);
+
+// static
+JNIEnv *JavaVMHelper::getJNIEnv() {
+    JNIEnv *env;
+    JavaVM *vm = sJavaVM.load();
+    CHECK(vm != NULL);
+
+    if (vm->GetEnv((void **)&env, JNI_VERSION_1_4) != JNI_OK) {
+        return NULL;
+    }
+
+    return env;
+}
+
+// static
+void JavaVMHelper::setJavaVM(JavaVM *vm) {
+    sJavaVM.store(vm);
+}
+
+}  // namespace android
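
This patch does not show who injects the VM; a plausible (purely hypothetical) wiring point is the library's JNI_OnLoad, which receives the JavaVM exactly once when the native code is loaded. Note that GetEnv, as used by getJNIEnv() above, does not attach the calling thread, so callers are expected to already be attached to the VM.

    // Hypothetical wiring, not part of this patch.
    #include <jni.h>
    #include <mediaplayer2/JavaVMHelper.h>

    extern "C" jint JNI_OnLoad(JavaVM *vm, void * /* reserved */) {
        android::JavaVMHelper::setJavaVM(vm);
        return JNI_VERSION_1_4;   // the same version getJNIEnv() requests
    }
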
diff --git a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
new file mode 100644
index 0000000..35091b7
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JAVA_VM_HELPER_H_
+
+#define JAVA_VM_HELPER_H_
+
+#include "jni.h"
+
+#include <atomic>
+
+namespace android {
+
+struct JavaVMHelper {
+    static JNIEnv *getJNIEnv();
+    static void setJavaVM(JavaVM *vm);
+
+private:
+    // Once a valid JavaVM has been set, it should never be reset or changed.
+    // However, as it may be accessed from multiple threads, access needs to be
+    // synchronized.
+    static std::atomic<JavaVM *> sJavaVM;
+};
+
+}  // namespace android
+
+#endif  // JAVA_VM_HELPER_H_
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
index 02bf891..a6bf543 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
@@ -171,7 +171,6 @@
 
     virtual status_t prepareAsync() = 0;
     virtual status_t start() = 0;
-    virtual status_t stop() = 0;
     virtual status_t pause() = 0;
     virtual bool isPlaying() = 0;
     virtual status_t setPlaybackSettings(const AudioPlaybackRate& rate) {
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
index 2fb5a2c..4b0a960 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
@@ -96,17 +96,20 @@
 enum media2_info_type {
     // 0xx
     MEDIA2_INFO_UNKNOWN = 1,
-    // The player was started because it was used as the next player for another
-    // player, which just completed playback
-    MEDIA2_INFO_STARTED_AS_NEXT = 2,
+    // The player just started the playback of this data source.
+    MEDIA2_INFO_DATA_SOURCE_START = 2,
     // The player just pushed the very first video frame for rendering
     MEDIA2_INFO_VIDEO_RENDERING_START = 3,
     // The player just pushed the very first audio frame for rendering
     MEDIA2_INFO_AUDIO_RENDERING_START = 4,
     // The player just completed the playback of this data source
-    MEDIA2_INFO_PLAYBACK_COMPLETE = 5,
-    // The player just completed the playback of the full play list
-    MEDIA2_INFO_PLAYLIST_END = 6,
+    MEDIA2_INFO_DATA_SOURCE_END = 5,
+    // The player just completed the playback of all data sources.
+    // This event is not visible in native code; the entry is kept for completeness.

+    MEDIA2_INFO_DATA_SOURCE_LIST_END = 6,
+    // The player just completed an iteration of playback loop. This event is sent only when
+    // looping is enabled.
+    MEDIA2_INFO_DATA_SOURCE_REPEAT = 7,
 
     //1xx
     // The player just prepared a data source.
@@ -165,8 +168,7 @@
     MEDIA_PLAYER2_PREPARED           = 1 << 3,
     MEDIA_PLAYER2_STARTED            = 1 << 4,
     MEDIA_PLAYER2_PAUSED             = 1 << 5,
-    MEDIA_PLAYER2_STOPPED            = 1 << 6,
-    MEDIA_PLAYER2_PLAYBACK_COMPLETE  = 1 << 7
+    MEDIA_PLAYER2_PLAYBACK_COMPLETE  = 1 << 6
 };
 
 // Keep KEY_PARAMETER_* in sync with MediaPlayer2.java.
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index 3af212e..43fba23 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -65,7 +65,6 @@
             status_t        setBufferingSettings(const BufferingSettings& buffering);
             status_t        prepareAsync();
             status_t        start();
-            status_t        stop();
             status_t        pause();
             bool            isPlaying();
             mediaplayer2_states getState();
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index 4fb47b8..f0ea59e 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -750,7 +750,7 @@
 status_t MediaPlayer2::prepareAsync() {
     ALOGV("prepareAsync");
     Mutex::Autolock _l(mLock);
-    if ((mPlayer != 0) && (mCurrentState & (MEDIA_PLAYER2_INITIALIZED | MEDIA_PLAYER2_STOPPED))) {
+    if ((mPlayer != 0) && (mCurrentState & MEDIA_PLAYER2_INITIALIZED)) {
         if (mAudioAttributesParcel != NULL) {
             status_t err = setAudioAttributes_l(*mAudioAttributesParcel);
             if (err != OK) {
@@ -806,24 +806,6 @@
     return ret;
 }
 
-status_t MediaPlayer2::stop() {
-    ALOGV("stop");
-    Mutex::Autolock _l(mLock);
-    if (mCurrentState & MEDIA_PLAYER2_STOPPED) return NO_ERROR;
-    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER2_STARTED | MEDIA_PLAYER2_PREPARED |
-                    MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE ) ) ) {
-        status_t ret = mPlayer->stop();
-        if (ret != NO_ERROR) {
-            mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
-        } else {
-            mCurrentState = MEDIA_PLAYER2_STOPPED;
-        }
-        return ret;
-    }
-    ALOGE("stop called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
-    return INVALID_OPERATION;
-}
-
 status_t MediaPlayer2::pause() {
     ALOGV("pause");
     Mutex::Autolock _l(mLock);
@@ -873,8 +855,7 @@
     if (mCurrentState & MEDIA_PLAYER2_STARTED) {
         return MEDIAPLAYER2_STATE_PLAYING;
     }
-    if (mCurrentState
-        & (MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_STOPPED | MEDIA_PLAYER2_PLAYBACK_COMPLETE)) {
+    if (mCurrentState & (MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE)) {
         return MEDIAPLAYER2_STATE_PAUSED;
     }
     // now only mCurrentState & MEDIA_PLAYER2_PREPARED is true
@@ -890,7 +871,7 @@
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
-    if (mPlayer == 0 || (mCurrentState & MEDIA_PLAYER2_STOPPED)) {
+    if (mPlayer == 0) {
         return INVALID_OPERATION;
     }
 
@@ -982,7 +963,7 @@
     Mutex::Autolock _l(mLock);
     ALOGV("getDuration_l");
     bool isValidState = (mCurrentState & (MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
-            MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_STOPPED | MEDIA_PLAYER2_PLAYBACK_COMPLETE));
+            MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE));
     if (mPlayer == 0 || !isValidState) {
         ALOGE("Attempt to call getDuration in wrong state: mPlayer=%p, mCurrentState=%u",
                 mPlayer.get(), mCurrentState);
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
index 196b103..e317e23 100644
--- a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
@@ -28,13 +28,12 @@
 #include <media/IMediaExtractorService.h>
 #include <media/IMediaSource.h>
 #include <media/MediaHTTPService.h>
-#include <media/MediaExtractor.h>
 #include <media/MediaSource.h>
 #include <media/NdkWrapper.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/ClearDataSourceFactory.h>
 #include <media/stagefright/InterfaceUtils.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaClock.h>
@@ -368,7 +367,7 @@
             String8 contentType;
 
             if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
-                mHttpSource = DataSourceFactory::CreateMediaHTTP(mHTTPService);
+                mHttpSource = ClearDataSourceFactory::CreateMediaHTTP(mHTTPService);
                 if (mHttpSource == NULL) {
                     ALOGE("Failed to create http source!");
                     notifyPreparedAndCleanup(UNKNOWN_ERROR);
@@ -378,7 +377,7 @@
 
             mLock.unlock();
             // This might take long time if connection has some issue.
-            sp<DataSource> dataSource = DataSourceFactory::CreateFromURI(
+            sp<DataSource> dataSource = ClearDataSourceFactory::CreateFromURI(
                    mHTTPService, uri, &mUriHeaders, &contentType,
                    static_cast<HTTPBase *>(mHttpSource.get()));
             mLock.lock();
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 060b698..b6b9b78 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -1684,6 +1684,7 @@
     }
 
     startPlaybackTimer("onstart");
+    notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_START, 0);
 
     postScanSources();
 }
@@ -2474,8 +2475,8 @@
     if (mDriver != NULL) {
         sp<NuPlayer2Driver> driver = mDriver.promote();
         if (driver != NULL) {
-            notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_PLAYBACK_COMPLETE, 0);
-            notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_STARTED_AS_NEXT, 0);
+            notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_END, 0);
+            notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_START, 0);
         }
     }
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
index e48e388..e215965 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
@@ -372,10 +372,16 @@
                             timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
                     mDTVCCPacket->setRange(0, 0);
                 }
+                if (mDTVCCPacket->size() + 2 > mDTVCCPacket->capacity()) {
+                    return false;
+                }
                 memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
                 mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
                 br.skipBits(16);
             } else if (mDTVCCPacket->size() > 0 && cc_type == 2) {
+                if (mDTVCCPacket->size() + 2 > mDTVCCPacket->capacity()) {
+                    return false;
+                }
                 memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
                 mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
                 br.skipBits(16);
@@ -403,6 +409,9 @@
                         line21CCBuf = new ABuffer((cc_count - i) * sizeof(CCData));
                         line21CCBuf->setRange(0, 0);
                     }
+                    if (line21CCBuf->size() + sizeof(cc) > line21CCBuf->capacity()) {
+                        return false;
+                    }
                     memcpy(line21CCBuf->data() + line21CCBuf->size(), &cc, sizeof(cc));
                     line21CCBuf->setRange(0, line21CCBuf->size() + sizeof(CCData));
                 }
@@ -464,6 +473,9 @@
             size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
             if (mSelectedTrack == (ssize_t)trackIndex) {
                 sp<ABuffer> ccPacket = new ABuffer(block_size);
+                if (ccPacket->capacity() == 0) {
+                    return false;
+                }
                 memcpy(ccPacket->data(), br.data(), block_size);
                 mCCMap.add(timeUs, ccPacket);
             }
@@ -527,10 +539,12 @@
         ccBuf = new ABuffer(size);
         ccBuf->setRange(0, 0);
 
-        for (ssize_t i = 0; i <= index; ++i) {
-            sp<ABuffer> buf = mCCMap.valueAt(i);
-            memcpy(ccBuf->data() + ccBuf->size(), buf->data(), buf->size());
-            ccBuf->setRange(0, ccBuf->size() + buf->size());
+        if (ccBuf->capacity() > 0) {
+            for (ssize_t i = 0; i <= index; ++i) {
+                sp<ABuffer> buf = mCCMap.valueAt(i);
+                memcpy(ccBuf->data() + ccBuf->size(), buf->data(), buf->size());
+                ccBuf->setRange(0, ccBuf->size() + buf->size());
+            }
         }
     }
 
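
The bounds checks added in this file all take the same shape: verify that the pending write fits inside the ABuffer's capacity before the memcpy/setRange pair, and bail out of the parse otherwise. Folded into a helper, the pattern looks like the sketch below (illustrative only; the patch keeps the checks inline):

    // Illustrative helper; mirrors the inline checks added above.
    static bool appendToBuffer(const sp<ABuffer> &buf, const void *data, size_t size) {
        if (buf == NULL || buf->size() + size > buf->capacity()) {
            return false;   // refuse to write past capacity
        }
        memcpy(buf->data() + buf->size(), data, size);
        buf->setRange(0, buf->size() + size);
        return true;
    }
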
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index 645138a..931b86e 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -1088,6 +1088,12 @@
                         static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
                 }
                 if (mediaBuf != NULL) {
+                    if (mediaBuf->size() > codecBuffer->capacity()) {
+                        handleError(ERROR_BUFFER_TOO_SMALL);
+                        mDequeuedInputBuffers.push_back(bufferIx);
+                        return false;
+                    }
+
                     codecBuffer->setRange(0, mediaBuf->size());
                     memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 03d17a5..f85e3a2 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -272,13 +272,6 @@
             mState = STATE_PREPARING;
             mPlayer->prepareAsync();
             return OK;
-        case STATE_STOPPED:
-            // this is really just paused. handle as seek to start
-            mAtEOS = false;
-            mState = STATE_STOPPED_AND_PREPARING;
-            mPlayer->seekToAsync(0, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */,
-                    true /* needNotify */);
-            return OK;
         default:
             return INVALID_OPERATION;
     };
@@ -293,7 +286,6 @@
 status_t NuPlayer2Driver::start_l() {
     switch (mState) {
         case STATE_PAUSED:
-        case STATE_STOPPED_AND_PREPARED:
         case STATE_PREPARED:
         {
             mPlayer->start();
@@ -320,34 +312,6 @@
     return OK;
 }
 
-status_t NuPlayer2Driver::stop() {
-    ALOGD("stop(%p)", this);
-    Mutex::Autolock autoLock(mLock);
-
-    switch (mState) {
-        case STATE_RUNNING:
-            mPlayer->pause();
-            // fall through
-
-        case STATE_PAUSED:
-            mState = STATE_STOPPED;
-            //notifyListener_l(MEDIA2_STOPPED);
-            break;
-
-        case STATE_PREPARED:
-        case STATE_STOPPED:
-        case STATE_STOPPED_AND_PREPARING:
-        case STATE_STOPPED_AND_PREPARED:
-            mState = STATE_STOPPED;
-            break;
-
-        default:
-            return INVALID_OPERATION;
-    }
-
-    return OK;
-}
-
 status_t NuPlayer2Driver::pause() {
     ALOGD("pause(%p)", this);
     // The NuPlayerRenderer may get flushed if pause for long enough, e.g. the pause timeout tear
@@ -391,7 +355,6 @@
             mState = STATE_PAUSED;
         } else if (rate.mSpeed != 0.f
                 && (mState == STATE_PAUSED
-                    || mState == STATE_STOPPED_AND_PREPARED
                     || mState == STATE_PREPARED)) {
             err = start_l();
         }
@@ -419,7 +382,6 @@
 
     switch (mState) {
         case STATE_PREPARED:
-        case STATE_STOPPED_AND_PREPARED:
         case STATE_PAUSED:
         case STATE_RUNNING:
         {
@@ -601,10 +563,6 @@
             break;
     }
 
-    if (mState != STATE_STOPPED) {
-        // notifyListener_l(MEDIA2_STOPPED);
-    }
-
     mState = STATE_RESET_IN_PROGRESS;
     mPlayer->resetAsync();
 
@@ -780,20 +738,7 @@
     ALOGV("notifySeekComplete(%p)", this);
     Mutex::Autolock autoLock(mLock);
     mSeekInProgress = false;
-    notifySeekComplete_l(srcId);
-}
-
-void NuPlayer2Driver::notifySeekComplete_l(int64_t srcId) {
-    bool wasSeeking = true;
-    if (mState == STATE_STOPPED_AND_PREPARING) {
-        wasSeeking = false;
-        mState = STATE_STOPPED_AND_PREPARED;
-        mCondition.broadcast();
-    } else if (mState == STATE_STOPPED) {
-        // no need to notify listener
-        return;
-    }
-    notifyListener_l(srcId, wasSeeking ? MEDIA2_SEEK_COMPLETE : MEDIA2_PREPARED);
+    notifyListener_l(srcId, MEDIA2_SEEK_COMPLETE);
 }
 
 status_t NuPlayer2Driver::dump(
@@ -930,7 +875,12 @@
                             // the last little bit of audio. In looping mode, we need to restart it.
                             mAudioSink->start();
                         }
-                        // don't send completion event when looping
+
+                        sp<AMessage> notify = new AMessage(kWhatNotifyListener, this);
+                        notify->setInt64("srcId", srcId);
+                        notify->setInt32("messageId", MEDIA2_INFO);
+                        notify->setInt32("ext1", MEDIA2_INFO_DATA_SOURCE_REPEAT);
+                        notify->post();
                         return;
                     }
                     if (property_get_bool("persist.debug.sf.stats", false)) {
@@ -1073,9 +1023,6 @@
         case STATE_RUNNING: rval = "RUNNING"; break;
         case STATE_PAUSED: rval = "PAUSED"; break;
         case STATE_RESET_IN_PROGRESS: rval = "RESET_IN_PROGRESS"; break;
-        case STATE_STOPPED: rval = "STOPPED"; break;
-        case STATE_STOPPED_AND_PREPARING: rval = "STOPPED_AND_PREPARING"; break;
-        case STATE_STOPPED_AND_PREPARED: rval = "STOPPED_AND_PREPARED"; break;
         default:
             // yes, this buffer is shared and vulnerable to races
             snprintf(rawbuffer, sizeof(rawbuffer), "%d", state);
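
The replacement for the old "don't send completion event when looping" branch packages the notification into an AMessage and posts it, so the listener is informed asynchronously on the handler's looper thread instead of synchronously from the caller. The generic shape of that pattern, reduced to a self-contained handler with made-up names (assumes the handler has been registered with an ALooper):

    struct ExampleHandler : public AHandler {
        enum { kWhatReport = 'rprt' };

        void report(int64_t srcId, int32_t code) {
            sp<AMessage> msg = new AMessage(kWhatReport, this);
            msg->setInt64("srcId", srcId);
            msg->setInt32("code", code);
            msg->post();                    // delivered on the looper thread
        }

    protected:
        virtual void onMessageReceived(const sp<AMessage> &msg) {
            if (msg->what() == kWhatReport) {
                int64_t srcId = 0;
                int32_t code = 0;
                msg->findInt64("srcId", &srcId);
                msg->findInt32("code", &code);
                // react to the notification here, off the posting thread
            }
        }
    };
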
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
index 4da2566..6d5a007 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
@@ -40,31 +40,31 @@
             BufferingSettings* buffering /* nonnull */) override;
     virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
 
-    virtual status_t prepareAsync();
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t pause();
-    virtual bool isPlaying();
-    virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
-    virtual status_t getPlaybackSettings(AudioPlaybackRate *rate);
-    virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
-    virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps);
+    virtual status_t prepareAsync() override;
+    virtual status_t start() override;
+    virtual status_t pause() override;
+    virtual bool isPlaying() override;
+    virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate) override;
+    virtual status_t getPlaybackSettings(AudioPlaybackRate *rate) override;
+    virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) override;
+    virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps) override;
     virtual status_t seekTo(
-            int64_t msec, MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC);
-    virtual status_t getCurrentPosition(int64_t *msec);
-    virtual status_t getDuration(int64_t *msec);
-    virtual status_t reset();
+            int64_t msec,
+            MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
+    virtual status_t getCurrentPosition(int64_t *msec) override;
+    virtual status_t getDuration(int64_t *msec) override;
+    virtual status_t reset() override;
     virtual status_t notifyAt(int64_t mediaTimeUs) override;
-    virtual status_t setLooping(int loop);
-    virtual status_t invoke(const Parcel &request, Parcel *reply);
-    virtual void setAudioSink(const sp<AudioSink> &audioSink);
-    virtual status_t setParameter(int key, const Parcel &request);
-    virtual status_t getParameter(int key, Parcel *reply);
+    virtual status_t setLooping(int loop) override;
+    virtual status_t invoke(const Parcel &request, Parcel *reply) override;
+    virtual void setAudioSink(const sp<AudioSink> &audioSink) override;
+    virtual status_t setParameter(int key, const Parcel &request) override;
+    virtual status_t getParameter(int key, Parcel *reply) override;
 
     virtual status_t getMetadata(
-            const media::Metadata::Filter& ids, Parcel *records);
+            const media::Metadata::Filter& ids, Parcel *records) override;
 
-    virtual status_t dump(int fd, const Vector<String16> &args) const;
+    virtual status_t dump(int fd, const Vector<String16> &args) const override;
 
     virtual void onMessageReceived(const sp<AMessage> &msg) override;
 
@@ -77,7 +77,6 @@
     void notifyMoreRebufferingTimeUs(int64_t srcId, int64_t timeUs);
     void notifyRebufferingWhenExit(int64_t srcId, bool status);
     void notifySeekComplete(int64_t srcId);
-    void notifySeekComplete_l(int64_t srcId);
     void notifyListener(int64_t srcId, int msg, int ext1 = 0, int ext2 = 0,
                         const Parcel *in = NULL);
     void notifyFlagsChanged(int64_t srcId, uint32_t flags);
@@ -99,9 +98,6 @@
         STATE_RUNNING,
         STATE_PAUSED,
         STATE_RESET_IN_PROGRESS,
-        STATE_STOPPED,                  // equivalent to PAUSED
-        STATE_STOPPED_AND_PREPARING,    // equivalent to PAUSED, but seeking
-        STATE_STOPPED_AND_PREPARED,     // equivalent to PAUSED, but seek complete
     };
 
     std::string stateString(State state);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 23d66bb..8cd6eda 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -25,7 +25,6 @@
 #include <cutils/properties.h>
 #include <media/DataSource.h>
 #include <media/MediaBufferHolder.h>
-#include <media/MediaExtractor.h>
 #include <media/MediaSource.h>
 #include <media/IMediaExtractorService.h>
 #include <media/IMediaHTTPService.h>
@@ -38,6 +37,7 @@
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaClock.h>
 #include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index a5f5fc6..0784939 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1120,6 +1120,7 @@
             } else if (what == DecoderBase::kWhatShutdownCompleted) {
                 ALOGV("%s shutdown completed", audio ? "audio" : "video");
                 if (audio) {
+                    Mutex::Autolock autoLock(mDecoderLock);
                     mAudioDecoder.clear();
                     mAudioDecoderError = false;
                     ++mAudioDecoderGeneration;
@@ -1127,6 +1128,7 @@
                     CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
                     mFlushingAudio = SHUT_DOWN;
                 } else {
+                    Mutex::Autolock autoLock(mDecoderLock);
                     mVideoDecoder.clear();
                     mVideoDecoderError = false;
                     ++mVideoDecoderGeneration;
@@ -1447,29 +1449,6 @@
             break;
         }
 
-        case kWhatGetStats:
-        {
-            ALOGV("kWhatGetStats");
-
-            Vector<sp<AMessage>> *trackStats;
-            CHECK(msg->findPointer("trackstats", (void**)&trackStats));
-
-            trackStats->clear();
-            if (mVideoDecoder != NULL) {
-                trackStats->push_back(mVideoDecoder->getStats());
-            }
-            if (mAudioDecoder != NULL) {
-                trackStats->push_back(mAudioDecoder->getStats());
-            }
-
-            // respond for synchronization
-            sp<AMessage> response = new AMessage;
-            sp<AReplyToken> replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
-            response->postReply(replyID);
-            break;
-        }
-
         default:
             TRESPASS();
             break;
@@ -1817,6 +1796,7 @@
           (long long)currentPositionUs, forceNonOffload, needsToCreateAudioDecoder);
     if (mAudioDecoder != NULL) {
         mAudioDecoder->pause();
+        Mutex::Autolock autoLock(mDecoderLock);
         mAudioDecoder.clear();
         mAudioDecoderError = false;
         ++mAudioDecoderGeneration;
@@ -1935,6 +1915,8 @@
         }
     }
 
+    Mutex::Autolock autoLock(mDecoderLock);
+
     if (audio) {
         sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
         ++mAudioDecoderGeneration;
@@ -2236,13 +2218,15 @@
 void NuPlayer::getStats(Vector<sp<AMessage> > *trackStats) {
     CHECK(trackStats != NULL);
 
-    ALOGV("NuPlayer::getStats()");
-    sp<AMessage> msg = new AMessage(kWhatGetStats, this);
-    msg->setPointer("trackstats", trackStats);
+    trackStats->clear();
 
-    sp<AMessage> response;
-    (void) msg->postAndAwaitResponse(&response);
-    // response is for synchronization, ignore contents
+    Mutex::Autolock autoLock(mDecoderLock);
+    if (mVideoDecoder != NULL) {
+        trackStats->push_back(mVideoDecoder->getStats());
+    }
+    if (mAudioDecoder != NULL) {
+        trackStats->push_back(mAudioDecoder->getStats());
+    }
 }
 
 sp<MetaData> NuPlayer::getFileMeta() {
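
getStats() above now reads the decoder pointers directly under the new mDecoderLock instead of doing a postAndAwaitResponse round-trip to the NuPlayer looper; the matching Autolock additions earlier in this file ensure every writer of mAudioDecoder/mVideoDecoder holds the same lock. Reduced to its essentials, the discipline looks like this (names are illustrative):

    // Condensed sketch of the locking discipline introduced above.
    class DecoderSlot {
    public:
        void set(const sp<DecoderBase> &decoder) {
            Mutex::Autolock autoLock(mLock);    // writers always take the lock
            mDecoder = decoder;
        }

        void appendStats(Vector<sp<AMessage> > *trackStats) {
            Mutex::Autolock autoLock(mLock);    // readers take the same lock
            if (mDecoder != NULL) {
                trackStats->push_back(mDecoder->getStats());
            }
        }

    private:
        Mutex mLock;
        sp<DecoderBase> mDecoder;
    };
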
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index e400d16..9f5be06 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -159,7 +159,6 @@
         kWhatPrepareDrm                 = 'pDrm',
         kWhatReleaseDrm                 = 'rDrm',
         kWhatMediaClockNotify           = 'mckN',
-        kWhatGetStats                   = 'gSts',
     };
 
     wp<NuPlayerDriver> mDriver;
@@ -175,6 +174,7 @@
     sp<DecoderBase> mVideoDecoder;
     bool mOffloadAudio;
     sp<DecoderBase> mAudioDecoder;
+    Mutex mDecoderLock;  // guard |mAudioDecoder| and |mVideoDecoder|.
     sp<CCDecoder> mCCDecoder;
     sp<Renderer> mRenderer;
     sp<ALooper> mRendererLooper;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 69cd82e..050e4fb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -1069,6 +1069,12 @@
                         static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
                 }
                 if (mediaBuf != NULL) {
+                    if (mediaBuf->size() > codecBuffer->capacity()) {
+                        handleError(ERROR_BUFFER_TOO_SMALL);
+                        mDequeuedInputBuffers.push_back(bufferIx);
+                        return false;
+                    }
+
                     codecBuffer->setRange(0, mediaBuf->size());
                     memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index bde0862..8d876da 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -239,8 +239,14 @@
         size_t *encryptedbytes)
 {
     // size needed to store all the crypto data
-    size_t cryptosize = sizeof(CryptoInfo) +
-                        sizeof(CryptoPlugin::SubSample) * numSubSamples;
+    size_t cryptosize;
+    // sizeof(CryptoInfo) + sizeof(CryptoPlugin::SubSample) * numSubSamples;
+    if (__builtin_mul_overflow(sizeof(CryptoPlugin::SubSample), numSubSamples, &cryptosize) ||
+            __builtin_add_overflow(cryptosize, sizeof(CryptoInfo), &cryptosize)) {
+        ALOGE("crypto size overflow");
+        return NULL;
+    }
+
     CryptoInfo *ret = (CryptoInfo*) malloc(cryptosize);
     if (ret == NULL) {
         ALOGE("couldn't allocate %zu bytes", cryptosize);
diff --git a/media/libnblog/NBLog.cpp b/media/libnblog/NBLog.cpp
index d6fa3e3..d659445 100644
--- a/media/libnblog/NBLog.cpp
+++ b/media/libnblog/NBLog.cpp
@@ -20,11 +20,8 @@
 
 #include <algorithm>
 #include <climits>
-#include <deque>
-#include <fstream>
-#include <iostream>
 #include <math.h>
-#include <numeric>
+#include <unordered_set>
 #include <vector>
 #include <stdarg.h>
 #include <stdint.h>
@@ -64,7 +61,11 @@
 // ---------------------------------------------------------------------------
 
 /*static*/
-std::unique_ptr<NBLog::AbstractEntry> NBLog::AbstractEntry::buildEntry(const uint8_t *ptr) {
+std::unique_ptr<NBLog::AbstractEntry> NBLog::AbstractEntry::buildEntry(const uint8_t *ptr)
+{
+    if (ptr == nullptr) {
+        return nullptr;
+    }
     const uint8_t type = EntryIterator(ptr)->type;
     switch (type) {
     case EVENT_START_FMT:
@@ -78,31 +79,33 @@
     }
 }
 
-NBLog::AbstractEntry::AbstractEntry(const uint8_t *entry) : mEntry(entry) {
+NBLog::AbstractEntry::AbstractEntry(const uint8_t *entry) : mEntry(entry)
+{
 }
 
 // ---------------------------------------------------------------------------
 
-NBLog::EntryIterator NBLog::FormatEntry::begin() const {
+NBLog::EntryIterator NBLog::FormatEntry::begin() const
+{
     return EntryIterator(mEntry);
 }
 
-const char *NBLog::FormatEntry::formatString() const {
+const char *NBLog::FormatEntry::formatString() const
+{
     return (const char*) mEntry + offsetof(entry, data);
 }
 
-size_t NBLog::FormatEntry::formatStringLength() const {
+size_t NBLog::FormatEntry::formatStringLength() const
+{
     return mEntry[offsetof(entry, length)];
 }
 
-NBLog::EntryIterator NBLog::FormatEntry::args() const {
+NBLog::EntryIterator NBLog::FormatEntry::args() const
+{
     auto it = begin();
-    // skip start fmt
-    ++it;
-    // skip timestamp
-    ++it;
-    // skip hash
-    ++it;
+    ++it; // skip start fmt
+    ++it; // skip timestamp
+    ++it; // skip hash
     // Skip author if present
     if (it->type == EVENT_AUTHOR) {
         ++it;
@@ -110,54 +113,47 @@
     return it;
 }
 
-int64_t NBLog::FormatEntry::timestamp() const {
+int64_t NBLog::FormatEntry::timestamp() const
+{
     auto it = begin();
-    // skip start fmt
-    ++it;
+    ++it; // skip start fmt
     return it.payload<int64_t>();
 }
 
-NBLog::log_hash_t NBLog::FormatEntry::hash() const {
+NBLog::log_hash_t NBLog::FormatEntry::hash() const
+{
     auto it = begin();
-    // skip start fmt
-    ++it;
-    // skip timestamp
-    ++it;
+    ++it; // skip start fmt
+    ++it; // skip timestamp
     // unaligned 64-bit read not supported
     log_hash_t hash;
     memcpy(&hash, it->data, sizeof(hash));
     return hash;
 }
 
-int NBLog::FormatEntry::author() const {
+int NBLog::FormatEntry::author() const
+{
     auto it = begin();
-    // skip start fmt
-    ++it;
-    // skip timestamp
-    ++it;
-    // skip hash
-    ++it;
+    ++it; // skip start fmt
+    ++it; // skip timestamp
+    ++it; // skip hash
     // if there is an author entry, return it, return -1 otherwise
-    if (it->type == EVENT_AUTHOR) {
-        return it.payload<int>();
-    }
-    return -1;
+    return it->type == EVENT_AUTHOR ? it.payload<int>() : -1;
 }
 
 NBLog::EntryIterator NBLog::FormatEntry::copyWithAuthor(
-        std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+        std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const
+{
     auto it = begin();
-    // copy fmt start entry
-    it.copyTo(dst);
-    // copy timestamp
-    (++it).copyTo(dst);    // copy hash
-    (++it).copyTo(dst);
+    it.copyTo(dst);     // copy fmt start entry
+    (++it).copyTo(dst); // copy timestamp
+    (++it).copyTo(dst); // copy hash
     // insert author entry
-    size_t authorEntrySize = NBLog::Entry::kOverhead + sizeof(author);
+    size_t authorEntrySize = Entry::kOverhead + sizeof(author);
     uint8_t authorEntry[authorEntrySize];
     authorEntry[offsetof(entry, type)] = EVENT_AUTHOR;
     authorEntry[offsetof(entry, length)] =
-        authorEntry[authorEntrySize + NBLog::Entry::kPreviousLengthOffset] =
+        authorEntry[authorEntrySize + Entry::kPreviousLengthOffset] =
         sizeof(author);
     *(int*) (&authorEntry[offsetof(entry, data)]) = author;
     dst->write(authorEntry, authorEntrySize);
@@ -170,86 +166,104 @@
     return it;
 }
 
-void NBLog::EntryIterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const {
-    size_t length = ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
-    dst->write(ptr, length);
+void NBLog::EntryIterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const
+{
+    size_t length = mPtr[offsetof(entry, length)] + Entry::kOverhead;
+    dst->write(mPtr, length);
 }
 
-void NBLog::EntryIterator::copyData(uint8_t *dst) const {
-    memcpy((void*) dst, ptr + offsetof(entry, data), ptr[offsetof(entry, length)]);
+void NBLog::EntryIterator::copyData(uint8_t *dst) const
+{
+    memcpy((void*) dst, mPtr + offsetof(entry, data), mPtr[offsetof(entry, length)]);
 }
 
-NBLog::EntryIterator::EntryIterator()
-    : ptr(nullptr) {}
+NBLog::EntryIterator::EntryIterator()   // Dummy initialization.
+    : mPtr(nullptr)
+{
+}
 
 NBLog::EntryIterator::EntryIterator(const uint8_t *entry)
-    : ptr(entry) {}
+    : mPtr(entry)
+{
+}
 
 NBLog::EntryIterator::EntryIterator(const NBLog::EntryIterator &other)
-    : ptr(other.ptr) {}
-
-const NBLog::entry& NBLog::EntryIterator::operator*() const {
-    return *(entry*) ptr;
+    : mPtr(other.mPtr)
+{
 }
 
-const NBLog::entry* NBLog::EntryIterator::operator->() const {
-    return (entry*) ptr;
+const NBLog::entry& NBLog::EntryIterator::operator*() const
+{
+    return *(entry*) mPtr;
 }
 
-NBLog::EntryIterator& NBLog::EntryIterator::operator++() {
-    ptr += ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+const NBLog::entry* NBLog::EntryIterator::operator->() const
+{
+    return (entry*) mPtr;
+}
+
+NBLog::EntryIterator& NBLog::EntryIterator::operator++()
+{
+    mPtr += mPtr[offsetof(entry, length)] + Entry::kOverhead;
     return *this;
 }
 
-NBLog::EntryIterator& NBLog::EntryIterator::operator--() {
-    ptr -= ptr[NBLog::Entry::kPreviousLengthOffset] + NBLog::Entry::kOverhead;
+NBLog::EntryIterator& NBLog::EntryIterator::operator--()
+{
+    mPtr -= mPtr[Entry::kPreviousLengthOffset] + Entry::kOverhead;
     return *this;
 }
 
-NBLog::EntryIterator NBLog::EntryIterator::next() const {
+NBLog::EntryIterator NBLog::EntryIterator::next() const
+{
     EntryIterator aux(*this);
     return ++aux;
 }
 
-NBLog::EntryIterator NBLog::EntryIterator::prev() const {
+NBLog::EntryIterator NBLog::EntryIterator::prev() const
+{
     EntryIterator aux(*this);
     return --aux;
 }
 
-int NBLog::EntryIterator::operator-(const NBLog::EntryIterator &other) const {
-    return ptr - other.ptr;
+int NBLog::EntryIterator::operator-(const NBLog::EntryIterator &other) const
+{
+    return mPtr - other.mPtr;
 }
 
-bool NBLog::EntryIterator::operator!=(const EntryIterator &other) const {
-    return ptr != other.ptr;
+bool NBLog::EntryIterator::operator!=(const EntryIterator &other) const
+{
+    return mPtr != other.mPtr;
 }
 
-bool NBLog::EntryIterator::hasConsistentLength() const {
-    return ptr[offsetof(entry, length)] == ptr[ptr[offsetof(entry, length)] +
-        NBLog::Entry::kOverhead + NBLog::Entry::kPreviousLengthOffset];
+bool NBLog::EntryIterator::hasConsistentLength() const
+{
+    return mPtr[offsetof(entry, length)] == mPtr[mPtr[offsetof(entry, length)] +
+        Entry::kOverhead + Entry::kPreviousLengthOffset];
 }
 
 // ---------------------------------------------------------------------------
 
-int64_t NBLog::HistogramEntry::timestamp() const {
+int64_t NBLog::HistogramEntry::timestamp() const
+{
     return EntryIterator(mEntry).payload<HistTsEntry>().ts;
 }
 
-NBLog::log_hash_t NBLog::HistogramEntry::hash() const {
+NBLog::log_hash_t NBLog::HistogramEntry::hash() const
+{
     return EntryIterator(mEntry).payload<HistTsEntry>().hash;
 }
 
-int NBLog::HistogramEntry::author() const {
+int NBLog::HistogramEntry::author() const
+{
     EntryIterator it(mEntry);
-    if (it->length == sizeof(HistTsEntryWithAuthor)) {
-        return it.payload<HistTsEntryWithAuthor>().author;
-    } else {
-        return -1;
-    }
+    return it->length == sizeof(HistTsEntryWithAuthor)
+            ? it.payload<HistTsEntryWithAuthor>().author : -1;
 }
 
 NBLog::EntryIterator NBLog::HistogramEntry::copyWithAuthor(
-        std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+        std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const
+{
     // Current histogram entry has {type, length, struct HistTsEntry, length}.
     // We now want {type, length, struct HistTsEntryWithAuthor, length}
     uint8_t buffer[Entry::kOverhead + sizeof(HistTsEntryWithAuthor)];
@@ -438,7 +452,7 @@
         return;
     }
     Entry entry = Entry(EVENT_END_FMT, NULL, 0);
-    log(&entry, true);
+    log(entry, true);
 }
 
 void NBLog::Writer::logHash(log_hash_t hash)
@@ -464,12 +478,19 @@
     }
 }
 
+void NBLog::Writer::logMonotonicCycleTime(uint32_t monotonicNs)
+{
+    if (!mEnabled) {
+        return;
+    }
+    log(EVENT_MONOTONIC_CYCLE_TIME, &monotonicNs, sizeof(monotonicNs));
+}
+
 void NBLog::Writer::logFormat(const char *fmt, log_hash_t hash, ...)
 {
     if (!mEnabled) {
         return;
     }
-
     va_list ap;
     va_start(ap, hash);
     Writer::logVFormat(fmt, hash, ap);
@@ -550,20 +571,20 @@
         return;
     }
     Entry etr(event, data, length);
-    log(&etr, true /*trusted*/);
+    log(etr, true /*trusted*/);
 }
 
-void NBLog::Writer::log(const NBLog::Entry *etr, bool trusted)
+void NBLog::Writer::log(const NBLog::Entry &etr, bool trusted)
 {
     if (!mEnabled) {
         return;
     }
     if (!trusted) {
-        log(etr->mEvent, etr->mData, etr->mLength);
+        log(etr.mEvent, etr.mData, etr.mLength);
         return;
     }
-    size_t need = etr->mLength + Entry::kOverhead;    // mEvent, mLength, data[mLength], mLength
-                                                      // need = number of bytes written to FIFO
+    const size_t need = etr.mLength + Entry::kOverhead; // mEvent, mLength, data[mLength], mLength
+                                                        // need = number of bytes written to FIFO
 
     // FIXME optimize this using memcpy for the data part of the Entry.
     // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
@@ -571,7 +592,7 @@
     uint8_t temp[Entry::kMaxLength + Entry::kOverhead];
     // write this data to temp array
     for (size_t i = 0; i < need; i++) {
-        temp[i] = etr->copyEntryDataAt(i);
+        temp[i] = etr.copyEntryDataAt(i);
     }
     // write to circular buffer
     mFifoWriter->write(temp, need);
@@ -688,15 +709,21 @@
 
 // ---------------------------------------------------------------------------
 
-const std::set<NBLog::Event> NBLog::Reader::startingTypes {NBLog::Event::EVENT_START_FMT,
+const std::unordered_set<NBLog::Event> NBLog::Reader::startingTypes {
+        NBLog::Event::EVENT_START_FMT,
         NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS,
-        NBLog::Event::EVENT_AUDIO_STATE};
-const std::set<NBLog::Event> NBLog::Reader::endingTypes   {NBLog::Event::EVENT_END_FMT,
+        NBLog::Event::EVENT_AUDIO_STATE,
+        NBLog::Event::EVENT_MONOTONIC_CYCLE_TIME
+};
+const std::unordered_set<NBLog::Event> NBLog::Reader::endingTypes   {
+        NBLog::Event::EVENT_END_FMT,
         NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS,
-        NBLog::Event::EVENT_AUDIO_STATE};
+        NBLog::Event::EVENT_AUDIO_STATE,
+        NBLog::Event::EVENT_MONOTONIC_CYCLE_TIME
+};
 
-NBLog::Reader::Reader(const void *shared, size_t size)
-    : mFd(-1), mIndent(0), mLost(0),
+NBLog::Reader::Reader(const void *shared, size_t size, const std::string &name)
+    : mName(name),
       mShared((/*const*/ Shared *) shared), /*mIMemory*/
       mFifo(mShared != NULL ?
         new audio_utils_fifo(size, sizeof(uint8_t),
@@ -705,8 +732,8 @@
 {
 }
 
-NBLog::Reader::Reader(const sp<IMemory>& iMemory, size_t size)
-    : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
+NBLog::Reader::Reader(const sp<IMemory>& iMemory, size_t size, const std::string &name)
+    : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size, name)
 {
     mIMemory = iMemory;
 }
@@ -718,7 +745,7 @@
 }
 
 const uint8_t *NBLog::Reader::findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
-                                            const std::set<Event> &types) {
+                                            const std::unordered_set<Event> &types) {
     while (back + Entry::kPreviousLengthOffset >= front) {
         const uint8_t *prev = back - back[Entry::kPreviousLengthOffset] - Entry::kOverhead;
         if (prev < front || prev + prev[offsetof(entry, length)] +
@@ -738,29 +765,47 @@
 // Copies content of a Reader FIFO into its Snapshot
 // The Snapshot has the same raw data, but represented as a sequence of entries
 // and an EntryIterator making it possible to process the data.
-std::unique_ptr<NBLog::Reader::Snapshot> NBLog::Reader::getSnapshot()
+std::unique_ptr<NBLog::Snapshot> NBLog::Reader::getSnapshot()
 {
     if (mFifoReader == NULL) {
-        return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
+        return std::make_unique<Snapshot>();
     }
-    // make a copy to avoid race condition with writer
-    size_t capacity = mFifo->capacity();
 
     // This emulates the behaviour of audio_utils_fifo_reader::read, but without incrementing the
     // reader index. The index is incremented after handling corruption, to after the last complete
     // entry of the buffer
-    size_t lost;
+    size_t lost = 0;
     audio_utils_iovec iovec[2];
-    ssize_t availToRead = mFifoReader->obtain(iovec, capacity, NULL /*timeout*/, &lost);
+    const size_t capacity = mFifo->capacity();
+    ssize_t availToRead;
+    // A call to audio_utils_fifo_reader::obtain() places the read pointer one buffer length
+    // before the writer's pointer (since mFifoReader was constructed with flush=false). The
+    // do-while loop is an attempt to read all of the FIFO's contents regardless of how far
+    // behind the reader is with respect to the writer. However, the following scheduling
+    // sequence is possible and can lead to a starvation situation:
+    // - Writer T1 writes, overrun with respect to Reader T2
+    // - T2 calls obtain() and gets EOVERFLOW, T2 ptr placed one buffer size behind T1 ptr
+    // - T1 writes again, overrun
+    // - T2 obtain(), EOVERFLOW (and so on...)
+    // To address this issue, we limit the number of tries for the reader to catch up with
+    // the writer.
+    int tries = 0;
+    size_t lostTemp;
+    do {
+        availToRead = mFifoReader->obtain(iovec, capacity, NULL /*timeout*/, &lostTemp);
+        lost += lostTemp;
+    } while (availToRead < 0 && ++tries <= kMaxObtainTries);
+
     if (availToRead <= 0) {
-        return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
+        ALOGW_IF(availToRead < 0, "NBLog Reader %s failed to catch up with Writer", mName.c_str());
+        return std::make_unique<Snapshot>();
     }
 
     std::unique_ptr<Snapshot> snapshot(new Snapshot(availToRead));
     memcpy(snapshot->mData, (const char *) mFifo->buffer() + iovec[0].mOffset, iovec[0].mLength);
     if (iovec[1].mLength > 0) {
         memcpy(snapshot->mData + (iovec[0].mLength),
-            (const char *) mFifo->buffer() + iovec[1].mOffset, iovec[1].mLength);
+                (const char *) mFifo->buffer() + iovec[1].mOffset, iovec[1].mLength);
     }
 
     // Handle corrupted buffer
@@ -800,22 +845,16 @@
 
     snapshot->mLost = lost;
     return snapshot;
-
 }
 
 // Takes raw content of the local merger FIFO, processes log entries, and
 // writes the data to a map of class PerformanceAnalysis, based on their thread ID.
-void NBLog::MergeReader::getAndProcessSnapshot(NBLog::Reader::Snapshot &snapshot)
+void NBLog::MergeReader::getAndProcessSnapshot(NBLog::Snapshot &snapshot, int author)
 {
-    String8 timestamp, body;
-
-    for (auto entry = snapshot.begin(); entry != snapshot.end();) {
-        switch (entry->type) {
-        case EVENT_START_FMT:
-            entry = handleFormat(FormatEntry(entry), &timestamp, &body);
-            break;
+    for (const entry &etr : snapshot) {
+        switch (etr.type) {
         case EVENT_HISTOGRAM_ENTRY_TS: {
-            HistTsEntryWithAuthor *data = (HistTsEntryWithAuthor *) (entry->data);
+            HistTsEntry *data = (HistTsEntry *) (etr.data);
             // TODO This memcpies are here to avoid unaligned memory access crash.
             // There's probably a more efficient way to do it
             log_hash_t hash;
@@ -824,61 +863,84 @@
             memcpy(&ts, &data->ts, sizeof(ts));
             // TODO: hash for histogram ts and audio state need to match
             // and correspond to audio production source file location
-            mThreadPerformanceAnalysis[data->author][0 /*hash*/].logTsEntry(ts);
-            ++entry;
-            break;
-        }
+            mThreadPerformanceAnalysis[author][0 /*hash*/].logTsEntry(ts);
+        } break;
         case EVENT_AUDIO_STATE: {
-            HistTsEntryWithAuthor *data = (HistTsEntryWithAuthor *) (entry->data);
+            HistTsEntry *data = (HistTsEntry *) (etr.data);
             // TODO This memcpies are here to avoid unaligned memory access crash.
             // There's probably a more efficient way to do it
             log_hash_t hash;
             memcpy(&hash, &(data->hash), sizeof(hash));
-            // TODO: remove ts if unused
-            int64_t ts;
-            memcpy(&ts, &data->ts, sizeof(ts));
-            mThreadPerformanceAnalysis[data->author][0 /*hash*/].handleStateChange();
-            ++entry;
-            break;
-        }
+            mThreadPerformanceAnalysis[author][0 /*hash*/].handleStateChange();
+        } break;
         case EVENT_END_FMT:
-            body.appendFormat("warning: got to end format event");
-            ++entry;
-            break;
         case EVENT_RESERVED:
+        case EVENT_UPPER_BOUND:
+            ALOGW("warning: unexpected event %d", etr.type);
         default:
-            body.appendFormat("warning: unexpected event %d", entry->type);
-            ++entry;
             break;
         }
     }
-    // FIXME: decide whether to print the warnings here or elsewhere
-    if (!body.isEmpty()) {
-        dumpLine(timestamp, body);
-    }
 }
 
 void NBLog::MergeReader::getAndProcessSnapshot()
 {
-    // get a snapshot, process it
-    std::unique_ptr<Snapshot> snap = getSnapshot();
-    getAndProcessSnapshot(*snap);
+    // get a snapshot of each reader and process them
+    // TODO insert lock here
+    const size_t nLogs = mReaders.size();
+    std::vector<std::unique_ptr<Snapshot>> snapshots(nLogs);
+    for (size_t i = 0; i < nLogs; i++) {
+        snapshots[i] = mReaders[i]->getSnapshot();
+    }
+    // TODO unlock lock here
+    for (size_t i = 0; i < nLogs; i++) {
+        if (snapshots[i] != nullptr) {
+            getAndProcessSnapshot(*(snapshots[i]), i);
+        }
+    }
 }
 
-void NBLog::MergeReader::dump(int fd, int indent) {
+void NBLog::MergeReader::dump(int fd, int indent)
+{
     // TODO: add a mutex around media.log dump
     ReportPerformance::dump(fd, indent, mThreadPerformanceAnalysis);
 }
 
-// Writes a string to the console
-void NBLog::Reader::dumpLine(const String8 &timestamp, String8 &body)
+// TODO for future compatibility, would prefer to have a dump() go to string, and then go
+// to fd only when invoked through binder.
+void NBLog::DumpReader::dump(int fd, size_t indent)
 {
-    if (mFd >= 0) {
-        dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
-    } else {
-        ALOGI("%.*s%s %s", mIndent, "", timestamp.string(), body.string());
+    if (fd < 0) return;
+    std::unique_ptr<Snapshot> snapshot = getSnapshot();
+    if (snapshot == nullptr) {
+        return;
     }
-    body.clear();
+    String8 timestamp, body;
+
+    // TODO all logged types should have a printable format.
+    for (auto it = snapshot->begin(); it != snapshot->end(); ++it) {
+        switch (it->type) {
+        case EVENT_START_FMT:
+            it = handleFormat(FormatEntry(it), &timestamp, &body);
+            break;
+        case EVENT_MONOTONIC_CYCLE_TIME: {
+            uint32_t monotonicNs;
+            memcpy(&monotonicNs, it->data, sizeof(monotonicNs));
+            body.appendFormat("Thread cycle took %u ns", monotonicNs);
+        } break;
+        case EVENT_END_FMT:
+        case EVENT_RESERVED:
+        case EVENT_UPPER_BOUND:
+            body.appendFormat("warning: unexpected event %d", it->type);
+        default:
+            break;
+        }
+        if (!body.isEmpty()) {
+            dprintf(fd, "%.*s%s %s\n", (int)indent, "", timestamp.string(), body.string());
+            body.clear();
+        }
+        timestamp.clear();
+    }
 }
 
 bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
@@ -886,52 +948,69 @@
     return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
 }
 
-// ---------------------------------------------------------------------------
-
-void NBLog::appendTimestamp(String8 *body, const void *data) {
+void NBLog::DumpReader::appendTimestamp(String8 *body, const void *data)
+{
+    if (body == nullptr || data == nullptr) {
+        return;
+    }
     int64_t ts;
     memcpy(&ts, data, sizeof(ts));
     body->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
                     (int) ((ts / (1000 * 1000)) % 1000));
 }
 
-void NBLog::appendInt(String8 *body, const void *data) {
+void NBLog::DumpReader::appendInt(String8 *body, const void *data)
+{
+    if (body == nullptr || data == nullptr) {
+        return;
+    }
     int x = *((int*) data);
     body->appendFormat("<%d>", x);
 }
 
-void NBLog::appendFloat(String8 *body, const void *data) {
+void NBLog::DumpReader::appendFloat(String8 *body, const void *data)
+{
+    if (body == nullptr || data == nullptr) {
+        return;
+    }
     float f;
-    memcpy(&f, data, sizeof(float));
+    memcpy(&f, data, sizeof(f));
     body->appendFormat("<%f>", f);
 }
 
-void NBLog::appendPID(String8 *body, const void* data, size_t length) {
+void NBLog::DumpReader::appendPID(String8 *body, const void* data, size_t length)
+{
+    if (body == nullptr || data == nullptr) {
+        return;
+    }
     pid_t id = *((pid_t*) data);
     char * name = &((char*) data)[sizeof(pid_t)];
     body->appendFormat("<PID: %d, name: %.*s>", id, (int) (length - sizeof(pid_t)), name);
 }
 
-String8 NBLog::bufferDump(const uint8_t *buffer, size_t size)
+String8 NBLog::DumpReader::bufferDump(const uint8_t *buffer, size_t size)
 {
     String8 str;
+    if (buffer == nullptr) {
+        return str;
+    }
     str.append("[ ");
-    for(size_t i = 0; i < size; i++)
-    {
+    for(size_t i = 0; i < size; i++) {
         str.appendFormat("%d ", buffer[i]);
     }
     str.append("]");
     return str;
 }
 
-String8 NBLog::bufferDump(const EntryIterator &it)
+String8 NBLog::DumpReader::bufferDump(const EntryIterator &it)
 {
     return bufferDump(it, it->length + Entry::kOverhead);
 }
 
-NBLog::EntryIterator NBLog::Reader::handleFormat(const FormatEntry &fmtEntry,
-                                                         String8 *timestamp,
-                                                         String8 *body) {
+NBLog::EntryIterator NBLog::DumpReader::handleFormat(const FormatEntry &fmtEntry,
+                                                           String8 *timestamp,
+                                                           String8 *body)
+{
     // log timestamp
     int64_t ts = fmtEntry.timestamp();
     timestamp->clear();
@@ -947,7 +1026,7 @@
     handleAuthor(fmtEntry, body);
 
     // log string
-    NBLog::EntryIterator arg = fmtEntry.args();
+    EntryIterator arg = fmtEntry.args();
 
     const char* fmt = fmtEntry.formatString();
     size_t fmt_length = fmtEntry.formatStringLength();
@@ -1016,7 +1095,6 @@
         ++arg;
     }
     ALOGW_IF(arg->type != EVENT_END_FMT, "Expected end of format, got %d", arg->type);
-    ++arg;
     return arg;
 }
 
@@ -1026,13 +1104,14 @@
         new audio_utils_fifo(size, sizeof(uint8_t),
             mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
       mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL)
-      {}
+{
+}
 
-void NBLog::Merger::addReader(const NBLog::NamedReader &reader) {
-
+void NBLog::Merger::addReader(const sp<NBLog::Reader> &reader)
+{
     // FIXME This is called by binder thread in MediaLogService::registerWriter
-    //       but the access to shared variable mNamedReaders is not yet protected by a lock.
-    mNamedReaders.push_back(reader);
+    //       but the access to shared variable mReaders is not yet protected by a lock.
+    mReaders.push_back(reader);
 }
 
 // items placed in priority queue during merge
@@ -1044,25 +1123,23 @@
     MergeItem(int64_t ts, int index): ts(ts), index(index) {}
 };
 
-// operators needed for priority queue in merge
-// bool operator>(const int64_t &t1, const int64_t &t2) {
-//     return t1.tv_sec > t2.tv_sec || (t1.tv_sec == t2.tv_sec && t1.tv_nsec > t2.tv_nsec);
-// }
-
-bool operator>(const struct MergeItem &i1, const struct MergeItem &i2) {
+bool operator>(const struct MergeItem &i1, const struct MergeItem &i2)
+{
     return i1.ts > i2.ts || (i1.ts == i2.ts && i1.index > i2.index);
 }
 
 // Merge registered readers, sorted by timestamp, and write data to a single FIFO in local memory
-void NBLog::Merger::merge() {
-    // FIXME This is called by merge thread
-    //       but the access to shared variable mNamedReaders is not yet protected by a lock.
-    int nLogs = mNamedReaders.size();
-    std::vector<std::unique_ptr<NBLog::Reader::Snapshot>> snapshots(nLogs);
-    std::vector<NBLog::EntryIterator> offsets(nLogs);
+void NBLog::Merger::merge()
+{
+    if (true) return; // Merging is not necessary at the moment, so this is to disable it
+                      // and bypass compiler warnings about member variables not being used.
+    const int nLogs = mReaders.size();
+    std::vector<std::unique_ptr<Snapshot>> snapshots(nLogs);
+    std::vector<EntryIterator> offsets;
+    offsets.reserve(nLogs);
     for (int i = 0; i < nLogs; ++i) {
-        snapshots[i] = mNamedReaders[i].reader()->getSnapshot();
-        offsets[i] = snapshots[i]->begin();
+        snapshots[i] = mReaders[i]->getSnapshot();
+        offsets.push_back(snapshots[i]->begin());
     }
     // initialize offsets
     // TODO custom heap implementation could allow to update top, improving performance
@@ -1071,17 +1148,19 @@
     for (int i = 0; i < nLogs; ++i)
     {
         if (offsets[i] != snapshots[i]->end()) {
-            int64_t ts = AbstractEntry::buildEntry(offsets[i])->timestamp();
-            timestamps.emplace(ts, i);
+            std::unique_ptr<AbstractEntry> abstractEntry = AbstractEntry::buildEntry(offsets[i]);
+            if (abstractEntry == nullptr) {
+                continue;
+            }
+            timestamps.emplace(abstractEntry->timestamp(), i);
         }
     }
 
     while (!timestamps.empty()) {
-        // find minimum timestamp
-        int index = timestamps.top().index;
+        int index = timestamps.top().index;     // find minimum timestamp
         // copy it to the log, increasing offset
-        offsets[index] = AbstractEntry::buildEntry(offsets[index])->copyWithAuthor(mFifoWriter,
-                                                                                   index);
+        offsets[index] = AbstractEntry::buildEntry(offsets[index])->
+            copyWithAuthor(mFifoWriter, index);
         // update data structures
         timestamps.pop();
         if (offsets[index] != snapshots[index]->end()) {
@@ -1091,20 +1170,27 @@
     }
 }
 
-const std::vector<NBLog::NamedReader>& NBLog::Merger::getNamedReaders() const {
-    // FIXME This is returning a reference to a shared variable that needs a lock
-    return mNamedReaders;
+const std::vector<sp<NBLog::Reader>>& NBLog::Merger::getReaders() const
+{
+    //AutoMutex _l(mLock);
+    return mReaders;
 }
 
 // ---------------------------------------------------------------------------
 
 NBLog::MergeReader::MergeReader(const void *shared, size_t size, Merger &merger)
-    : Reader(shared, size), mNamedReaders(merger.getNamedReaders()) {}
+    : Reader(shared, size, "MergeReader"), mReaders(merger.getReaders())
+{
+}
 
-void NBLog::MergeReader::handleAuthor(const NBLog::AbstractEntry &entry, String8 *body) {
+void NBLog::MergeReader::handleAuthor(const NBLog::AbstractEntry &entry, String8 *body)
+{
     int author = entry.author();
+    if (author == -1) {
+        return;
+    }
     // FIXME Needs a lock
-    const char* name = mNamedReaders[author].name();
+    const char* name = mReaders[author]->name().c_str();
     body->appendFormat("%s: ", name);
 }
 
@@ -1113,23 +1199,27 @@
 NBLog::MergeThread::MergeThread(NBLog::Merger &merger, NBLog::MergeReader &mergeReader)
     : mMerger(merger),
       mMergeReader(mergeReader),
-      mTimeoutUs(0) {}
+      mTimeoutUs(0)
+{
+}
 
-NBLog::MergeThread::~MergeThread() {
+NBLog::MergeThread::~MergeThread()
+{
     // set exit flag, set timeout to 0 to force threadLoop to exit and wait for the thread to join
     requestExit();
     setTimeoutUs(0);
     join();
 }
 
-bool NBLog::MergeThread::threadLoop() {
+bool NBLog::MergeThread::threadLoop()
+{
     bool doMerge;
     {
         AutoMutex _l(mMutex);
         // If mTimeoutUs is negative, wait on the condition variable until it's positive.
-        // If it's positive, wait kThreadSleepPeriodUs and then merge
-        nsecs_t waitTime = mTimeoutUs > 0 ? kThreadSleepPeriodUs * 1000 : LLONG_MAX;
-        mCond.waitRelative(mMutex, waitTime);
+        // If it's positive, merge. The minimum period between wakeups of the condition
+        // variable is handled in AudioFlinger::MediaLogNotifier::threadLoop().
+        mCond.wait(mMutex);
         doMerge = mTimeoutUs > 0;
         mTimeoutUs -= kThreadSleepPeriodUs;
     }
@@ -1144,11 +1234,13 @@
     return true;
 }
 
-void NBLog::MergeThread::wakeup() {
+void NBLog::MergeThread::wakeup()
+{
     setTimeoutUs(kThreadWakeupPeriodUs);
 }
 
-void NBLog::MergeThread::setTimeoutUs(int time) {
+void NBLog::MergeThread::setTimeoutUs(int time)
+{
     AutoMutex _l(mMutex);
     mTimeoutUs = time;
     mCond.signal();
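
The bounded-retry loop that NBLog::Reader::getSnapshot() gains above is the key piece: the reader retries obtain() only while it keeps being overrun, and gives up after a fixed number of attempts so a fast writer cannot starve it. Below is a minimal, self-contained sketch of that pattern; FifoReader, obtainWithRetryBound() and kMaxTries are hypothetical stand-ins for illustration, not the audio_utils types used in the patch.

    #include <sys/types.h>  // ssize_t
    #include <cstddef>

    // Hypothetical stand-in for audio_utils_fifo_reader: fails (overrun) a fixed
    // number of times before a read finally succeeds.
    struct FifoReader {
        int failuresLeft;
        ssize_t obtain(size_t capacity, size_t *lost) {
            if (failuresLeft > 0) {
                --failuresLeft;
                *lost = capacity;   // pretend the whole buffer was overwritten
                return -1;          // reader overrun, like -EOVERFLOW
            }
            *lost = 0;
            return (ssize_t) capacity;  // bytes now readable
        }
    };

    // Retry obtain() on overrun, but give up after kMaxTries attempts so that a
    // writer that keeps overrunning the reader cannot starve it indefinitely.
    ssize_t obtainWithRetryBound(FifoReader &reader, size_t capacity, size_t *totalLost) {
        constexpr int kMaxTries = 3;        // assumed bound, mirrors kMaxObtainTries
        ssize_t availToRead;
        size_t lost = 0;
        int tries = 0;
        do {
            size_t lostTemp = 0;
            availToRead = reader.obtain(capacity, &lostTemp);
            lost += lostTemp;
        } while (availToRead < 0 && ++tries <= kMaxTries);  // retry only on failure
        *totalLost = lost;
        return availToRead;                 // still negative if we never caught up
    }
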
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
index f09e93d..3418dc0 100644
--- a/media/libnblog/PerformanceAnalysis.cpp
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -32,6 +32,7 @@
 #include <sys/prctl.h>
 #include <time.h>
 #include <new>
+#include <audio_utils/LogPlot.h>
 #include <audio_utils/roundup.h>
 #include <media/nblog/NBLog.h>
 #include <media/nblog/PerformanceAnalysis.h>
@@ -208,27 +209,6 @@
     return isOutlier;
 }
 
-static int widthOf(int x) {
-    int width = 0;
-    if (x < 0) {
-        width++;
-        x = x == INT_MIN ? INT_MAX : -x;
-    }
-    // assert (x >= 0)
-    do {
-        ++width;
-        x /= 10;
-    } while (x > 0);
-    return width;
-}
-
-// computes the column width required for a specific histogram value
-inline int numberWidth(double number, int leftPadding) {
-    // Added values account for whitespaces needed around numbers, and for the
-    // dot and decimal digit not accounted for by widthOf
-    return std::max(std::max(widthOf(static_cast<int>(number)) + 3, 2), leftPadding + 1);
-}
-
 // rounds value to precision based on log-distance from mean
 __attribute__((no_sanitize("signed-integer-overflow")))
 inline double logRound(double x, double mean) {
@@ -254,7 +234,7 @@
 // of PerformanceAnalysis
 void PerformanceAnalysis::reportPerformance(String8 *body, int author, log_hash_t hash,
                                             int maxHeight) {
-    if (mHists.empty()) {
+    if (mHists.empty() || body == nullptr) {
         return;
     }
 
@@ -273,69 +253,16 @@
         }
     }
 
-    // underscores and spaces length corresponds to maximum width of histogram
-    static const int kLen = 200;
-    std::string underscores(kLen, '_');
-    std::string spaces(kLen, ' ');
-
-    auto it = buckets.begin();
-    double maxDelta = it->first;
-    int maxCount = it->second;
-    // Compute maximum values
-    while (++it != buckets.end()) {
-        if (it->first > maxDelta) {
-            maxDelta = it->first;
-        }
-        if (it->second > maxCount) {
-            maxCount = it->second;
-        }
-    }
-    int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
-    const int leftPadding = widthOf(1 << height);
-    const int bucketWidth = numberWidth(maxDelta, leftPadding);
-    int scalingFactor = 1;
-    // scale data if it exceeds maximum height
-    if (height > maxHeight) {
-        scalingFactor = (height + maxHeight) / maxHeight;
-        height /= scalingFactor;
-    }
-    body->appendFormat("\n%*s %3.2f %s", leftPadding + 11,
-            "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:");
-    body->appendFormat("\n%*s%d, %lld, %lld\n", leftPadding + 11,
+    static const int SIZE = 128;
+    char title[SIZE];
+    snprintf(title, sizeof(title), "\n%s %3.2f %s\n%s%d, %lld, %lld\n",
+            "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:",
             "Thread, hash, starting timestamp: ", author,
-            static_cast<long long int>(hash), static_cast<long long int>(startingTs));
-    // write histogram label line with bucket values
-    body->appendFormat("\n%s", " ");
-    body->appendFormat("%*s", leftPadding, " ");
-    for (auto const &x : buckets) {
-        const int colWidth = numberWidth(x.first, leftPadding);
-        body->appendFormat("%*d", colWidth, x.second);
-    }
-    // write histogram ascii art
-    body->appendFormat("\n%s", " ");
-    for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
-        const int value = 1 << row;
-        body->appendFormat("%.*s", leftPadding, spaces.c_str());
-        for (auto const &x : buckets) {
-            const int colWidth = numberWidth(x.first, leftPadding);
-            body->appendFormat("%.*s%s", colWidth - 1,
-                               spaces.c_str(), x.second < value ? " " : "|");
-        }
-        body->appendFormat("\n%s", " ");
-    }
-    // print x-axis
-    const int columns = static_cast<int>(buckets.size());
-    body->appendFormat("%*c", leftPadding, ' ');
-    body->appendFormat("%.*s", (columns + 1) * bucketWidth, underscores.c_str());
-    body->appendFormat("\n%s", " ");
+            static_cast<long long>(hash), static_cast<long long>(startingTs));
+    static const char * const kLabel = "ms";
 
-    // write footer with bucket labels
-    body->appendFormat("%*s", leftPadding, " ");
-    for (auto const &x : buckets) {
-        const int colWidth = numberWidth(x.first, leftPadding);
-        body->appendFormat("%*.*f", colWidth, 1, x.first);
-    }
-    body->appendFormat("%.*s%s", bucketWidth, spaces.c_str(), "ms\n");
+    body->appendFormat("%s",
+            audio_utils_plot_histogram(buckets, title, kLabel, maxHeight).c_str());
 
     // Now report glitches
     body->appendFormat("\ntime elapsed between glitches and glitch timestamps:\n");
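
reportPerformance() above now hands its bucket map to audio_utils_plot_histogram() instead of rendering the ASCII plot inline. As a rough illustration of what such a helper consumes and produces, here is a hedged sketch; renderHistogram() is a made-up function and does not reflect the actual audio_utils API or its output format.

    #include <algorithm>
    #include <map>
    #include <string>

    // Hypothetical helper showing roughly how a bucket map can be turned into text;
    // it is not the audio_utils_plot_histogram() used by the patch.
    std::string renderHistogram(const std::map<double, int> &buckets,
                                const std::string &title, const std::string &label) {
        std::string out = title;
        int maxCount = 0;
        for (const auto &b : buckets) {
            maxCount = std::max(maxCount, b.second);
        }
        for (const auto &b : buckets) {
            // One row per bucket: value, unit label, and a bar scaled to the largest count.
            const int barLen = maxCount > 0 ? (b.second * 40) / maxCount : 0;
            out += std::to_string(b.first) + " " + label + ": " +
                   std::string(barLen, '|') + " (" + std::to_string(b.second) + ")\n";
        }
        return out;
    }
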
diff --git a/media/libnblog/include/media/nblog/NBLog.h b/media/libnblog/include/media/nblog/NBLog.h
index fb6f179..c9bfaae 100644
--- a/media/libnblog/include/media/nblog/NBLog.h
+++ b/media/libnblog/include/media/nblog/NBLog.h
@@ -19,9 +19,8 @@
 #ifndef ANDROID_MEDIA_NBLOG_H
 #define ANDROID_MEDIA_NBLOG_H
 
-#include <deque>
 #include <map>
-#include <set>
+#include <unordered_set>
 #include <vector>
 
 #include <audio_utils/fifo.h>
@@ -64,23 +63,18 @@
         EVENT_AUDIO_STATE,          // audio on/off event: logged on FastMixer::onStateChange call
         EVENT_END_FMT,              // end of logFormat argument list
 
+        // Types representing audio performance metrics
+        EVENT_LATENCY,              // TODO classify specifically what this is
+        EVENT_CPU_FREQUENCY,        // instantaneous CPU frequency in kHz
+        EVENT_MONOTONIC_CYCLE_TIME, // thread per-cycle monotonic time
+        EVENT_CPU_CYCLE_TIME,       // thread per-cycle cpu time
+
         EVENT_UPPER_BOUND,          // to check for invalid events
     };
 
 private:
 
     // ---------------------------------------------------------------------------
-    // API for handling format entry operations
-
-    // a formatted entry has the following structure:
-    //    * START_FMT entry, containing the format string
-    //    * TIMESTAMP entry
-    //    * HASH entry
-    //    * author entry of the thread that generated it (optional, present in merged log)
-    //    * format arg1
-    //    * format arg2
-    //    * ...
-    //    * END_FMT entry
 
     // entry representation in memory
     struct entry {
@@ -98,7 +92,12 @@
     // entry iterator
     class EntryIterator {
     public:
+        // Used for dummy initialization. Performing operations on a default-constructed
+        // EntryIterator other than assigning it to another valid EntryIterator
+        // is undefined behavior.
         EntryIterator();
+        // Caller's responsibility to make sure entry is not nullptr.
+        // Passing in nullptr can result in undefined behavior.
         explicit EntryIterator(const uint8_t *entry);
         EntryIterator(const EntryIterator &other);
 
@@ -109,7 +108,9 @@
         EntryIterator&       operator++(); // ++i
         // back to previous entry
         EntryIterator&       operator--(); // --i
+        // returns an EntryIterator corresponding to the next entry
         EntryIterator        next() const;
+        // returns an EntryIterator corresponding to the previous entry
         EntryIterator        prev() const;
         bool            operator!=(const EntryIterator &other) const;
         int             operator-(const EntryIterator &other) const;
@@ -120,25 +121,25 @@
 
         template<typename T>
         inline const T& payload() {
-            return *reinterpret_cast<const T *>(ptr + offsetof(entry, data));
+            return *reinterpret_cast<const T *>(mPtr + offsetof(entry, data));
         }
 
         inline operator const uint8_t*() const {
-            return ptr;
+            return mPtr;
         }
 
     private:
-        const uint8_t  *ptr;
+        const uint8_t  *mPtr;   // Should not be nullptr except for dummy initialization
     };
 
+    // ---------------------------------------------------------------------------
+    // The following classes are used for merging into the Merger's buffer.
+
     class AbstractEntry {
     public:
-
-        // Entry starting in the given pointer
-        explicit AbstractEntry(const uint8_t *entry);
         virtual ~AbstractEntry() {}
 
-        // build concrete entry of appropriate class from pointer
+        // build concrete entry of appropriate class from ptr.
         static std::unique_ptr<AbstractEntry> buildEntry(const uint8_t *ptr);
 
         // get format entry timestamp
@@ -158,11 +159,24 @@
                                                 int author) const = 0;
 
     protected:
+        // Entry starting in the given pointer, which shall not be nullptr.
+        explicit AbstractEntry(const uint8_t *entry);
         // copies ordinary entry from src to dst, and returns length of entry
         // size_t      copyEntry(audio_utils_fifo_writer *dst, const iterator &it);
         const uint8_t  *mEntry;
     };
 
+    // API for handling format entry operations
+
+    // a formatted entry has the following structure:
+    //    * START_FMT entry, containing the format string
+    //    * TIMESTAMP entry
+    //    * HASH entry
+    //    * author entry of the thread that generated it (optional, present in merged log)
+    //    * format arg1
+    //    * format arg2
+    //    * ...
+    //    * END_FMT entry
     class FormatEntry : public AbstractEntry {
     public:
         // explicit FormatEntry(const EntryIterator &it);
@@ -194,7 +208,6 @@
         // copy entry, adding author before timestamp, returns size of original entry
         virtual EntryIterator    copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
                                                 int author) const override;
-
     };
 
     class HistogramEntry : public AbstractEntry {
@@ -211,13 +224,22 @@
 
         virtual EntryIterator    copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
                                                 int author) const override;
-
     };
 
     // ---------------------------------------------------------------------------
 
-    // representation of a single log entry in private memory
-    struct Entry {
+    // representation of a single log entry in shared memory
+    //  byte[0]             mEvent
+    //  byte[1]             mLength
+    //  byte[2]             mData[0]
+    //  ...
+    //  byte[2+i]           mData[i]
+    //  ...
+    //  byte[2+mLength-1]   mData[mLength-1]
+    //  byte[2+mLength]     duplicate copy of mLength to permit reverse scan
+    //  byte[3+mLength]     start of next log entry
+    class Entry {
+    public:
         Entry(Event event, const void *data, size_t length)
             : mEvent(event), mLength(length), mData(data) { }
         /*virtual*/ ~Entry() { }
@@ -256,24 +278,6 @@
         int value;
     }; //TODO __attribute__((packed));
 
-    // representation of a single log entry in shared memory
-    //  byte[0]             mEvent
-    //  byte[1]             mLength
-    //  byte[2]             mData[0]
-    //  ...
-    //  byte[2+i]           mData[i]
-    //  ...
-    //  byte[2+mLength-1]   mData[mLength-1]
-    //  byte[2+mLength]     duplicate copy of mLength to permit reverse scan
-    //  byte[3+mLength]     start of next log entry
-
-    static void    appendInt(String8 *body, const void *data);
-    static void    appendFloat(String8 *body, const void *data);
-    static void    appendPID(String8 *body, const void *data, size_t length);
-    static void    appendTimestamp(String8 *body, const void *data);
-    static size_t  fmtEntryLength(const uint8_t *data);
-    static String8 bufferDump(const uint8_t *buffer, size_t size);
-    static String8 bufferDump(const EntryIterator &it);
 public:
 
     // Located in shared memory, must be POD.
@@ -339,12 +343,15 @@
         virtual void    logInteger(const int x);
         virtual void    logFloat(const float x);
         virtual void    logPID();
-        virtual void    logFormat(const char *fmt, log_hash_t hash, ...);
-        virtual void    logVFormat(const char *fmt, log_hash_t hash, va_list ap);
         virtual void    logStart(const char *fmt);
         virtual void    logEnd();
         virtual void    logHash(log_hash_t hash);
+        // The functions below are not in LockedWriter yet.
+        virtual void    logFormat(const char *fmt, log_hash_t hash, ...);
+        virtual void    logVFormat(const char *fmt, log_hash_t hash, va_list ap);
         virtual void    logEventHistTs(Event event, log_hash_t hash);
+        virtual void    logMonotonicCycleTime(uint32_t monotonicNs);
+        // End of functions that are not in LockedWriter yet.
 
         virtual bool    isEnabled() const;
 
@@ -360,7 +367,7 @@
         // writes a single Entry to the FIFO
         void    log(Event event, const void *data, size_t length);
         // checks validity of an event before calling log above this one
-        void    log(const Entry *entry, bool trusted = false);
+        void    log(const Entry &entry, bool trusted = false);
 
         Shared* const   mShared;    // raw pointer to shared memory
         sp<IMemory>     mIMemory;   // ref-counted version, initialized in constructor
@@ -406,99 +413,86 @@
 
     // ---------------------------------------------------------------------------
 
+    // A snapshot of a reader's buffer
+    // This is raw data. No analysis has been done on it.
+    class Snapshot {
+    public:
+        Snapshot() = default;
+
+        explicit Snapshot(size_t bufferSize) : mData(new uint8_t[bufferSize]) {}
+
+        ~Snapshot() { delete[] mData; }
+
+        // amount of data lost (given by audio_utils_fifo_reader)
+        size_t   lost() const { return mLost; }
+
+        // iterator to beginning of readable segment of snapshot
+        // data between begin and end has valid entries
+        EntryIterator begin() const { return mBegin; }
+
+        // iterator to end of readable segment of snapshot
+        EntryIterator end() const { return mEnd; }
+
+    private:
+        friend class Reader;
+        uint8_t * const       mData{};
+        size_t                mLost{0};
+        EntryIterator mBegin;
+        EntryIterator mEnd;
+    };
+
     class Reader : public RefBase {
     public:
-        // A snapshot of a readers buffer
-        // This is raw data. No analysis has been done on it
-        class Snapshot {
-        public:
-            Snapshot() : mData(NULL), mLost(0) {}
-
-            Snapshot(size_t bufferSize) : mData(new uint8_t[bufferSize]) {}
-
-            ~Snapshot() { delete[] mData; }
-
-            // copy of the buffer
-            uint8_t *data() const { return mData; }
-
-            // amount of data lost (given by audio_utils_fifo_reader)
-            size_t   lost() const { return mLost; }
-
-            // iterator to beginning of readable segment of snapshot
-            // data between begin and end has valid entries
-            EntryIterator begin() { return mBegin; }
-
-            // iterator to end of readable segment of snapshot
-            EntryIterator end() { return mEnd; }
-
-        private:
-            friend class MergeReader;
-            friend class Reader;
-            uint8_t              *mData;
-            size_t                mLost;
-            EntryIterator mBegin;
-            EntryIterator mEnd;
-        };
-
         // Input parameter 'size' is the desired size of the timeline in byte units.
         // The size of the shared memory must be at least Timeline::sharedSize(size).
-        Reader(const void *shared, size_t size);
-        Reader(const sp<IMemory>& iMemory, size_t size);
-
+        Reader(const void *shared, size_t size, const std::string &name);
+        Reader(const sp<IMemory>& iMemory, size_t size, const std::string &name);
         virtual ~Reader();
 
         // get snapshot of readers fifo buffer, effectively consuming the buffer
         std::unique_ptr<Snapshot> getSnapshot();
-
         bool     isIMemory(const sp<IMemory>& iMemory) const;
-
-    protected:
-        // print a summary of the performance to the console
-        void    dumpLine(const String8& timestamp, String8& body);
-        EntryIterator   handleFormat(const FormatEntry &fmtEntry,
-                                     String8 *timestamp,
-                                     String8 *body);
-        int mFd;                // file descriptor
-        int mIndent;            // indentation level
-        int mLost;              // bytes of data lost before buffer was read
+        const std::string &name() const { return mName; }
 
     private:
-        static const std::set<Event> startingTypes;
-        static const std::set<Event> endingTypes;
-
+        static constexpr int kMaxObtainTries = 3;
+        // startingTypes and endingTypes are used to check for log corruption.
+        static const std::unordered_set<Event> startingTypes;
+        static const std::unordered_set<Event> endingTypes;
         // declared as const because audio_utils_fifo() constructor
         sp<IMemory> mIMemory;       // ref-counted version, assigned only in constructor
 
+        const std::string mName;            // name of reader (actually name of writer)
         /*const*/ Shared* const mShared;    // raw pointer to shared memory, actually const but not
         audio_utils_fifo * const mFifo;                 // FIFO itself,
-        // non-NULL unless constructor fails
+                                                        // non-NULL unless constructor fails
         audio_utils_fifo_reader * const mFifoReader;    // used to read from FIFO,
-        // non-NULL unless constructor fails
+                                                        // non-NULL unless constructor fails
 
         // Searches for the last entry of type <type> in the range [front, back)
         // back has to be entry-aligned. Returns nullptr if none enconuntered.
         static const uint8_t *findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
-                                                   const std::set<Event> &types);
-
-        // dummy method for handling absent author entry
-        virtual void handleAuthor(const AbstractEntry& /*fmtEntry*/, String8* /*body*/) {}
+                                                   const std::unordered_set<Event> &types);
     };
 
-    // Wrapper for a reader with a name. Contains a pointer to the reader and a pointer to the name
-    class NamedReader {
+    class DumpReader : public Reader {
     public:
-        NamedReader() { mName[0] = '\0'; } // for Vector
-        NamedReader(const sp<NBLog::Reader>& reader, const char *name) :
-            mReader(reader)
-            { strlcpy(mName, name, sizeof(mName)); }
-        ~NamedReader() { }
-        const sp<NBLog::Reader>&  reader() const { return mReader; }
-        const char*               name() const { return mName; }
-
+        DumpReader(const void *shared, size_t size, const std::string &name)
+            : Reader(shared, size, name) {}
+        DumpReader(const sp<IMemory>& iMemory, size_t size, const std::string &name)
+            : Reader(iMemory, size, name) {}
+        void dump(int fd, size_t indent = 0);
     private:
-        sp<NBLog::Reader>   mReader;
-        static const size_t kMaxName = 32;
-        char                mName[kMaxName];
+        void handleAuthor(const AbstractEntry& fmtEntry __unused, String8* body __unused) {}
+        EntryIterator handleFormat(const FormatEntry &fmtEntry, String8 *timestamp, String8 *body);
+
+        static void    appendInt(String8 *body, const void *data);
+        static void    appendFloat(String8 *body, const void *data);
+        static void    appendPID(String8 *body, const void *data, size_t length);
+        static void    appendTimestamp(String8 *body, const void *data);
+        //static size_t  fmtEntryLength(const uint8_t *data);   // TODO Eric remove if not used
+        static String8 bufferDump(const uint8_t *buffer, size_t size);
+        static String8 bufferDump(const EntryIterator &it);
     };
 
     // ---------------------------------------------------------------------------
@@ -511,18 +505,18 @@
 
         virtual ~Merger() {}
 
-        void addReader(const NamedReader &reader);
+        void addReader(const sp<NBLog::Reader> &reader);
         // TODO add removeReader
         void merge();
 
         // FIXME This is returning a reference to a shared variable that needs a lock
-        const std::vector<NamedReader>& getNamedReaders() const;
+        const std::vector<sp<Reader>>& getReaders() const;
 
     private:
         // vector of the readers the merger is supposed to merge from.
         // every reader reads from a writer's buffer
         // FIXME Needs to be protected by a lock
-        std::vector<NamedReader> mNamedReaders;
+        std::vector<sp<Reader>> mReaders;
 
         Shared * const mShared; // raw pointer to shared memory
         std::unique_ptr<audio_utils_fifo> mFifo; // FIFO itself
@@ -530,7 +524,7 @@
     };
 
     // This class has a pointer to the FIFO in local memory which stores the merged
-    // data collected by NBLog::Merger from all NamedReaders. It is used to process
+    // data collected by NBLog::Merger from all Readers. It is used to process
     // this data and write the result to PerformanceAnalysis.
     class MergeReader : public Reader {
     public:
@@ -538,14 +532,15 @@
 
         void dump(int fd, int indent = 0);
         // process a particular snapshot of the reader
-        void getAndProcessSnapshot(Snapshot & snap);
+        void getAndProcessSnapshot(Snapshot &snap, int author);
         // call getSnapshot of the content of the reader's buffer and process the data
         void getAndProcessSnapshot();
 
     private:
         // FIXME Needs to be protected by a lock,
         //       because even though our use of it is read-only there may be asynchronous updates
-        const std::vector<NamedReader>& mNamedReaders;
+        // The object is owned by the Merger class.
+        const std::vector<sp<Reader>>& mReaders;
 
         // analyzes, compresses and stores the merged data
         // contains a separate instance for every author (thread), and for every source file
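
The header above documents the shared-memory entry layout: one event byte, one length byte, the payload, then a duplicate length byte so the previous entry can be located by scanning backwards. A small sketch of that framing follows; kOverhead = 3 and a previous-length offset of -1 are illustrative assumptions, not values taken from the header.

    #include <cstdint>
    #include <cstddef>
    #include <vector>

    constexpr size_t kOverhead = 3;             // event + length + trailing duplicate length
    constexpr int kPreviousLengthOffset = -1;   // duplicate length byte of the previous entry

    // Append one entry: event, length, payload bytes, duplicate length.
    size_t appendEntry(std::vector<uint8_t> &buf, uint8_t event,
                       const void *data, uint8_t length) {
        const size_t offset = buf.size();
        buf.push_back(event);
        buf.push_back(length);
        const uint8_t *p = static_cast<const uint8_t *>(data);
        buf.insert(buf.end(), p, p + length);
        buf.push_back(length);                  // duplicate length enables reverse scan
        return offset;
    }

    // Forward scan: skip event, length, payload, and the duplicate length byte.
    const uint8_t *nextEntry(const uint8_t *entry) {
        return entry + entry[1] + kOverhead;
    }

    // Reverse scan: the byte just before this entry is the previous entry's length.
    const uint8_t *prevEntry(const uint8_t *entry) {
        return entry - entry[kPreviousLengthOffset] - kOverhead;
    }
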
diff --git a/media/libnblog/include/media/nblog/PerformanceAnalysis.h b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
index ddfe9d6..56e0ea6 100644
--- a/media/libnblog/include/media/nblog/PerformanceAnalysis.h
+++ b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
@@ -25,6 +25,8 @@
 
 namespace android {
 
+class String8;
+
 namespace ReportPerformance {
 
 class PerformanceAnalysis;
diff --git a/media/libnblog/include/media/nblog/ReportPerformance.h b/media/libnblog/include/media/nblog/ReportPerformance.h
index ec0842f..1b11197 100644
--- a/media/libnblog/include/media/nblog/ReportPerformance.h
+++ b/media/libnblog/include/media/nblog/ReportPerformance.h
@@ -23,9 +23,6 @@
 
 namespace android {
 
-// The String8 class is used by reportPerformance function
-class String8;
-
 namespace ReportPerformance {
 
 constexpr int kMsPerSec = 1000;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 7f39d10..3526047 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5556,6 +5556,11 @@
             break;
         }
 
+        case kWhatCheckIfStuck: {
+            ALOGV("No-op by default");
+            break;
+        }
+
         default:
             return false;
     }
@@ -7873,6 +7878,18 @@
             break;
         }
 
+        case kWhatCheckIfStuck:
+        {
+            int32_t generation = 0;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation == mCodec->mStateGeneration) {
+                mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+            }
+
+            handled = true;
+            break;
+        }
+
         default:
             handled = BaseState::onMessageReceived(msg);
             break;
@@ -7884,6 +7901,11 @@
 void ACodec::OutputPortSettingsChangedState::stateEntered() {
     ALOGV("[%s] Now handling output port settings change",
          mCodec->mComponentName.c_str());
+
+    // If we haven't transitioned after 3 seconds, we're probably stuck.
+    sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+    msg->setInt32("generation", mCodec->mStateGeneration);
+    msg->post(3000000);
 }
 
 bool ACodec::OutputPortSettingsChangedState::onOMXFrameRendered(
@@ -8146,6 +8168,11 @@
     ALOGV("[%s] Now Flushing", mCodec->mComponentName.c_str());
 
     mFlushComplete[kPortIndexInput] = mFlushComplete[kPortIndexOutput] = false;
+
+    // If we haven't transitioned after 3 seconds, we're probably stuck.
+    sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+    msg->setInt32("generation", mCodec->mStateGeneration);
+    msg->post(3000000);
 }
 
 bool ACodec::FlushingState::onMessageReceived(const sp<AMessage> &msg) {
@@ -8160,6 +8187,7 @@
                 msg->setInt32("generation", mCodec->mStateGeneration);
                 msg->post(3000000);
             }
+            handled = true;
             break;
         }
 
@@ -8180,6 +8208,18 @@
             break;
         }
 
+        case kWhatCheckIfStuck:
+        {
+            int32_t generation = 0;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation == mCodec->mStateGeneration) {
+                mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+            }
+
+            handled = true;
+            break;
+        }
+
         default:
             handled = BaseState::onMessageReceived(msg);
             break;
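
The ACodec changes above arm a delayed kWhatCheckIfStuck message that carries the current mStateGeneration and only raise TIMED_OUT if the generation is unchanged when the message fires. The same generation-token watchdog can be sketched without the AMessage/ALooper machinery; StateWatchdog and its names below are illustrative assumptions, not stagefright code.

    #include <atomic>
    #include <chrono>
    #include <functional>
    #include <thread>

    class StateWatchdog {
    public:
        // Called on every state transition; invalidates any checks armed earlier.
        void bumpGeneration() { mGeneration.fetch_add(1); }

        // Arm a one-shot check: after `timeout`, report an error unless the
        // generation has moved on in the meantime (i.e. a transition happened).
        // For illustration only; real code must ensure the watchdog outlives the thread.
        void armCheck(std::chrono::milliseconds timeout, std::function<void()> onStuck) {
            const int armedGeneration = mGeneration.load();
            std::thread([this, timeout, armedGeneration, onStuck] {
                std::this_thread::sleep_for(timeout);
                if (mGeneration.load() == armedGeneration) {
                    onStuck();  // still in the same state: probably stuck
                }
            }).detach();
        }

    private:
        std::atomic<int> mGeneration{0};
    };
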
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 710ae68..266a240 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -129,6 +129,7 @@
         secureHandle = static_cast<native_handle_t *>(secureData->getDestinationPointer());
     }
     ssize_t result = -1;
+    ssize_t codecDataOffset = 0;
     if (mCrypto != NULL) {
         ICrypto::DestinationBuffer destination;
         if (secure) {
@@ -180,9 +181,16 @@
 
         Status status = Status::OK;
         hidl_string detailedError;
+        ScramblingControl sctrl = ScramblingControl::UNSCRAMBLED;
+
+        if (key != NULL) {
+            sctrl = (ScramblingControl)key[0];
+            // Adjust for the PES offset
+            codecDataOffset = key[2] | (key[3] << 8);
+        }
 
         auto returnVoid = mDescrambler->descramble(
-                key != NULL ? (ScramblingControl)key[0] : ScramblingControl::UNSCRAMBLED,
+                sctrl,
                 hidlSubSamples,
                 srcBuffer,
                 0,
@@ -202,6 +210,11 @@
             return UNKNOWN_ERROR;
         }
 
+        if (result < codecDataOffset) {
+            ALOGD("invalid codec data offset: %zd, result %zd", codecDataOffset, result);
+            return BAD_VALUE;
+        }
+
         ALOGV("descramble succeeded, %zd bytes", result);
 
         if (dstBuffer.type == BufferType::SHARED_MEMORY) {
@@ -210,7 +223,7 @@
         }
     }
 
-    it->mCodecBuffer->setRange(0, result);
+    it->mCodecBuffer->setRange(codecDataOffset, result - codecDataOffset);
 
     // Copy metadata from client to codec buffer.
     it->mCodecBuffer->meta()->clear();
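
The descrambler path above now reads a 16-bit little-endian codec-data offset from key bytes [2..3], rejects results shorter than that offset, and trims the PES header off the output buffer with setRange(). A minimal sketch of that bookkeeping follows; Buffer is a hypothetical stand-in for the codec buffer used in the patch.

    #include <sys/types.h>  // ssize_t
    #include <cstdint>
    #include <cstddef>

    struct Buffer {
        size_t offset = 0;
        size_t size = 0;
        void setRange(size_t newOffset, size_t newSize) { offset = newOffset; size = newSize; }
    };

    // Returns false if the descrambler produced fewer bytes than the claimed offset.
    bool applyCodecDataOffset(const uint8_t *key, ssize_t result, Buffer &codecBuffer) {
        ssize_t codecDataOffset = 0;
        if (key != nullptr) {
            // key[2] is the low byte, key[3] the high byte of the PES header length.
            codecDataOffset = key[2] | (key[3] << 8);
        }
        if (result < codecDataOffset) {
            return false;  // invalid offset, mirrors the BAD_VALUE path above
        }
        // Skip the PES header so the codec only sees the elementary stream data.
        codecBuffer.setRange(codecDataOffset, result - codecDataOffset);
        return true;
    }
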
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 48e351b..fec40b6 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -105,6 +105,7 @@
         "DataConverter.cpp",
         "DataSourceFactory.cpp",
         "DataURISource.cpp",
+        "ClearFileSource.cpp",
         "FileSource.cpp",
         "FrameDecoder.cpp",
         "HTTPBase.cpp",
@@ -119,8 +120,10 @@
         "MediaCodecList.cpp",
         "MediaCodecListOverrides.cpp",
         "MediaCodecSource.cpp",
+        "MediaExtractor.cpp",
         "MediaExtractorFactory.cpp",
         "MediaSync.cpp",
+        "http/ClearMediaHTTP.cpp",
         "http/MediaHTTP.cpp",
         "MediaMuxer.cpp",
         "NuCachedSource2.cpp",
@@ -232,13 +235,14 @@
     srcs: [
         "CallbackDataSource.cpp",
         "CallbackMediaSource.cpp",
-        "DataSourceFactory.cpp",
+        "ClearDataSourceFactory.cpp",
+        "ClearFileSource.cpp",
         "DataURISource.cpp",
-        "FileSource.cpp",
         "HTTPBase.cpp",
         "HevcUtils.cpp",
         "InterfaceUtils.cpp",
         "MediaClock.cpp",
+        "MediaExtractor.cpp",
         "MediaExtractorFactory.cpp",
         "NdkUtils.cpp",
         "NuCachedSource2.cpp",
@@ -246,13 +250,12 @@
         "RemoteMediaSource.cpp",
         "Utils.cpp",
         "VideoFrameScheduler.cpp",
-        "http/MediaHTTP.cpp",
+        "http/ClearMediaHTTP.cpp",
     ],
 
     shared_libs: [
         "libbinder",
         "libcutils",
-        "libdrmframework",
         "libgui",
         "liblog",
         "libmedia_player2_util",
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index db37021..41f5db0 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -217,6 +217,7 @@
       mNumFramesReceived(0),
       mLastFrameTimestampUs(0),
       mStarted(false),
+      mEos(false),
       mNumFramesEncoded(0),
       mTimeBetweenFrameCaptureUs(0),
       mFirstFrameTimeUs(0),
@@ -880,6 +881,7 @@
     {
         Mutex::Autolock autoLock(mLock);
         mStarted = false;
+        mEos = false;
         mStopSystemTimeUs = -1;
         mFrameAvailableCondition.signal();
 
@@ -1075,7 +1077,7 @@
 
     {
         Mutex::Autolock autoLock(mLock);
-        while (mStarted && mFramesReceived.empty()) {
+        while (mStarted && !mEos && mFramesReceived.empty()) {
             if (NO_ERROR !=
                 mFrameAvailableCondition.waitRelative(mLock,
                     mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
@@ -1091,6 +1093,9 @@
         if (!mStarted) {
             return OK;
         }
+        if (mFramesReceived.empty()) {
+            return ERROR_END_OF_STREAM;
+        }
         frame = *mFramesReceived.begin();
         mFramesReceived.erase(mFramesReceived.begin());
 
@@ -1129,6 +1134,8 @@
     if (mStopSystemTimeUs != -1 && timestampUs >= mStopSystemTimeUs) {
         ALOGV("Drop Camera frame at %lld  stop time: %lld us",
                 (long long)timestampUs, (long long)mStopSystemTimeUs);
+        mEos = true;
+        mFrameAvailableCondition.signal();
         return true;
     }
 
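The CameraSource change above adds an mEos flag so that read() stops waiting once the stop time has passed and returns ERROR_END_OF_STREAM when the frame queue drains. The same wait condition can be sketched with standard C++ synchronization primitives; FrameQueue and its status codes below are illustrative stand-ins, not the CameraSource API.

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct Frame {};
    enum Status { OK_STATUS, END_OF_STREAM, STOPPED };

    class FrameQueue {
    public:
        // Producer side: once the stop time is reached, mark EOS and wake readers.
        void signalEos() {
            std::lock_guard<std::mutex> lock(mMutex);
            mEos = true;
            mCond.notify_all();
        }

        void push(const Frame &frame) {
            std::lock_guard<std::mutex> lock(mMutex);
            mFrames.push_back(frame);
            mCond.notify_all();
        }

        void stop() {
            std::lock_guard<std::mutex> lock(mMutex);
            mStarted = false;
            mCond.notify_all();
        }

        // Consumer side: wait while started, not at EOS, and no frames are queued.
        Status read(Frame *out) {
            std::unique_lock<std::mutex> lock(mMutex);
            mCond.wait(lock, [this] { return !mStarted || mEos || !mFrames.empty(); });
            if (!mStarted) {
                return STOPPED;             // like returning OK after stop()
            }
            if (mFrames.empty()) {
                return END_OF_STREAM;       // EOS reached and queue drained
            }
            *out = mFrames.front();
            mFrames.pop_front();
            return OK_STATUS;
        }

    private:
        std::mutex mMutex;
        std::condition_variable mCond;
        std::deque<Frame> mFrames;
        bool mStarted = true;
        bool mEos = false;
    };
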
diff --git a/media/libstagefright/ClearDataSourceFactory.cpp b/media/libstagefright/ClearDataSourceFactory.cpp
new file mode 100644
index 0000000..5d23fda
--- /dev/null
+++ b/media/libstagefright/ClearDataSourceFactory.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearDataSourceFactory"
+
+#include "include/HTTPBase.h"
+#include "include/NuCachedSource2.h"
+
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+#include <media/stagefright/ClearFileSource.h>
+#include <media/stagefright/ClearMediaHTTP.h>
+#include <media/stagefright/ClearDataSourceFactory.h>
+#include <media/stagefright/DataURISource.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// static
+sp<DataSource> ClearDataSourceFactory::CreateFromURI(
+        const sp<MediaHTTPService> &httpService,
+        const char *uri,
+        const KeyedVector<String8, String8> *headers,
+        String8 *contentType,
+        HTTPBase *httpSource) {
+    if (contentType != NULL) {
+        *contentType = "";
+    }
+
+    sp<DataSource> source;
+    if (!strncasecmp("file://", uri, 7)) {
+        source = new ClearFileSource(uri + 7);
+    } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
+        if (httpService == NULL) {
+            ALOGE("Invalid http service!");
+            return NULL;
+        }
+
+        if (httpSource == NULL) {
+            sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
+            if (conn == NULL) {
+                ALOGE("Failed to make http connection from http service!");
+                return NULL;
+            }
+            httpSource = new ClearMediaHTTP(conn);
+        }
+
+        String8 cacheConfig;
+        bool disconnectAtHighwatermark = false;
+        KeyedVector<String8, String8> nonCacheSpecificHeaders;
+        if (headers != NULL) {
+            nonCacheSpecificHeaders = *headers;
+            NuCachedSource2::RemoveCacheSpecificHeaders(
+                    &nonCacheSpecificHeaders,
+                    &cacheConfig,
+                    &disconnectAtHighwatermark);
+        }
+
+        if (httpSource->connect(uri, &nonCacheSpecificHeaders) != OK) {
+            ALOGE("Failed to connect http source!");
+            return NULL;
+        }
+
+        if (contentType != NULL) {
+            *contentType = httpSource->getMIMEType();
+        }
+
+        source = NuCachedSource2::Create(
+                httpSource,
+                cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+                disconnectAtHighwatermark);
+    } else if (!strncasecmp("data:", uri, 5)) {
+        source = DataURISource::Create(uri);
+    } else {
+        // Assume it's a filename.
+        source = new ClearFileSource(uri);
+    }
+
+    if (source == NULL || source->initCheck() != OK) {
+        return NULL;
+    }
+
+    return source;
+}
+
+sp<DataSource> ClearDataSourceFactory::CreateFromFd(int fd, int64_t offset, int64_t length) {
+    sp<ClearFileSource> source = new ClearFileSource(fd, offset, length);
+    return source->initCheck() != OK ? nullptr : source;
+}
+
+sp<DataSource> ClearDataSourceFactory::CreateMediaHTTP(const sp<MediaHTTPService> &httpService) {
+    if (httpService == NULL) {
+        return NULL;
+    }
+
+    sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
+    if (conn == NULL) {
+        return NULL;
+    } else {
+        return new ClearMediaHTTP(conn);
+    }
+}
+
+}  // namespace android
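
ClearDataSourceFactory::CreateFromURI dispatches purely on the URI scheme prefix and falls back to treating the string as a plain filename. A hedged sketch of that dispatch, standard library only (the real factory returns DataSource objects, not strings):

    #include <string>
    #include <strings.h>   // strncasecmp

    // Returns which kind of source CreateFromURI would construct.
    static std::string classifyUri(const char *uri) {
        if (strncasecmp("file://", uri, 7) == 0) return "file";      // ClearFileSource(uri + 7)
        if (strncasecmp("http://", uri, 7) == 0 ||
            strncasecmp("https://", uri, 8) == 0) return "http";     // ClearMediaHTTP + NuCachedSource2
        if (strncasecmp("data:", uri, 5) == 0) return "data";        // DataURISource
        return "plain-file-path";                                    // fallback: treat as a filename
    }

Splitting the DRM-free ("Clear") variants into their own classes is presumably what lets callers of this factory avoid pulling in libdrmframework, consistent with the shared_libs removal at the top of this section.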
diff --git a/media/libstagefright/ClearFileSource.cpp b/media/libstagefright/ClearFileSource.cpp
new file mode 100644
index 0000000..e3a2cb7
--- /dev/null
+++ b/media/libstagefright/ClearFileSource.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearFileSource"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ClearFileSource.h>
+#include <media/stagefright/Utils.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace android {
+
+ClearFileSource::ClearFileSource(const char *filename)
+    : mFd(-1),
+      mOffset(0),
+      mLength(-1),
+      mName("<null>") {
+
+    if (filename) {
+        mName = String8::format("FileSource(%s)", filename);
+    }
+    ALOGV("%s", filename);
+    mFd = open(filename, O_LARGEFILE | O_RDONLY);
+
+    if (mFd >= 0) {
+        mLength = lseek64(mFd, 0, SEEK_END);
+    } else {
+        ALOGE("Failed to open file '%s'. (%s)", filename, strerror(errno));
+    }
+}
+
+ClearFileSource::ClearFileSource(int fd, int64_t offset, int64_t length)
+    : mFd(fd),
+      mOffset(offset),
+      mLength(length),
+      mName("<null>") {
+    ALOGV("fd=%d (%s), offset=%lld, length=%lld",
+            fd, nameForFd(fd).c_str(), (long long) offset, (long long) length);
+
+    if (mOffset < 0) {
+        mOffset = 0;
+    }
+    if (mLength < 0) {
+        mLength = 0;
+    }
+    if (mLength > INT64_MAX - mOffset) {
+        mLength = INT64_MAX - mOffset;
+    }
+    struct stat s;
+    if (fstat(fd, &s) == 0) {
+        if (mOffset > s.st_size) {
+            mOffset = s.st_size;
+            mLength = 0;
+        }
+        if (mOffset + mLength > s.st_size) {
+            mLength = s.st_size - mOffset;
+        }
+    }
+    if (mOffset != offset || mLength != length) {
+        ALOGW("offset/length adjusted from %lld/%lld to %lld/%lld",
+                (long long) offset, (long long) length,
+                (long long) mOffset, (long long) mLength);
+    }
+
+    mName = String8::format(
+            "FileSource(fd(%s), %lld, %lld)",
+            nameForFd(fd).c_str(),
+            (long long) mOffset,
+            (long long) mLength);
+
+}
+
+ClearFileSource::~ClearFileSource() {
+    if (mFd >= 0) {
+        ::close(mFd);
+        mFd = -1;
+    }
+}
+
+status_t ClearFileSource::initCheck() const {
+    return mFd >= 0 ? OK : NO_INIT;
+}
+
+ssize_t ClearFileSource::readAt(off64_t offset, void *data, size_t size) {
+    if (mFd < 0) {
+        return NO_INIT;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    if (mLength >= 0) {
+        if (offset >= mLength) {
+            return 0;  // read beyond EOF.
+        }
+        uint64_t numAvailable = mLength - offset;
+        if ((uint64_t)size > numAvailable) {
+            size = numAvailable;
+        }
+    }
+    return readAt_l(offset, data, size);
+}
+
+ssize_t ClearFileSource::readAt_l(off64_t offset, void *data, size_t size) {
+    off64_t result = lseek64(mFd, offset + mOffset, SEEK_SET);
+    if (result == -1) {
+        ALOGE("seek to %lld failed", (long long)(offset + mOffset));
+        return UNKNOWN_ERROR;
+    }
+
+    return ::read(mFd, data, size);
+}
+
+status_t ClearFileSource::getSize(off64_t *size) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mFd < 0) {
+        return NO_INIT;
+    }
+
+    *size = mLength;
+
+    return OK;
+}
+
+}  // namespace android
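
The fd-based ClearFileSource constructor sanitizes the caller-supplied window before use: negative offset/length are clamped, the sum is kept from overflowing, and both are bounded by the size fstat() reports. The same logic in isolation, as a plain helper:

    #include <cstdint>
    #include <sys/stat.h>

    // Clamp a caller-supplied (offset, length) window to the file behind fd,
    // mirroring the constructor above.
    static void clampWindow(int fd, int64_t &offset, int64_t &length) {
        if (offset < 0) offset = 0;
        if (length < 0) length = 0;
        if (length > INT64_MAX - offset) length = INT64_MAX - offset;  // keep offset+length from overflowing
        struct stat s;
        if (fstat(fd, &s) == 0) {
            if (offset > s.st_size) { offset = s.st_size; length = 0; }
            if (offset + length > s.st_size) length = s.st_size - offset;
        }
    }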
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index eef5314..aee7fd8 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -22,90 +22,28 @@
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/Utils.h>
 #include <private/android_filesystem_config.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
 
 namespace android {
 
 FileSource::FileSource(const char *filename)
-    : mFd(-1),
-      mOffset(0),
-      mLength(-1),
-      mName("<null>"),
+    : ClearFileSource(filename),
       mDecryptHandle(NULL),
       mDrmManagerClient(NULL),
       mDrmBufOffset(0),
       mDrmBufSize(0),
       mDrmBuf(NULL){
-
-    if (filename) {
-        mName = String8::format("FileSource(%s)", filename);
-    }
-    ALOGV("%s", filename);
-    mFd = open(filename, O_LARGEFILE | O_RDONLY);
-
-    if (mFd >= 0) {
-        mLength = lseek64(mFd, 0, SEEK_END);
-    } else {
-        ALOGE("Failed to open file '%s'. (%s)", filename, strerror(errno));
-    }
 }
 
 FileSource::FileSource(int fd, int64_t offset, int64_t length)
-    : mFd(fd),
-      mOffset(offset),
-      mLength(length),
-      mName("<null>"),
+    : ClearFileSource(fd, offset, length),
       mDecryptHandle(NULL),
       mDrmManagerClient(NULL),
       mDrmBufOffset(0),
       mDrmBufSize(0),
       mDrmBuf(NULL) {
-    ALOGV("fd=%d (%s), offset=%lld, length=%lld",
-            fd, nameForFd(fd).c_str(), (long long) offset, (long long) length);
-
-    if (mOffset < 0) {
-        mOffset = 0;
-    }
-    if (mLength < 0) {
-        mLength = 0;
-    }
-    if (mLength > INT64_MAX - mOffset) {
-        mLength = INT64_MAX - mOffset;
-    }
-    struct stat s;
-    if (fstat(fd, &s) == 0) {
-        if (mOffset > s.st_size) {
-            mOffset = s.st_size;
-            mLength = 0;
-        }
-        if (mOffset + mLength > s.st_size) {
-            mLength = s.st_size - mOffset;
-        }
-    }
-    if (mOffset != offset || mLength != length) {
-        ALOGW("offset/length adjusted from %lld/%lld to %lld/%lld",
-                (long long) offset, (long long) length,
-                (long long) mOffset, (long long) mLength);
-    }
-
-    mName = String8::format(
-            "FileSource(fd(%s), %lld, %lld)",
-            nameForFd(fd).c_str(),
-            (long long) mOffset,
-            (long long) mLength);
-
 }
 
 FileSource::~FileSource() {
-    if (mFd >= 0) {
-        ::close(mFd);
-        mFd = -1;
-    }
-
     if (mDrmBuf != NULL) {
         delete[] mDrmBuf;
         mDrmBuf = NULL;
@@ -124,10 +62,6 @@
     }
 }
 
-status_t FileSource::initCheck() const {
-    return mFd >= 0 ? OK : NO_INIT;
-}
-
 ssize_t FileSource::readAt(off64_t offset, void *data, size_t size) {
     if (mFd < 0) {
         return NO_INIT;
@@ -147,30 +81,12 @@
 
     if (mDecryptHandle != NULL && DecryptApiType::CONTAINER_BASED
             == mDecryptHandle->decryptApiType) {
-        return readAtDRM(offset, data, size);
+        return readAtDRM_l(offset, data, size);
    } else {
-        off64_t result = lseek64(mFd, offset + mOffset, SEEK_SET);
-        if (result == -1) {
-            ALOGE("seek to %lld failed", (long long)(offset + mOffset));
-            return UNKNOWN_ERROR;
-        }
-
-        return ::read(mFd, data, size);
+        return readAt_l(offset, data, size);
     }
 }
 
-status_t FileSource::getSize(off64_t *size) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mFd < 0) {
-        return NO_INIT;
-    }
-
-    *size = mLength;
-
-    return OK;
-}
-
 sp<DecryptHandle> FileSource::DrmInitialization(const char *mime) {
     if (getuid() == AID_MEDIA_EX) return nullptr; // no DRM in media extractor
     if (mDrmManagerClient == NULL) {
@@ -194,7 +110,7 @@
     return mDecryptHandle;
 }
 
-ssize_t FileSource::readAtDRM(off64_t offset, void *data, size_t size) {
+ssize_t FileSource::readAtDRM_l(off64_t offset, void *data, size_t size) {
     size_t DRM_CACHE_SIZE = 1024;
     if (mDrmBuf == NULL) {
         mDrmBuf = new unsigned char[DRM_CACHE_SIZE];
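
The FileSource diff above is a pure refactor: fd handling, bounds checks, and plain reads move into the ClearFileSource base class, and FileSource keeps only the DRM layer, routing container-based DRM reads through readAtDRM_l() and everything else through the inherited readAt_l(). A rough, illustrative shape of that split (names and stub bodies are placeholders, not the real classes):

    #include <cstddef>
    #include <cstdint>

    class ClearSourceSketch {
    public:
        virtual ~ClearSourceSketch() = default;
        virtual int64_t readAt(int64_t offset, void *data, size_t size) {
            return readAt_l(offset, data, size);          // plain file read
        }
    protected:
        // Stand-in for the real seek+read on the owned fd.
        int64_t readAt_l(int64_t /*offset*/, void * /*data*/, size_t size) {
            return static_cast<int64_t>(size);
        }
    };

    class DrmSourceSketch : public ClearSourceSketch {
    public:
        int64_t readAt(int64_t offset, void *data, size_t size) override {
            if (mContainerBasedDrm) {
                return readAtDRM_l(offset, data, size);   // decrypt-and-copy path
            }
            return readAt_l(offset, data, size);          // inherited clear path
        }
    private:
        // Stand-in for the DRM cache read in FileSource::readAtDRM_l().
        int64_t readAtDRM_l(int64_t, void *, size_t size) {
            return static_cast<int64_t>(size);
        }
        bool mContainerBasedDrm = false;
    };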
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 3370df1..c0e65e8 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -528,6 +528,18 @@
 
     ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
 
+    uint32_t standard, range, transfer;
+    if (!outputFormat->findInt32("color-standard", (int32_t*)&standard)) {
+        standard = 0;
+    }
+    if (!outputFormat->findInt32("color-range", (int32_t*)&range)) {
+        range = 0;
+    }
+    if (!outputFormat->findInt32("color-transfer", (int32_t*)&transfer)) {
+        transfer = 0;
+    }
+    converter.setSrcColorSpace(standard, range, transfer);
+
     if (converter.isValid()) {
         converter.convert(
                 (const uint8_t *)videoFrameBuffer->data(),
@@ -699,6 +711,18 @@
 
     ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
 
+    uint32_t standard, range, transfer;
+    if (!outputFormat->findInt32("color-standard", (int32_t*)&standard)) {
+        standard = 0;
+    }
+    if (!outputFormat->findInt32("color-range", (int32_t*)&range)) {
+        range = 0;
+    }
+    if (!outputFormat->findInt32("color-transfer", (int32_t*)&transfer)) {
+        transfer = 0;
+    }
+    converter.setSrcColorSpace(standard, range, transfer);
+
     int32_t dstLeft, dstTop, dstRight, dstBottom;
     dstLeft = mTilesDecoded % mGridCols * width;
     dstTop = mTilesDecoded / mGridCols * height;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 72eff94..f91c543 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -860,7 +860,15 @@
 }
 
 //static
-sp<CodecBase> MediaCodec::GetCodecBase(const AString &name) {
+sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, const char *owner) {
+    if (owner) {
+        if (strncmp(owner, "default", 8) == 0) {
+            return new ACodec;
+        } else if (strncmp(owner, "codec2", 7) == 0) {
+            return CreateCCodec();
+        }
+    }
+
     if (name.startsWithIgnoreCase("c2.")) {
         return CreateCCodec();
     } else if (name.startsWithIgnoreCase("omx.")) {
@@ -884,11 +892,6 @@
     // we need to invest in an extra looper to free the main event
     // queue.
 
-    mCodec = GetCodecBase(name);
-    if (mCodec == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
     mCodecInfo.clear();
 
     bool secureCodec = false;
@@ -922,6 +925,11 @@
         return NAME_NOT_FOUND;
     }
 
+    mCodec = GetCodecBase(name, mCodecInfo->getOwnerName());
+    if (mCodec == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
     if (mIsVideo) {
         // video codec needs dedicated looper
         if (mCodecLooper == NULL) {
@@ -1935,7 +1943,9 @@
                         mAnalyticsItem->setCString(kCodecCodec, mComponentName.c_str());
                     }
 
-                    if (mComponentName.startsWith("OMX.google.")) {
+                    const char *owner = mCodecInfo->getOwnerName();
+                    if (mComponentName.startsWith("OMX.google.")
+                            && (owner == nullptr || strncmp(owner, "default", 8) == 0)) {
                         mFlags |= kFlagUsesSoftwareRenderer;
                     } else {
                         mFlags &= ~kFlagUsesSoftwareRenderer;
@@ -2664,7 +2674,7 @@
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mFlags & kFlagIsAsync) {
-                ALOGE("dequeueOutputBuffer can't be used in async mode");
+                ALOGE("dequeueInputBuffer can't be used in async mode");
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             }
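
The MediaCodec change selects the codec backend from the MediaCodecInfo owner string first and only falls back to the component-name prefix, which is why GetCodecBase() is now called after mCodecInfo has been resolved. A hedged sketch of that dispatch (the real name check is case-insensitive; Backend is an illustrative stand-in for the CodecBase factories):

    #include <cstring>
    #include <string>

    enum class Backend { OMX, Codec2, Unknown };

    // Owner string wins; the name prefix is only a fallback.
    static Backend pickBackend(const std::string &name, const char *owner) {
        if (owner != nullptr) {
            if (std::strcmp(owner, "default") == 0) return Backend::OMX;     // ACodec
            if (std::strcmp(owner, "codec2") == 0)  return Backend::Codec2;  // CCodec
        }
        if (name.rfind("c2.", 0) == 0)  return Backend::Codec2;
        if (name.rfind("omx.", 0) == 0) return Backend::OMX;
        return Backend::Unknown;
    }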
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
new file mode 100644
index 0000000..5e1dc77
--- /dev/null
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaExtractor"
+#include <utils/Log.h>
+#include <pwd.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+MediaExtractor::MediaExtractor() {
+    if (!LOG_NDEBUG) {
+        uid_t uid = getuid();
+        struct passwd *pw = getpwuid(uid);
+        ALOGV("extractor created in uid: %d (%s)", getuid(), pw->pw_name);
+    }
+}
+
+MediaExtractor::~MediaExtractor() {}
+
+uint32_t MediaExtractor::flags() const {
+    return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_PAUSE | CAN_SEEK;
+}
+
+// --------------------------------------------------------------------------------
+MediaExtractorCUnwrapper::MediaExtractorCUnwrapper(CMediaExtractor *wrapper) {
+    this->wrapper = wrapper;
+}
+
+MediaExtractorCUnwrapper::~MediaExtractorCUnwrapper() {
+    wrapper->free(wrapper->data);
+    free(wrapper);
+}
+
+size_t MediaExtractorCUnwrapper::countTracks() {
+    return wrapper->countTracks(wrapper->data);
+}
+
+MediaTrack *MediaExtractorCUnwrapper::getTrack(size_t index) {
+    return wrapper->getTrack(wrapper->data, index);
+}
+
+status_t MediaExtractorCUnwrapper::getTrackMetaData(
+        MetaDataBase& meta, size_t index, uint32_t flags) {
+    return wrapper->getTrackMetaData(wrapper->data, meta, index, flags);
+}
+
+status_t MediaExtractorCUnwrapper::getMetaData(MetaDataBase& meta) {
+    return wrapper->getMetaData(wrapper->data, meta);
+}
+
+const char * MediaExtractorCUnwrapper::name() {
+    return wrapper->name(wrapper->data);
+}
+
+uint32_t MediaExtractorCUnwrapper::flags() const {
+    return wrapper->flags(wrapper->data);
+}
+
+status_t MediaExtractorCUnwrapper::setMediaCas(const uint8_t* casToken, size_t size) {
+    return wrapper->setMediaCas(wrapper->data, casToken, size);
+}
+
+}  // namespace android
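
MediaExtractorCUnwrapper adapts the plugin's C ABI back into the C++ MediaExtractor interface: the plugin hands over a struct of function pointers plus an opaque data pointer, each virtual forwards into that table, and the destructor releases both the plugin state and the table itself. A minimal sketch of the pattern (struct and names are illustrative, not the real CMediaExtractor):

    #include <cstddef>
    #include <cstdlib>

    // Stand-in for CMediaExtractor: opaque state plus a table of C callbacks.
    struct CExtractorSketch {
        void *data;
        size_t (*countTracks)(void *data);
        void (*free)(void *data);
    };

    class ExtractorUnwrapperSketch {
    public:
        explicit ExtractorUnwrapperSketch(CExtractorSketch *w) : mWrapper(w) {}
        ~ExtractorUnwrapperSketch() {
            mWrapper->free(mWrapper->data);   // plugin releases its own state
            std::free(mWrapper);              // the table itself was malloc()ed by the plugin
        }
        size_t countTracks() { return mWrapper->countTracks(mWrapper->data); }
    private:
        CExtractorSketch *mWrapper;
    };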
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index 2d4bd39..72ddb71 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -21,9 +21,9 @@
 #include <binder/IServiceManager.h>
 #include <media/DataSource.h>
 #include <media/MediaAnalyticsItem.h>
-#include <media/MediaExtractor.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/InterfaceUtils.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/IMediaExtractor.h>
 #include <media/IMediaExtractorService.h>
@@ -74,25 +74,26 @@
     source->DrmInitialization(nullptr /* mime */);
 
     void *meta = nullptr;
-    MediaExtractor::CreatorFunc creator = NULL;
-    MediaExtractor::FreeMetaFunc freeMeta = nullptr;
+    CreatorFunc creator = NULL;
+    FreeMetaFunc freeMeta = nullptr;
     float confidence;
     sp<ExtractorPlugin> plugin;
-    creator = sniff(source.get(), &confidence, &meta, &freeMeta, plugin);
+    creator = sniff(source, &confidence, &meta, &freeMeta, plugin);
     if (!creator) {
         ALOGV("FAILED to autodetect media content.");
         return NULL;
     }
 
-    MediaExtractor *ret = creator(source.get(), meta);
+    CMediaExtractor *ret = creator(source->wrap(), meta);
     if (meta != nullptr && freeMeta != nullptr) {
         freeMeta(meta);
     }
 
+    MediaExtractor *ex = ret != nullptr ? new MediaExtractorCUnwrapper(ret) : nullptr;
     ALOGV("Created an extractor '%s' with confidence %.2f",
-         ret != nullptr ? ret->name() : "<null>", confidence);
+         ex != nullptr ? ex->name() : "<null>", confidence);
 
-    return CreateIMediaExtractorFromMediaExtractor(ret, source, plugin);
+    return CreateIMediaExtractorFromMediaExtractor(ex, source, plugin);
 }
 
 //static
@@ -103,14 +104,14 @@
 }
 
 struct ExtractorPlugin : public RefBase {
-    MediaExtractor::ExtractorDef def;
+    ExtractorDef def;
     void *libHandle;
     String8 libPath;
     String8 uuidString;
 
-    ExtractorPlugin(MediaExtractor::ExtractorDef definition, void *handle, String8 &path)
+    ExtractorPlugin(ExtractorDef definition, void *handle, String8 &path)
         : def(definition), libHandle(handle), libPath(path) {
-        for (size_t i = 0; i < sizeof MediaExtractor::ExtractorDef::extractor_uuid; i++) {
+        for (size_t i = 0; i < sizeof ExtractorDef::extractor_uuid; i++) {
             uuidString.appendFormat("%02x", def.extractor_uuid.b[i]);
         }
     }
@@ -125,11 +126,12 @@
 Mutex MediaExtractorFactory::gPluginMutex;
 std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
 bool MediaExtractorFactory::gPluginsRegistered = false;
+bool MediaExtractorFactory::gIgnoreVersion = false;
 
 // static
-MediaExtractor::CreatorFunc MediaExtractorFactory::sniff(
-        DataSourceBase *source, float *confidence, void **meta,
-        MediaExtractor::FreeMetaFunc *freeMeta, sp<ExtractorPlugin> &plugin) {
+CreatorFunc MediaExtractorFactory::sniff(
+        const sp<DataSource> &source, float *confidence, void **meta,
+        FreeMetaFunc *freeMeta, sp<ExtractorPlugin> &plugin) {
     *confidence = 0.0f;
     *meta = nullptr;
 
@@ -142,14 +144,15 @@
         plugins = gPlugins;
     }
 
-    MediaExtractor::CreatorFunc curCreator = NULL;
-    MediaExtractor::CreatorFunc bestCreator = NULL;
+    CreatorFunc curCreator = NULL;
+    CreatorFunc bestCreator = NULL;
     for (auto it = plugins->begin(); it != plugins->end(); ++it) {
         ALOGV("sniffing %s", (*it)->def.extractor_name);
         float newConfidence;
         void *newMeta = nullptr;
-        MediaExtractor::FreeMetaFunc newFreeMeta = nullptr;
-        if ((curCreator = (*it)->def.sniff(source, &newConfidence, &newMeta, &newFreeMeta))) {
+        FreeMetaFunc newFreeMeta = nullptr;
+        if ((curCreator = (*it)->def.sniff(
+                        source->wrap(), &newConfidence, &newMeta, &newFreeMeta))) {
             if (newConfidence > *confidence) {
                 *confidence = newConfidence;
                 if (*meta != nullptr && *freeMeta != nullptr) {
@@ -175,7 +178,7 @@
         std::list<sp<ExtractorPlugin>> &pluginList) {
     // sanity check struct version, uuid, name
     if (plugin->def.def_version == 0
-            || plugin->def.def_version > MediaExtractor::EXTRACTORDEF_VERSION) {
+            || plugin->def.def_version > EXTRACTORDEF_VERSION) {
         ALOGE("don't understand extractor format %u, ignoring.", plugin->def.def_version);
         return;
     }
@@ -191,7 +194,7 @@
     for (auto it = pluginList.begin(); it != pluginList.end(); ++it) {
         if (memcmp(&((*it)->def.extractor_uuid), &plugin->def.extractor_uuid, 16) == 0) {
             // there's already an extractor with the same uuid
-            if ((*it)->def.extractor_version < plugin->def.extractor_version) {
+            if (gIgnoreVersion || (*it)->def.extractor_version < plugin->def.extractor_version) {
                 // this one is newer, replace the old one
                 ALOGW("replacing extractor '%s' version %u with version %u",
                         plugin->def.extractor_name,
@@ -236,8 +239,8 @@
                 // within the apk instead of system libraries already loaded.
                 void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
                 if (libHandle) {
-                    MediaExtractor::GetExtractorDef getDef =
-                        (MediaExtractor::GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
+                    GetExtractorDef getDef =
+                        (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
                     if (getDef) {
                         ALOGV("registering sniffer for %s", libPath.string());
                         RegisterExtractor(
@@ -271,8 +274,8 @@
             String8 libPath = String8(libDirPath) + "/" + libEntry->d_name;
             void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
             if (libHandle) {
-                MediaExtractor::GetExtractorDef getDef =
-                    (MediaExtractor::GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
+                GetExtractorDef getDef =
+                    (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
                 if (getDef) {
                     ALOGV("registering sniffer for %s", libPath.string());
                     RegisterExtractor(
@@ -306,6 +309,8 @@
         return;
     }
 
+    gIgnoreVersion = property_get_bool("debug.extractor.ignore_version", false);
+
     std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
 
     RegisterExtractorsInSystem("/system/lib"
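
The sniff() changes keep the same best-confidence selection loop while switching to the C-API types: every registered plugin reports a confidence, the highest one wins, and metadata produced by a candidate that loses is freed immediately. A simplified sketch of that loop with placeholder types:

    #include <functional>
    #include <vector>

    // Placeholder for what a plugin's sniff entry point reports.
    struct SniffResult {
        std::function<void()> creator;   // non-empty when the plugin recognized the stream
        float confidence = 0.0f;
        std::function<void()> freeMeta;  // optional cleanup for side metadata
    };

    static SniffResult sniffAll(const std::vector<std::function<SniffResult()>> &plugins) {
        SniffResult best;
        for (const auto &sniff : plugins) {
            SniffResult cur = sniff();
            if (cur.creator && cur.confidence > best.confidence) {
                if (best.freeMeta) best.freeMeta();   // drop the previous best candidate's meta
                best = cur;
            } else if (cur.freeMeta) {
                cur.freeMeta();                       // losing candidate cleans up immediately
            }
        }
        return best;
    }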
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index 04f6ade..2475e7b 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -16,8 +16,10 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MetaDataUtils"
+#include <utils/Log.h>
 
 #include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaDataUtils.h>
@@ -25,6 +27,10 @@
 namespace android {
 
 bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size) {
+    if (data == nullptr || size == 0) {
+        return false;
+    }
+
     int32_t width;
     int32_t height;
     int32_t sarWidth;
@@ -46,6 +52,44 @@
     return true;
 }
 
+bool MakeAACCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size) {
+    if (data == nullptr || size < 7) {
+        return false;
+    }
+
+    ABitReader bits(data, size);
+
+    // adts_fixed_header
+
+    if (bits.getBits(12) != 0xfffu) {
+        ALOGE("Wrong atds_fixed_header");
+        return false;
+    }
+
+    bits.skipBits(4);  // ID, layer, protection_absent
+
+    unsigned profile = bits.getBits(2);
+    if (profile == 3u) {
+        ALOGE("profile should not be 3");
+        return false;
+    }
+    unsigned sampling_freq_index = bits.getBits(4);
+    bits.getBits(1);  // private_bit
+    unsigned channel_configuration = bits.getBits(3);
+    if (channel_configuration == 0u) {
+        ALOGE("channel_config should not be 0");
+        return false;
+    }
+
+    if (!MakeAACCodecSpecificData(
+            meta, profile, sampling_freq_index, channel_configuration)) {
+        return false;
+    }
+
+    meta.setInt32(kKeyIsADTS, true);
+    return true;
+}
+
 bool MakeAACCodecSpecificData(
         MetaDataBase &meta,
         unsigned profile, unsigned sampling_freq_index,
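
The new MakeAACCodecSpecificData(meta, data, size) overload validates the 7-byte ADTS fixed header before building the CSD: the syncword must be 0xFFF, a reserved profile value of 3 and a channel configuration of 0 are rejected. The same header walk without ABitReader, as a standalone sketch:

    #include <cstddef>
    #include <cstdint>

    struct AdtsInfo { unsigned profile, samplingFreqIndex, channelConfig; };

    // ADTS fixed header layout, as read above: 12-bit syncword, 4 skipped bits
    // (ID, layer, protection_absent), 2-bit profile, 4-bit sampling_frequency_index,
    // 1 private bit, 3-bit channel_configuration.
    static bool parseAdtsFixedHeader(const uint8_t *data, size_t size, AdtsInfo *out) {
        if (data == nullptr || size < 7) return false;
        if (data[0] != 0xff || (data[1] & 0xf0) != 0xf0) return false;   // syncword must be 0xFFF
        out->profile = (data[2] >> 6) & 0x03;
        if (out->profile == 3u) return false;                            // reserved profile
        out->samplingFreqIndex = (data[2] >> 2) & 0x0f;
        out->channelConfig = ((data[2] & 0x01) << 2) | ((data[3] >> 6) & 0x03);
        return out->channelConfig != 0u;                                 // 0 is rejected, as above
    }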
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 4a7d6ca..8e8c77c 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -24,7 +24,6 @@
 #include "include/NuCachedSource2.h"
 
 #include <media/DataSource.h>
-#include <media/MediaExtractor.h>
 #include <media/MediaSource.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -34,6 +33,7 @@
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index e80ec3b..f8dde79 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -32,6 +32,7 @@
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/stagefright/MetaData.h>
 #include <media/CharacterEncodingDetector.h>
diff --git a/media/libstagefright/StagefrightPluginLoader.cpp b/media/libstagefright/StagefrightPluginLoader.cpp
index 519e870..dd5903a 100644
--- a/media/libstagefright/StagefrightPluginLoader.cpp
+++ b/media/libstagefright/StagefrightPluginLoader.cpp
@@ -46,7 +46,7 @@
     }
     mCreateInputSurface = (CodecBase::CreateInputSurfaceFunc)dlsym(
             mLibHandle, "CreateInputSurface");
-    if (mCreateBuilder == nullptr) {
+    if (mCreateInputSurface == nullptr) {
         ALOGD("Failed to find symbol: CreateInputSurface (%s)", dlerror());
     }
 }
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index cf5e91e..ada37a6 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1577,6 +1577,8 @@
     { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
     { MEDIA_MIMETYPE_AUDIO_OPUS,        AUDIO_FORMAT_OPUS},
     { MEDIA_MIMETYPE_AUDIO_AC3,         AUDIO_FORMAT_AC3},
+    { MEDIA_MIMETYPE_AUDIO_EAC3,        AUDIO_FORMAT_E_AC3},
+    { MEDIA_MIMETYPE_AUDIO_AC4,         AUDIO_FORMAT_AC4},
     { MEDIA_MIMETYPE_AUDIO_FLAC,        AUDIO_FORMAT_FLAC},
     { 0, AUDIO_FORMAT_INVALID }
 };
@@ -1867,4 +1869,3 @@
 }
 
 }  // namespace android
-
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 6819bba..9020fc1 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -475,7 +475,16 @@
                 nextVsyncTime += mVsyncPeriod;
                 if (vsyncsForLastFrame < ULONG_MAX)
                     ++vsyncsForLastFrame;
+            } else if (mTimeCorrection < -correctionLimit * 2
+                    || mTimeCorrection > correctionLimit * 2) {
+                ALOGW("correction beyond limit: %lld vs %lld (vsyncs for last frame: %zu, min: %zu)"
+                        " restarting. render=%lld",
+                        (long long)mTimeCorrection, (long long)correctionLimit,
+                        vsyncsForLastFrame, minVsyncsPerFrame, (long long)origRenderTime);
+                restart();
+                return origRenderTime;
             }
+
             ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
         }
         mLastVsyncTime = nextVsyncTime;
diff --git a/media/libstagefright/bqhelper/GraphicBufferSource.cpp b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
index dd03d38..6d93807 100644
--- a/media/libstagefright/bqhelper/GraphicBufferSource.cpp
+++ b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
@@ -767,7 +767,7 @@
             double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
             if (nFrames < 0.5 - kTimestampFluctuation) {
                 // skip this frame as it's too close to previous capture
-                ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
+                ALOGD("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
                 return false;
             }
             if (nFrames <= 1.0) {
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
index 75ca846..9c0fcfa 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
@@ -186,8 +186,8 @@
 #ifdef DEC_INTERNAL_MEMORY_OPT
                 video->vol[idx] = IMEM_vol[idx];
                 video->memoryUsage += sizeof(Vol);
-                oscl_memset(video->vol[idx], 0, sizeof(Vol));
                 if (video->vol[idx] == NULL) status = PV_FALSE;
+                else oscl_memset(video->vol[idx], 0, sizeof(Vol));
                 stream = IMEM_BitstreamDecVideo;
 #else
                 video->vol[idx] = (Vol *) oscl_malloc(sizeof(Vol));
@@ -213,6 +213,7 @@
                 else
                 {
                     int32 buffer_size;
+                    oscl_memset(stream, 0, sizeof(BitstreamDecVideo));
                     if ((buffer_size = BitstreamOpen(stream, idx)) < 0)
                     {
                         mp4dec_log("InitVideoDecoder(): Can't allocate bitstream buffer.\n");
@@ -339,27 +340,33 @@
 #ifdef DEC_INTERNAL_MEMORY_OPT
     video->currVop->yChan = IMEM_currVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/
     if (video->currVop->yChan == NULL) status = PV_FALSE;
-    video->currVop->uChan = video->currVop->yChan + size;
-    video->currVop->vChan = video->currVop->uChan + (size >> 2);
+    else {
+        video->currVop->uChan = video->currVop->yChan + size;
+        video->currVop->vChan = video->currVop->uChan + (size >> 2);
+    }
 
     video->prevVop->yChan = IMEM_prevVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/
     if (video->prevVop->yChan == NULL) status = PV_FALSE;
-    video->prevVop->uChan = video->prevVop->yChan + size;
-    video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+    else {
+        video->prevVop->uChan = video->prevVop->yChan + size;
+        video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+    }
 #else
     if (size > INT32_MAX / 3) {
         return PV_FALSE;
     }
     video->currVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
     if (video->currVop->yChan == NULL) status = PV_FALSE;
-
-    video->currVop->uChan = video->currVop->yChan + size;
-    video->currVop->vChan = video->currVop->uChan + (size >> 2);
+    else {
+        video->currVop->uChan = video->currVop->yChan + size;
+        video->currVop->vChan = video->currVop->uChan + (size >> 2);
+    }
     video->prevVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
     if (video->prevVop->yChan == NULL) status = PV_FALSE;
-
-    video->prevVop->uChan = video->prevVop->yChan + size;
-    video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+    else {
+        video->prevVop->uChan = video->prevVop->yChan + size;
+        video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+    }
 #endif
     video->memoryUsage += (size * 3);
 #endif   // MEMORY_POOL
@@ -383,8 +390,10 @@
 
             video->prevEnhcVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
             if (video->prevEnhcVop->yChan == NULL) status = PV_FALSE;
-            video->prevEnhcVop->uChan = video->prevEnhcVop->yChan + size;
-            video->prevEnhcVop->vChan = video->prevEnhcVop->uChan + (size >> 2);
+            else {
+                video->prevEnhcVop->uChan = video->prevEnhcVop->yChan + size;
+                video->prevEnhcVop->vChan = video->prevEnhcVop->uChan + (size >> 2);
+            }
             video->memoryUsage += (3 * size / 2);
 #endif
         }
@@ -431,10 +440,12 @@
 #else
     video->sliceNo = (uint8 *) oscl_malloc(nTotalMB);
     if (video->sliceNo == NULL) status = PV_FALSE;
+    else oscl_memset(video->sliceNo, 0, nTotalMB);
     video->memoryUsage += nTotalMB;
 
     video->acPredFlag = (uint8 *) oscl_malloc(nTotalMB * sizeof(uint8));
     if (video->acPredFlag == NULL) status = PV_FALSE;
+    else oscl_memset(video->acPredFlag, 0, nTotalMB * sizeof(uint8));
     video->memoryUsage += (nTotalMB);
 
     if ((size_t)nTotalMB > SIZE_MAX / sizeof(typeDCStore)) {
@@ -442,6 +453,7 @@
     }
     video->predDC = (typeDCStore *) oscl_malloc(nTotalMB * sizeof(typeDCStore));
     if (video->predDC == NULL) status = PV_FALSE;
+    else oscl_memset(video->predDC, 0, nTotalMB * sizeof(typeDCStore));
     video->memoryUsage += (nTotalMB * sizeof(typeDCStore));
 
     if (nMBPerRow > INT32_MAX - 1
@@ -450,6 +462,7 @@
     }
     video->predDCAC_col = (typeDCACStore *) oscl_malloc((nMBPerRow + 1) * sizeof(typeDCACStore));
     if (video->predDCAC_col == NULL) status = PV_FALSE;
+    else oscl_memset(video->predDCAC_col, 0, (nMBPerRow + 1) * sizeof(typeDCACStore));
     video->memoryUsage += ((nMBPerRow + 1) * sizeof(typeDCACStore));
 
     /* element zero will be used for storing vertical (col) AC coefficients */
@@ -459,9 +472,11 @@
     /* Allocating HeaderInfo structure & Quantizer array */
     video->headerInfo.Mode = (uint8 *) oscl_malloc(nTotalMB);
     if (video->headerInfo.Mode == NULL) status = PV_FALSE;
+    else oscl_memset(video->headerInfo.Mode, 0, nTotalMB);
     video->memoryUsage += nTotalMB;
     video->headerInfo.CBP = (uint8 *) oscl_malloc(nTotalMB);
     if (video->headerInfo.CBP == NULL) status = PV_FALSE;
+    else oscl_memset (video->headerInfo.CBP, 0, nTotalMB);
     video->memoryUsage += nTotalMB;
 
     if ((size_t)nTotalMB > SIZE_MAX / sizeof(int16)) {
@@ -469,6 +484,7 @@
     }
     video->QPMB = (int16 *) oscl_malloc(nTotalMB * sizeof(int16));
     if (video->QPMB == NULL) status = PV_FALSE;
+    else memset(video->QPMB, 0x0, nTotalMB * sizeof(int16));
     video->memoryUsage += (nTotalMB * sizeof(int));
 
     /* Allocating macroblock space */
@@ -489,8 +505,10 @@
     }
     video->motX = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);
     if (video->motX == NULL) status = PV_FALSE;
+    else memset(video->motX, 0, sizeof(MOT) * 4 * nTotalMB);
     video->motY = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);
     if (video->motY == NULL) status = PV_FALSE;
+    else memset(video->motY, 0, sizeof(MOT) * 4 * nTotalMB);
     video->memoryUsage += (sizeof(MOT) * 8 * nTotalMB);
 #endif
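
The pvdec_api changes consistently move pointer arithmetic and memset behind each allocation's NULL check, zero the buffer only when the allocation succeeded, and record the failure in status. That allocate/check/zero pattern as a small generic helper (illustrative, not part of the decoder):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Allocate count elements, record failure in *status, and zero the buffer
    // only when the allocation actually succeeded.
    template <typename T>
    static T *allocZeroed(size_t count, bool *status) {
        if (count == 0) return nullptr;                      // nothing to allocate
        if (count > SIZE_MAX / sizeof(T)) {                  // multiplication overflow guard
            *status = false;
            return nullptr;
        }
        T *p = static_cast<T *>(std::malloc(count * sizeof(T)));
        if (p == nullptr) {
            *status = false;                                 // mirrors "status = PV_FALSE"
        } else {
            std::memset(p, 0, count * sizeof(T));            // mirrors the added oscl_memset calls
        }
        return p;
    }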
 
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 2364684..cd984f0 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -114,7 +114,7 @@
     mConfig->crcEnabled = false;
 
     uint32_t memRequirements = pvmp3_decoderMemRequirements();
-    mDecoderBuf = malloc(memRequirements);
+    mDecoderBuf = calloc(1, memRequirements);
 
     pvmp3_InitDecoder(mConfig, mDecoderBuf);
     mIsFirst = true;
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_framedecoder.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_framedecoder.cpp
index 26bc25c..df6cd03 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_framedecoder.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_framedecoder.cpp
@@ -299,7 +299,11 @@
         }
 
 
-        bytes_to_discard = pVars->frame_start - pVars->sideInfo.main_data_begin - main_data_end;
+        // force signed computation; buffer sizes and offsets are all going to be
+        // well within the constraints of 32-bit signed math.
+        bytes_to_discard = pVars->frame_start
+                           - ((int32)pVars->sideInfo.main_data_begin)
+                           - ((int32)main_data_end);
 
 
         if (main_data_end > BUFSIZE)   /* check overflow on the buffer */
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp
index 7eaa860..e55c2e7 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_get_side_info.cpp
@@ -154,7 +154,7 @@
                 tmp = getbits_crc(inputStream, 22, crc, info->error_protection);
 
                 si->ch[ch].gran[gr].big_values            = (tmp << 10) >> 23;   /* 9 */
-                si->ch[ch].gran[gr].global_gain           = ((tmp << 19) >> 24) - 210;   /* 8 */
+                si->ch[ch].gran[gr].global_gain        = (int32)((tmp << 19) >> 24) - 210; /* 8 */
                 si->ch[ch].gran[gr].scalefac_compress     = (tmp << 27) >> 28;   /* 4 */
                 si->ch[ch].gran[gr].window_switching_flag = tmp & 1;         /* 1 */
 
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_stereo_proc.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
index 10edfc3..4338c43 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
@@ -178,6 +178,10 @@
 ; FUNCTION CODE
 ----------------------------------------------------------------------------*/
 
+#if __has_attribute(no_sanitize)
+// deliberately playing near overflow points of int32
+__attribute__((no_sanitize("integer")))
+#endif
 void pvmp3_st_mid_side(int32 xr[SUBBANDS_NUMBER*FILTERBANK_BANDS],
                        int32 xl[SUBBANDS_NUMBER*FILTERBANK_BANDS],
                        int32 Start,
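
The attribute added above is applied only when the compiler advertises support via __has_attribute, so toolchains without no_sanitize still build the file. A self-contained sketch of the same guard, with an illustrative function body:

    // Portable guard: older compilers may not define __has_attribute at all.
    #ifndef __has_attribute
    #define __has_attribute(x) 0
    #endif

    #if __has_attribute(no_sanitize)
    __attribute__((no_sanitize("integer")))   // clang's integer-sanitizer group
    #endif
    static int wrappingAdd(int a, int b) {
        // Wraps via unsigned arithmetic (the addition itself is not UB); the
        // attribute keeps -fsanitize=integer from flagging the deliberate wrap.
        return static_cast<int>(static_cast<unsigned int>(a) + static_cast<unsigned int>(b));
    }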
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index 813004b..942f850 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -431,7 +431,7 @@
             }
 
             if (mInputBufferCount == 0) {
-                CHECK(mHeader == NULL);
+                delete mHeader;
                 mHeader = new OpusHeader();
                 memset(mHeader, 0, sizeof(*mHeader));
                 if (!ParseOpusHeader(data, size, mHeader)) {
@@ -452,6 +452,9 @@
                 }
 
                 int status = OPUS_INVALID_STATE;
+                if (mDecoder != NULL) {
+                    opus_multistream_decoder_destroy(mDecoder);
+                }
                 mDecoder = opus_multistream_decoder_create(kRate,
                                                            mHeader->channels,
                                                            mHeader->num_streams,
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index f173e0f..06b15b3 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -689,7 +689,6 @@
                     notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
                     return;
                 }
-                mIsCodecConfigFlushRequired = true;
             }
 
             if (!mSampFreq || !mNumChannels) {
@@ -713,10 +712,14 @@
             signed int bytesConsumed = 0;
             int errorCode = 0;
             if (mIsCodecInitialized) {
+                mIsCodecConfigFlushRequired = true;
                 errorCode =
                     decodeXAACStream(inBuffer, inBufferLength, &bytesConsumed, &numOutBytes);
-            } else {
+            } else if (!mIsCodecConfigFlushRequired) {
                 ALOGW("Assumption that first frame after header initializes decoder failed!");
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, -1, NULL);
+                return;
             }
             inHeader->nFilledLen -= bytesConsumed;
             inHeader->nOffset += bytesConsumed;
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index c46a40f..862cc63 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -20,10 +20,12 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/ColorConverter.h>
 #include <media/stagefright/MediaErrors.h>
 
 #include "libyuv/convert_from.h"
+#include "libyuv/convert_argb.h"
 #include "libyuv/video_common.h"
 #include <functional>
 #include <sys/time.h>
@@ -44,10 +46,28 @@
 
 namespace android {
 
+static bool isRGB(OMX_COLOR_FORMATTYPE colorFormat) {
+    return colorFormat == OMX_COLOR_Format16bitRGB565
+            || colorFormat == OMX_COLOR_Format32BitRGBA8888
+            || colorFormat == OMX_COLOR_Format32bitBGRA8888;
+}
+
+bool ColorConverter::ColorSpace::isBt709() {
+    return (mStandard == ColorUtils::kColorStandardBT709);
+}
+
+
+bool ColorConverter::ColorSpace::isJpeg() {
+    return ((mStandard == ColorUtils::kColorStandardBT601_625)
+            || (mStandard == ColorUtils::kColorStandardBT601_525))
+            && (mRange == ColorUtils::kColorRangeFull);
+}
+
 ColorConverter::ColorConverter(
         OMX_COLOR_FORMATTYPE from, OMX_COLOR_FORMATTYPE to)
     : mSrcFormat(from),
       mDstFormat(to),
+      mSrcColorSpace({0, 0, 0}),
       mClip(NULL) {
 }
 
@@ -80,9 +100,18 @@
 }
 
 bool ColorConverter::isDstRGB() const {
-    return mDstFormat == OMX_COLOR_Format16bitRGB565
-            || mDstFormat == OMX_COLOR_Format32BitRGBA8888
-            || mDstFormat == OMX_COLOR_Format32bitBGRA8888;
+    return isRGB(mDstFormat);
+}
+
+void ColorConverter::setSrcColorSpace(
+        uint32_t standard, uint32_t range, uint32_t transfer) {
+    if (isRGB(mSrcFormat)) {
+        ALOGW("Can't set color space on RGB source");
+        return;
+    }
+    mSrcColorSpace.mStandard = standard;
+    mSrcColorSpace.mRange = range;
+    mSrcColorSpace.mTransfer = transfer;
 }
 
 /*
@@ -281,6 +310,13 @@
     return OK;
 }
 
+#define DECLARE_YUV2RGBFUNC(func, rgb) int (*func)(     \
+        const uint8*, int, const uint8*, int,           \
+        const uint8*, int, uint8*, int, int, int)       \
+        = mSrcColorSpace.isBt709() ? libyuv::H420To##rgb \
+        : mSrcColorSpace.isJpeg() ? libyuv::J420To##rgb  \
+        : libyuv::I420To##rgb
+
 status_t ColorConverter::convertYUV420PlanarUseLibYUV(
         const BitmapParams &src, const BitmapParams &dst) {
     uint8_t *dst_ptr = (uint8_t *)dst.mBits
@@ -298,19 +334,28 @@
 
     switch (mDstFormat) {
     case OMX_COLOR_Format16bitRGB565:
-        libyuv::I420ToRGB565(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
+    {
+        DECLARE_YUV2RGBFUNC(func, RGB565);
+        (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
                 (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
+    }
 
     case OMX_COLOR_Format32BitRGBA8888:
-        libyuv::ConvertFromI420(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight(), libyuv::FOURCC_ABGR);
+    {
+        DECLARE_YUV2RGBFUNC(func, ABGR);
+        (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
+                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
+    }
 
     case OMX_COLOR_Format32bitBGRA8888:
-        libyuv::ConvertFromI420(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight(), libyuv::FOURCC_ARGB);
+    {
+        DECLARE_YUV2RGBFUNC(func, ARGB);
+        (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
+                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
+    }
 
     default:
         return ERROR_UNSUPPORTED;
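
DECLARE_YUV2RGBFUNC picks a libyuv converter once per call based on the source color space (H420To* for BT.709, J420To* for full-range JPEG/BT.601, I420To* otherwise) and then invokes it through a single function-pointer signature. The selection pattern in isolation, with placeholder converters rather than the libyuv symbols:

    #include <cstdint>

    using Yuv2RgbFn = void (*)(const uint8_t *y, const uint8_t *u, const uint8_t *v,
                               uint8_t *rgb, int width, int height);

    // Placeholder converters; the real code binds libyuv::H420To*/J420To*/I420To*.
    static void convertBt709(const uint8_t *, const uint8_t *, const uint8_t *,
                             uint8_t *, int, int) { /* BT.709 coefficients */ }
    static void convertJpegRange(const uint8_t *, const uint8_t *, const uint8_t *,
                                 uint8_t *, int, int) { /* full-range BT.601 */ }
    static void convertBt601(const uint8_t *, const uint8_t *, const uint8_t *,
                             uint8_t *, int, int) { /* limited-range BT.601 (default) */ }

    static Yuv2RgbFn pickConverter(bool isBt709, bool isJpeg) {
        return isBt709 ? convertBt709
             : isJpeg  ? convertJpegRange
                       : convertBt601;
    }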
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 1695c75..a32cf08 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -50,6 +50,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
 const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
 const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
 const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 25be89f..b165bcb 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -52,6 +52,7 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
 extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
 extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
 extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
 
 extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
diff --git a/media/libstagefright/http/ClearMediaHTTP.cpp b/media/libstagefright/http/ClearMediaHTTP.cpp
new file mode 100644
index 0000000..bfbad1e
--- /dev/null
+++ b/media/libstagefright/http/ClearMediaHTTP.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearMediaHTTP"
+#include <utils/Log.h>
+
+#include <media/stagefright/ClearMediaHTTP.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/Utils.h>
+
+#include <media/MediaHTTPConnection.h>
+
+namespace android {
+
+ClearMediaHTTP::ClearMediaHTTP(const sp<MediaHTTPConnection> &conn)
+    : mInitCheck((conn != NULL) ? OK : NO_INIT),
+      mHTTPConnection(conn),
+      mCachedSizeValid(false),
+      mCachedSize(0ll) {
+}
+
+ClearMediaHTTP::~ClearMediaHTTP() {
+}
+
+status_t ClearMediaHTTP::connect(
+        const char *uri,
+        const KeyedVector<String8, String8> *headers,
+        off64_t /* offset */) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    KeyedVector<String8, String8> extHeaders;
+    if (headers != NULL) {
+        extHeaders = *headers;
+    }
+
+    if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
+        extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+    }
+
+    mLastURI = uri;
+    // reconnect() calls with uri == old mLastURI.c_str(), which gets zapped
+    // as part of the above assignment. Ensure no accidental later use.
+    uri = NULL;
+
+    bool success = mHTTPConnection->connect(mLastURI.c_str(), &extHeaders);
+
+    mLastHeaders = extHeaders;
+
+    mCachedSizeValid = false;
+
+    if (success) {
+        AString sanitized = uriDebugString(mLastURI);
+        mName = String8::format("ClearMediaHTTP(%s)", sanitized.c_str());
+    }
+
+    return success ? OK : UNKNOWN_ERROR;
+}
+
+void ClearMediaHTTP::disconnect() {
+    mName = String8("ClearMediaHTTP(<disconnected>)");
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    mHTTPConnection->disconnect();
+}
+
+status_t ClearMediaHTTP::initCheck() const {
+    return mInitCheck;
+}
+
+ssize_t ClearMediaHTTP::readAt(off64_t offset, void *data, size_t size) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    int64_t startTimeUs = ALooper::GetNowUs();
+
+    size_t numBytesRead = 0;
+    while (numBytesRead < size) {
+        size_t copy = size - numBytesRead;
+
+        if (copy > 64 * 1024) {
+            // limit the buffer sizes transferred across binder boundaries
+            // to avoid spurious transaction failures.
+            copy = 64 * 1024;
+        }
+
+        ssize_t n = mHTTPConnection->readAt(
+                offset + numBytesRead, (uint8_t *)data + numBytesRead, copy);
+
+        if (n < 0) {
+            return n;
+        } else if (n == 0) {
+            break;
+        }
+
+        numBytesRead += n;
+    }
+
+    int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
+
+    addBandwidthMeasurement(numBytesRead, delayUs);
+
+    return numBytesRead;
+}
+
+status_t ClearMediaHTTP::getSize(off64_t *size) {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    // Caching the returned size so that it stays valid even after a
+    // disconnect. NuCachedSource2 relies on this.
+
+    if (!mCachedSizeValid) {
+        mCachedSize = mHTTPConnection->getSize();
+        mCachedSizeValid = true;
+    }
+
+    *size = mCachedSize;
+
+    return *size < 0 ? *size : static_cast<status_t>(OK);
+}
+
+uint32_t ClearMediaHTTP::flags() {
+    return kWantsPrefetching | kIsHTTPBasedSource;
+}
+
+status_t ClearMediaHTTP::reconnectAtOffset(off64_t offset) {
+    return connect(mLastURI.c_str(), &mLastHeaders, offset);
+}
+
+
+String8 ClearMediaHTTP::getUri() {
+    if (mInitCheck != OK) {
+        return String8::empty();
+    }
+
+    String8 uri;
+    if (OK == mHTTPConnection->getUri(&uri)) {
+        return uri;
+    }
+    return String8(mLastURI.c_str());
+}
+
+String8 ClearMediaHTTP::getMIMEType() const {
+    if (mInitCheck != OK) {
+        return String8("application/octet-stream");
+    }
+
+    String8 mimeType;
+    status_t err = mHTTPConnection->getMIMEType(&mimeType);
+
+    if (err != OK) {
+        return String8("application/octet-stream");
+    }
+
+    return mimeType;
+}
+
+}  // namespace android
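
ClearMediaHTTP::readAt splits large requests into chunks of at most 64 KiB so each binder transaction stays small, stops on a short read, and propagates errors from the underlying connection. The loop in isolation, with the connection call abstracted behind a callback (a sketch, not the MediaHTTPConnection API):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <functional>

    // Stand-in for mHTTPConnection->readAt(): returns bytes read, 0 at EOF,
    // or a negative error code.
    using ChunkReader = std::function<int64_t(int64_t offset, uint8_t *data, size_t size)>;

    static int64_t readFully(const ChunkReader &readChunk,
                             int64_t offset, uint8_t *data, size_t size) {
        const size_t kMaxChunk = 64 * 1024;   // cap each transaction
        size_t numBytesRead = 0;
        while (numBytesRead < size) {
            size_t copy = std::min(size - numBytesRead, kMaxChunk);
            int64_t n = readChunk(offset + static_cast<int64_t>(numBytesRead),
                                  data + numBytesRead, copy);
            if (n < 0) return n;              // propagate the connection error
            if (n == 0) break;                // EOF: return the short count
            numBytesRead += static_cast<size_t>(n);
        }
        return static_cast<int64_t>(numBytesRead);
    }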
diff --git a/media/libstagefright/http/HTTPHelper.cpp b/media/libstagefright/http/HTTPHelper.cpp
index 77845e2..b895f03 100644
--- a/media/libstagefright/http/HTTPHelper.cpp
+++ b/media/libstagefright/http/HTTPHelper.cpp
@@ -42,11 +42,11 @@
             env, env->FindClass("android/media/MediaHTTPService"));
     CHECK(clazz.get() != NULL);
 
-    jmethodID constructID = env->GetMethodID(clazz.get(), "<init>", "()V");
+    jmethodID constructID = env->GetMethodID(clazz.get(), "<init>", "(Ljava/util/List;)V");
     CHECK(constructID != NULL);
 
     ScopedLocalRef<jobject> httpServiceObj(
-            env, env->NewObject(clazz.get(), constructID));
+            env, env->NewObject(clazz.get(), constructID, NULL));
 
     sp<IMediaHTTPService> httpService;
     if (httpServiceObj.get() != NULL) {
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index 7c9247e..0fba3dc 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -30,10 +30,7 @@
 namespace android {
 
 MediaHTTP::MediaHTTP(const sp<MediaHTTPConnection> &conn)
-    : mInitCheck((conn != NULL) ? OK : NO_INIT),
-      mHTTPConnection(conn),
-      mCachedSizeValid(false),
-      mCachedSize(0ll),
+    : ClearMediaHTTP(conn),
       mDrmManagerClient(NULL) {
 }
 
@@ -41,117 +38,6 @@
     clearDRMState_l();
 }
 
-status_t MediaHTTP::connect(
-        const char *uri,
-        const KeyedVector<String8, String8> *headers,
-        off64_t /* offset */) {
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    KeyedVector<String8, String8> extHeaders;
-    if (headers != NULL) {
-        extHeaders = *headers;
-    }
-
-    if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
-        extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
-    }
-
-    mLastURI = uri;
-    // reconnect() calls with uri == old mLastURI.c_str(), which gets zapped
-    // as part of the above assignment. Ensure no accidental later use.
-    uri = NULL;
-
-    bool success = mHTTPConnection->connect(mLastURI.c_str(), &extHeaders);
-
-    mLastHeaders = extHeaders;
-
-    mCachedSizeValid = false;
-
-    if (success) {
-        AString sanitized = uriDebugString(mLastURI);
-        mName = String8::format("MediaHTTP(%s)", sanitized.c_str());
-    }
-
-    return success ? OK : UNKNOWN_ERROR;
-}
-
-void MediaHTTP::disconnect() {
-    mName = String8("MediaHTTP(<disconnected>)");
-    if (mInitCheck != OK) {
-        return;
-    }
-
-    mHTTPConnection->disconnect();
-}
-
-status_t MediaHTTP::initCheck() const {
-    return mInitCheck;
-}
-
-ssize_t MediaHTTP::readAt(off64_t offset, void *data, size_t size) {
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    int64_t startTimeUs = ALooper::GetNowUs();
-
-    size_t numBytesRead = 0;
-    while (numBytesRead < size) {
-        size_t copy = size - numBytesRead;
-
-        if (copy > 64 * 1024) {
-            // limit the buffer sizes transferred across binder boundaries
-            // to avoid spurious transaction failures.
-            copy = 64 * 1024;
-        }
-
-        ssize_t n = mHTTPConnection->readAt(
-                offset + numBytesRead, (uint8_t *)data + numBytesRead, copy);
-
-        if (n < 0) {
-            return n;
-        } else if (n == 0) {
-            break;
-        }
-
-        numBytesRead += n;
-    }
-
-    int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
-
-    addBandwidthMeasurement(numBytesRead, delayUs);
-
-    return numBytesRead;
-}
-
-status_t MediaHTTP::getSize(off64_t *size) {
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    // Caching the returned size so that it stays valid even after a
-    // disconnect. NuCachedSource2 relies on this.
-
-    if (!mCachedSizeValid) {
-        mCachedSize = mHTTPConnection->getSize();
-        mCachedSizeValid = true;
-    }
-
-    *size = mCachedSize;
-
-    return *size < 0 ? *size : static_cast<status_t>(OK);
-}
-
-uint32_t MediaHTTP::flags() {
-    return kWantsPrefetching | kIsHTTPBasedSource;
-}
-
-status_t MediaHTTP::reconnectAtOffset(off64_t offset) {
-    return connect(mLastURI.c_str(), &mLastHeaders, offset);
-}
-
 // DRM...
 
 sp<DecryptHandle> MediaHTTP::DrmInitialization(const char* mime) {
@@ -176,33 +62,6 @@
     return mDecryptHandle;
 }
 
-String8 MediaHTTP::getUri() {
-    if (mInitCheck != OK) {
-        return String8::empty();
-    }
-
-    String8 uri;
-    if (OK == mHTTPConnection->getUri(&uri)) {
-        return uri;
-    }
-    return String8(mLastURI.c_str());
-}
-
-String8 MediaHTTP::getMIMEType() const {
-    if (mInitCheck != OK) {
-        return String8("application/octet-stream");
-    }
-
-    String8 mimeType;
-    status_t err = mHTTPConnection->getMIMEType(&mimeType);
-
-    if (err != OK) {
-        return String8("application/octet-stream");
-    }
-
-    return mimeType;
-}
-
 void MediaHTTP::clearDRMState_l() {
     if (mDecryptHandle != NULL) {
         // To release mDecryptHandle
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
index 72604e3..59265fe 100644
--- a/media/libstagefright/httplive/HTTPDownloader.cpp
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -26,8 +26,8 @@
 #include <media/MediaHTTPService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaHTTP.h>
-#include <media/stagefright/FileSource.h>
+#include <media/stagefright/ClearMediaHTTP.h>
+#include <media/stagefright/ClearFileSource.h>
 #include <openssl/aes.h>
 #include <openssl/md5.h>
 #include <utils/Mutex.h>
@@ -38,7 +38,7 @@
 HTTPDownloader::HTTPDownloader(
         const sp<MediaHTTPService> &httpService,
         const KeyedVector<String8, String8> &headers) :
-    mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())),
+    mHTTPDataSource(new ClearMediaHTTP(httpService->makeHTTPConnection())),
     mExtraHeaders(headers),
     mDisconnecting(false) {
 }
@@ -91,7 +91,7 @@
 
     if (reconnect) {
         if (!strncasecmp(url, "file://", 7)) {
-            mDataSource = new FileSource(url + 7);
+            mDataSource = new ClearFileSource(url + 7);
         } else if (strncasecmp(url, "http://", 7)
                 && strncasecmp(url, "https://", 8)) {
             return ERROR_UNSUPPORTED;
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 52791b9..8ab33f7 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -234,7 +234,11 @@
         if (mSelectedIndex >= 0 && i == (size_t)mSelectedIndex) {
             const Media &item = mMediaItems.itemAt(i);
 
-            *uri = item.makeURL(baseURL);
+            if (item.mURI.empty()) {
+                *uri = "";
+            } else {
+                *uri = item.makeURL(baseURL);
+            }
             return true;
         }
     }
@@ -465,7 +469,7 @@
         }
 
         if ((*uri).empty()) {
-            *uri = mItems.itemAt(index).mURI;
+            *uri = mItems.itemAt(index).makeURL(mBaseURI.c_str());
         }
     }
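The two M3UParser hunks above make stored playlist entries resolve against the playlist's own base URI (and pass an empty URI through unchanged) instead of handing back the raw stored string. A hypothetical, simplified illustration of that resolution follows; the helper name and logic are not from M3UParser, whose makeURL() also handles query strings and other corner cases.

#include <string>

// Illustrative only: join a relative segment URI onto the directory portion
// of the playlist URL, and leave empty or absolute URIs untouched.
static std::string resolveAgainstBase(const std::string &base, const std::string &uri) {
    if (uri.empty() || uri.find("://") != std::string::npos) {
        return uri;  // empty or already absolute: returned as-is
    }
    return base.substr(0, base.rfind('/') + 1) + uri;
}

// resolveAgainstBase("http://example.com/live/index.m3u8", "seg1.ts")
//   yields "http://example.com/live/seg1.ts"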
 
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index a0a62f4..b489183 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -21,6 +21,8 @@
 #include "../include/ID3.h"
 
 #include <media/DataSource.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ByteUtils.h>
 #include <utils/String8.h>
@@ -56,6 +58,55 @@
     DISALLOW_EVIL_CONSTRUCTORS(MemorySource);
 };
 
+class DataSourceUnwrapper : public DataSourceBase {
+
+public:
+    explicit DataSourceUnwrapper(DataSourceHelper *sourcehelper) {
+        mSource = sourcehelper;
+    }
+    virtual status_t initCheck() const { return OK; }
+
+    // Returns the number of bytes read, or -1 on failure. It's not an error if
+    // this returns zero; it just means the given offset is equal to, or
+    // beyond, the end of the source.
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
+        return mSource->readAt(offset, data, size);
+    }
+
+    // May return ERROR_UNSUPPORTED.
+    virtual status_t getSize(off64_t *size) {
+        return mSource->getSize(size);
+    }
+
+    virtual bool getUri(char * /*uriString*/, size_t /*bufferSize*/) {
+        return false;
+    }
+
+    virtual uint32_t flags() {
+        return 0;
+    }
+
+    virtual void close() {};
+private:
+    DataSourceHelper *mSource;
+};
+
+
+ID3::ID3(DataSourceHelper *sourcehelper, bool ignoreV1, off64_t offset)
+    : mIsValid(false),
+      mData(NULL),
+      mSize(0),
+      mFirstFrameOffset(0),
+      mVersion(ID3_UNKNOWN),
+      mRawSize(0) {
+    DataSourceUnwrapper source(sourcehelper);
+    mIsValid = parseV2(&source, offset);
+
+    if (!mIsValid && !ignoreV1) {
+        mIsValid = parseV1(&source);
+    }
+}
+
 ID3::ID3(DataSourceBase *source, bool ignoreV1, off64_t offset)
     : mIsValid(false),
       mData(NULL),
diff --git a/media/libstagefright/include/ID3.h b/media/libstagefright/include/ID3.h
index 7c2391e..5e433ea 100644
--- a/media/libstagefright/include/ID3.h
+++ b/media/libstagefright/include/ID3.h
@@ -24,6 +24,7 @@
 
 class DataSourceBase;
 class String8;
+class DataSourceHelper;
 
 struct ID3 {
     enum Version {
@@ -35,6 +36,7 @@
         ID3_V2_4,
     };
 
+    explicit ID3(DataSourceHelper *source, bool ignoreV1 = false, off64_t offset = 0);
     explicit ID3(DataSourceBase *source, bool ignoreV1 = false, off64_t offset = 0);
     ID3(const uint8_t *data, size_t size, bool ignoreV1 = false);
     ~ID3();
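A minimal sketch of the new DataSourceHelper-based constructor in use, assuming the caller sits next to ID3.cpp (hence the module-relative include) and already owns a DataSourceHelper from the extractor plugin API; isValid() and version() are part of the existing ID3 interface and are not shown in this hunk.

#include "../include/ID3.h"                    // same include used by ID3.cpp above
#include <media/MediaExtractorPluginHelper.h>  // declares DataSourceHelper

using android::DataSourceHelper;
using android::ID3;

// Returns true when the helper-backed source carries a parsable ID3 tag.
static bool hasUsableId3Tag(DataSourceHelper *helper) {
    ID3 id3(helper);  // new constructor; wraps helper in DataSourceUnwrapper internally
    return id3.isValid() && id3.version() != ID3::ID3_UNKNOWN;
}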
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index f78e125..a7090ad 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -26,7 +26,6 @@
 namespace android {
 
 class DataSource;
-class MediaExtractor;
 struct ImageDecoder;
 struct FrameRect;
 
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 97d15a7..1137cf1 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -137,6 +137,7 @@
         kWhatOMXDied                 = 'OMXd',
         kWhatReleaseCodecInstance    = 'relC',
         kWhatForceStateTransition    = 'fstt',
+        kWhatCheckIfStuck            = 'Cstk',
     };
 
     enum {
diff --git a/media/libstagefright/include/media/stagefright/CameraSource.h b/media/libstagefright/include/media/stagefright/CameraSource.h
index 475976b..3037b72 100644
--- a/media/libstagefright/include/media/stagefright/CameraSource.h
+++ b/media/libstagefright/include/media/stagefright/CameraSource.h
@@ -204,6 +204,7 @@
     int32_t mNumFramesReceived;
     int64_t mLastFrameTimestampUs;
     bool mStarted;
+    bool mEos;
     int32_t mNumFramesEncoded;
 
     // Time between capture of two frames.
diff --git a/media/libstagefright/include/media/stagefright/ClearDataSourceFactory.h b/media/libstagefright/include/media/stagefright/ClearDataSourceFactory.h
new file mode 100644
index 0000000..12bcdd3
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/ClearDataSourceFactory.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_SOURCE_FACTORY2_H_
+
+#define DATA_SOURCE_FACTORY2_H_
+
+#include <sys/types.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct MediaHTTPService;
+class String8;
+struct HTTPBase;
+
+class ClearDataSourceFactory {
+public:
+    static sp<DataSource> CreateFromURI(
+            const sp<MediaHTTPService> &httpService,
+            const char *uri,
+            const KeyedVector<String8, String8> *headers = NULL,
+            String8 *contentType = NULL,
+            HTTPBase *httpSource = NULL);
+
+    static sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
+    static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
+};
+
+}  // namespace android
+
+#endif  // DATA_SOURCE_FACTORY2_H_
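A minimal usage sketch for the new factory, assuming `httpService` comes from the caller's existing MediaHTTPService plumbing (as in HTTPDownloader above); the URL is a placeholder and readAt() is the usual DataSource call.

#include <stdint.h>

#include <media/DataSource.h>
#include <media/MediaHTTPService.h>
#include <media/stagefright/ClearDataSourceFactory.h>

using namespace android;

// Read the first bytes of a clear (non-DRM) source created through the factory.
static ssize_t readHead(const sp<MediaHTTPService> &httpService,
                        uint8_t *buf, size_t len) {
    sp<DataSource> source = ClearDataSourceFactory::CreateFromURI(
            httpService, "http://example.com/clip.mp4" /* placeholder URL */);
    if (source == NULL) {
        return -1;
    }
    return source->readAt(0 /* offset */, buf, len);
}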
diff --git a/media/libstagefright/include/media/stagefright/ClearFileSource.h b/media/libstagefright/include/media/stagefright/ClearFileSource.h
new file mode 100644
index 0000000..be83748
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/ClearFileSource.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_FILE_SOURCE_H_
+
+#define CLEAR_FILE_SOURCE_H_
+
+#include <stdio.h>
+
+#include <media/DataSource.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class ClearFileSource : public DataSource {
+public:
+    ClearFileSource(const char *filename);
+    // ClearFileSource takes ownership and will close the fd
+    ClearFileSource(int fd, int64_t offset, int64_t length);
+
+    virtual status_t initCheck() const;
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+
+    virtual status_t getSize(off64_t *size);
+
+    virtual uint32_t flags() {
+        return kIsLocalFileSource;
+    }
+
+    virtual String8 toString() {
+        return mName;
+    }
+
+protected:
+    virtual ~ClearFileSource();
+    virtual ssize_t readAt_l(off64_t offset, void *data, size_t size);
+
+    int mFd;
+    int64_t mOffset;
+    int64_t mLength;
+    Mutex mLock;
+
+private:
+    String8 mName;
+
+    ClearFileSource(const ClearFileSource &);
+    ClearFileSource &operator=(const ClearFileSource &);
+};
+
+}  // namespace android
+
+#endif  // CLEAR_FILE_SOURCE_H_
+
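A short sketch of the new class in use. Because the destructor is protected, a ClearFileSource has to be held through sp<>; the filename constructor declared above opens the file itself.

#include <media/stagefright/ClearFileSource.h>

using namespace android;

static sp<DataSource> openClearFile(const char *path) {
    sp<ClearFileSource> src = new ClearFileSource(path);
    if (src->initCheck() != OK) {
        return NULL;  // open failed
    }
    return src;       // read via DataSource::readAt()/getSize()
}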
diff --git a/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h b/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h
new file mode 100644
index 0000000..7fe9c74
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_MEDIA_HTTP_H_
+
+#define CLEAR_MEDIA_HTTP_H_
+
+#include <media/stagefright/foundation/AString.h>
+
+#include "include/HTTPBase.h"
+
+namespace android {
+
+struct MediaHTTPConnection;
+
+struct ClearMediaHTTP : public HTTPBase {
+    ClearMediaHTTP(const sp<MediaHTTPConnection> &conn);
+
+    virtual status_t connect(
+            const char *uri,
+            const KeyedVector<String8, String8> *headers,
+            off64_t offset);
+
+    virtual void disconnect();
+
+    virtual status_t initCheck() const;
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+
+    virtual status_t getSize(off64_t *size);
+
+    virtual uint32_t flags();
+
+    virtual status_t reconnectAtOffset(off64_t offset);
+
+protected:
+    virtual ~ClearMediaHTTP();
+
+    virtual String8 getUri();
+    virtual String8 getMIMEType() const;
+
+    AString mLastURI;
+
+private:
+    status_t mInitCheck;
+    sp<MediaHTTPConnection> mHTTPConnection;
+
+    KeyedVector<String8, String8> mLastHeaders;
+
+    bool mCachedSizeValid;
+    off64_t mCachedSize;
+
+    DISALLOW_EVIL_CONSTRUCTORS(ClearMediaHTTP);
+};
+
+}  // namespace android
+
+#endif  // CLEAR_MEDIA_HTTP_H_
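A minimal sketch of constructing the clear transport directly, mirroring what HTTPDownloader now does earlier in this patch. It assumes the caller builds inside libstagefright, since ClearMediaHTTP.h pulls in the module-relative include/HTTPBase.h, and that `service` is the caller's existing MediaHTTPService.

#include <media/MediaHTTPService.h>
#include <media/stagefright/ClearMediaHTTP.h>

using namespace android;

// The User-Agent handling and 64 KiB binder-sized reads formerly in MediaHTTP
// now live here; MediaHTTP only layers DRM on top (see MediaHTTP.h below).
static sp<ClearMediaHTTP> openClearHttp(
        const sp<MediaHTTPService> &service, const char *url) {
    sp<ClearMediaHTTP> http = new ClearMediaHTTP(service->makeHTTPConnection());
    if (http->connect(url, NULL /* headers */, 0 /* offset */) != OK) {
        return NULL;
    }
    return http;
}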
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 2b8c7c8..6d4c1bf 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -35,6 +35,8 @@
 
     bool isDstRGB() const;
 
+    void setSrcColorSpace(uint32_t standard, uint32_t range, uint32_t transfer);
+
     status_t convert(
             const void *srcBits,
             size_t srcWidth, size_t srcHeight, size_t srcStride,
@@ -46,6 +48,15 @@
             size_t dstCropRight, size_t dstCropBottom);
 
 private:
+    struct ColorSpace {
+        uint32_t mStandard;
+        uint32_t mRange;
+        uint32_t mTransfer;
+
+        bool isBt709();
+        bool isJpeg();
+    };
+
     struct BitmapParams {
         BitmapParams(
                 void *bits,
@@ -65,6 +76,7 @@
     };
 
     OMX_COLOR_FORMATTYPE mSrcFormat, mDstFormat;
+    ColorSpace mSrcColorSpace;
     uint8_t *mClip;
 
     uint8_t *initClip();
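A small sketch of the new hook, assuming isValid() from the existing ColorConverter API (not shown in this hunk); the three values are the dataspace sub-fields the decoder reports for its output buffers, treated here as opaque integers.

#include <media/stagefright/ColorConverter.h>

using namespace android;

static void configureSourceColorSpace(ColorConverter &converter,
        uint32_t standard, uint32_t range, uint32_t transfer) {
    if (converter.isValid()) {
        // New in this change: lets convert() distinguish BT.709 from BT.601
        // (and full- from limited-range) sources, per the isBt709()/isJpeg()
        // helpers added to the private ColorSpace struct above.
        converter.setSrcColorSpace(standard, range, transfer);
    }
}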
diff --git a/media/libstagefright/include/media/stagefright/FileSource.h b/media/libstagefright/include/media/stagefright/FileSource.h
index 8604890..b610eef 100644
--- a/media/libstagefright/include/media/stagefright/FileSource.h
+++ b/media/libstagefright/include/media/stagefright/FileSource.h
@@ -20,47 +20,29 @@
 
 #include <stdio.h>
 
-#include <media/DataSource.h>
+#include <media/stagefright/ClearFileSource.h>
 #include <media/stagefright/MediaErrors.h>
 #include <utils/threads.h>
 #include <drm/DrmManagerClient.h>
 
 namespace android {
 
-class FileSource : public DataSource {
+class FileSource : public ClearFileSource {
 public:
     FileSource(const char *filename);
     // FileSource takes ownership and will close the fd
     FileSource(int fd, int64_t offset, int64_t length);
 
-    virtual status_t initCheck() const;
-
     virtual ssize_t readAt(off64_t offset, void *data, size_t size);
 
-    virtual status_t getSize(off64_t *size);
-
-    virtual uint32_t flags() {
-        return kIsLocalFileSource;
-    }
-
     virtual sp<DecryptHandle> DrmInitialization(const char *mime);
 
-    virtual String8 toString() {
-        return mName;
-    }
-
     static bool requiresDrm(int fd, int64_t offset, int64_t length, const char *mime);
 
 protected:
     virtual ~FileSource();
 
 private:
-    int mFd;
-    int64_t mOffset;
-    int64_t mLength;
-    Mutex mLock;
-    String8 mName;
-
     /*for DRM*/
     sp<DecryptHandle> mDecryptHandle;
     DrmManagerClient *mDrmManagerClient;
@@ -68,7 +50,7 @@
     ssize_t mDrmBufSize;
     unsigned char *mDrmBuf;
 
-    ssize_t readAtDRM(off64_t offset, void *data, size_t size);
+    ssize_t readAtDRM_l(off64_t offset, void *data, size_t size);
 
     FileSource(const FileSource &);
     FileSource &operator=(const FileSource &);
diff --git a/media/libstagefright/include/media/stagefright/InterfaceUtils.h b/media/libstagefright/include/media/stagefright/InterfaceUtils.h
index f0ebd48..b83a958 100644
--- a/media/libstagefright/include/media/stagefright/InterfaceUtils.h
+++ b/media/libstagefright/include/media/stagefright/InterfaceUtils.h
@@ -18,7 +18,6 @@
 #define INTERFACE_UTILS_H_
 
 #include <utils/RefBase.h>
-#include <media/MediaExtractor.h>
 #include <media/stagefright/RemoteMediaExtractor.h>
 #include <media/MediaSource.h>
 #include <media/IMediaExtractor.h>
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index ad02004..7f6aae6 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -377,7 +377,7 @@
 
     MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid);
 
-    static sp<CodecBase> GetCodecBase(const AString &name);
+    static sp<CodecBase> GetCodecBase(const AString &name, const char *owner = nullptr);
 
     static status_t PostAndAwaitResponse(
             const sp<AMessage> &msg, sp<AMessage> *response);
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractor.h b/media/libstagefright/include/media/stagefright/MediaExtractor.h
new file mode 100644
index 0000000..d9456ab
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaExtractor.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_EXTRACTOR_H_
+
+#define MEDIA_EXTRACTOR_H_
+
+#include <stdio.h>
+#include <vector>
+
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <media/MediaExtractorPluginApi.h>
+#include <media/MediaExtractorPluginHelper.h>
+
+namespace android {
+
+class DataSourceBase;
+class MetaDataBase;
+struct MediaTrack;
+
+
+class ExtractorAllocTracker {
+public:
+    ExtractorAllocTracker() {
+        ALOGD("extractor allocated: %p", this);
+    }
+    virtual ~ExtractorAllocTracker() {
+        ALOGD("extractor freed: %p", this);
+    }
+};
+
+class MediaExtractor
+// : public ExtractorAllocTracker
+{
+public:
+    virtual ~MediaExtractor();
+    virtual size_t countTracks() = 0;
+    virtual MediaTrack *getTrack(size_t index) = 0;
+
+    enum GetTrackMetaDataFlags {
+        kIncludeExtensiveMetaData = 1
+    };
+    virtual status_t getTrackMetaData(
+            MetaDataBase& meta,
+            size_t index, uint32_t flags = 0) = 0;
+
+    // Return container specific meta-data. The default implementation
+    // returns an empty metadata object.
+    virtual status_t getMetaData(MetaDataBase& meta) = 0;
+
+    enum Flags {
+        CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
+        CAN_SEEK_FORWARD   = 2,  // the "seek 10secs forward button"
+        CAN_PAUSE          = 4,
+        CAN_SEEK           = 8,  // the "seek bar"
+    };
+
+    // If subclasses do _not_ override this, the default is
+    // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
+    virtual uint32_t flags() const;
+
+    virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
+        return INVALID_OPERATION;
+    }
+
+    virtual const char * name() { return "<unspecified>"; }
+
+protected:
+    MediaExtractor();
+
+private:
+    MediaExtractor(const MediaExtractor &);
+    MediaExtractor &operator=(const MediaExtractor &);
+};
+
+class MediaExtractorCUnwrapper : public MediaExtractor {
+public:
+    explicit MediaExtractorCUnwrapper(CMediaExtractor *wrapper);
+    virtual size_t countTracks();
+    virtual MediaTrack *getTrack(size_t index);
+    virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
+    virtual status_t getMetaData(MetaDataBase& meta);
+    virtual const char * name();
+    virtual uint32_t flags() const;
+    virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
+protected:
+    virtual ~MediaExtractorCUnwrapper();
+private:
+    CMediaExtractor *wrapper;
+};
+
+
+}  // namespace android
+
+#endif  // MEDIA_EXTRACTOR_H_
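A brief sketch against the relocated interface, assuming MetaDataBase's default constructor from the existing libmediaextractor headers. It works for any implementation, including the MediaExtractorCUnwrapper declared above, which forwards each call to the wrapped CMediaExtractor plugin.

#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MetaDataBase.h>

using namespace android;

// Count the tracks whose per-track metadata (MIME type, etc.) can be fetched.
static size_t countReadableTracks(MediaExtractor *extractor) {
    size_t readable = 0;
    const size_t n = extractor->countTracks();
    for (size_t i = 0; i < n; ++i) {
        MetaDataBase meta;
        if (extractor->getTrackMetaData(meta, i) == OK) {
            ++readable;
        }
    }
    return readable;
}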
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index d5f4b35..e603176 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -21,7 +21,6 @@
 #include <stdio.h>
 
 #include <media/IMediaExtractor.h>
-#include <media/MediaExtractor.h>
 
 namespace android {
 
@@ -41,6 +40,7 @@
     static Mutex gPluginMutex;
     static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
     static bool gPluginsRegistered;
+    static bool gIgnoreVersion;
 
     static void RegisterExtractorsInApk(
             const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
@@ -49,8 +49,8 @@
     static void RegisterExtractor(
             const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
 
-    static MediaExtractor::CreatorFunc sniff(DataSourceBase *source,
-            float *confidence, void **meta, MediaExtractor::FreeMetaFunc *freeMeta,
+    static CreatorFunc sniff(const sp<DataSource> &source,
+            float *confidence, void **meta, FreeMetaFunc *freeMeta,
             sp<ExtractorPlugin> &plugin);
 
     static void UpdateExtractors(const char *newUpdateApkPath);
diff --git a/media/libstagefright/include/media/stagefright/MediaHTTP.h b/media/libstagefright/include/media/stagefright/MediaHTTP.h
index fe0e613..acaa6c4 100644
--- a/media/libstagefright/include/media/stagefright/MediaHTTP.h
+++ b/media/libstagefright/include/media/stagefright/MediaHTTP.h
@@ -19,50 +19,21 @@
 #define MEDIA_HTTP_H_
 
 #include <media/stagefright/foundation/AString.h>
-
-#include "include/HTTPBase.h"
+#include <media/stagefright/ClearMediaHTTP.h>
 
 namespace android {
 
 struct MediaHTTPConnection;
 
-struct MediaHTTP : public HTTPBase {
+struct MediaHTTP : public ClearMediaHTTP {
     MediaHTTP(const sp<MediaHTTPConnection> &conn);
 
-    virtual status_t connect(
-            const char *uri,
-            const KeyedVector<String8, String8> *headers,
-            off64_t offset);
-
-    virtual void disconnect();
-
-    virtual status_t initCheck() const;
-
-    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
-
-    virtual status_t getSize(off64_t *size);
-
-    virtual uint32_t flags();
-
-    virtual status_t reconnectAtOffset(off64_t offset);
-
 protected:
     virtual ~MediaHTTP();
 
     virtual sp<DecryptHandle> DrmInitialization(const char* mime);
-    virtual String8 getUri();
-    virtual String8 getMIMEType() const;
 
 private:
-    status_t mInitCheck;
-    sp<MediaHTTPConnection> mHTTPConnection;
-
-    KeyedVector<String8, String8> mLastHeaders;
-    AString mLastURI;
-
-    bool mCachedSizeValid;
-    off64_t mCachedSize;
-
     sp<DecryptHandle> mDecryptHandle;
     DrmManagerClient *mDrmManagerClient;
 
diff --git a/media/libstagefright/include/media/stagefright/MetaDataUtils.h b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
index d5a8080..4a7107d 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataUtils.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
@@ -24,6 +24,7 @@
 
 struct ABuffer;
 bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size);
+bool MakeAACCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size);
 bool MakeAACCodecSpecificData(MetaDataBase &meta, unsigned profile, unsigned sampling_freq_index,
         unsigned channel_configuration);
 
diff --git a/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
index 509e669..9925114 100644
--- a/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
@@ -18,7 +18,7 @@
 #define REMOTE_MEDIA_EXTRACTOR_H_
 
 #include <media/IMediaExtractor.h>
-#include <media/MediaExtractor.h>
+#include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/foundation/ABase.h>
 
 namespace android {
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5cc5093..fb498d4 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -119,6 +119,7 @@
 private:
     struct StreamInfo {
         unsigned mType;
+        unsigned mTypeExt;
         unsigned mPID;
         int32_t mCASystemId;
     };
@@ -145,10 +146,12 @@
     Stream(Program *program,
            unsigned elementaryPID,
            unsigned streamType,
+           unsigned streamTypeExt,
            unsigned PCR_PID,
            int32_t CA_system_ID);
 
     unsigned type() const { return mStreamType; }
+    unsigned typeExt() const { return mStreamTypeExt; }
     unsigned pid() const { return mElementaryPID; }
     void setPID(unsigned pid) { mElementaryPID = pid; }
 
@@ -194,6 +197,7 @@
     Program *mProgram;
     unsigned mElementaryPID;
     unsigned mStreamType;
+    unsigned mStreamTypeExt;
     unsigned mPCR_PID;
     int32_t mExpectedContinuityCounter;
 
@@ -447,7 +451,7 @@
         if (descriptor_length > infoLength) {
             break;
         }
-        if (descriptor_tag == 9 && descriptor_length >= 4) {
+        if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
             found = true;
             caDescriptor->mSystemID = br->getBits(16);
             caDescriptor->mPID = br->getBits(16) & 0x1fff;
@@ -513,37 +517,65 @@
     // infoBytesRemaining is the number of bytes that make up the
     // variable length section of ES_infos. It does not include the
     // final CRC.
-    size_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
+    int32_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
 
     while (infoBytesRemaining >= 5) {
-
-        unsigned streamType = br->getBits(8);
-        ALOGV("    stream_type = 0x%02x", streamType);
-
+        StreamInfo info;
+        info.mType = br->getBits(8);
+        ALOGV("    stream_type = 0x%02x", info.mType);
         MY_LOGV("    reserved = %u", br->getBits(3));
 
-        unsigned elementaryPID = br->getBits(13);
-        ALOGV("    elementary_PID = 0x%04x", elementaryPID);
+        info.mPID = br->getBits(13);
+        ALOGV("    elementary_PID = 0x%04x", info.mPID);
 
         MY_LOGV("    reserved = %u", br->getBits(4));
 
         unsigned ES_info_length = br->getBits(12);
         ALOGV("    ES_info_length = %u", ES_info_length);
+        infoBytesRemaining -= 5 + ES_info_length;
 
         CADescriptor streamCA;
-        bool hasStreamCA = findCADescriptor(br, ES_info_length, &streamCA);
+        info.mTypeExt = EXT_DESCRIPTOR_DVB_RESERVED_MAX;
+        bool hasStreamCA = false;
+        while (ES_info_length > 2 && infoBytesRemaining >= 0) {
+            unsigned descriptor_tag = br->getBits(8);
+            ALOGV("      tag = 0x%02x", descriptor_tag);
+
+            unsigned descriptor_length = br->getBits(8);
+            ALOGV("      len = %u", descriptor_length);
+
+            ES_info_length -= 2;
+            if (descriptor_length > ES_info_length) {
+                return ERROR_MALFORMED;
+            }
+            if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
+                hasStreamCA = true;
+                streamCA.mSystemID = br->getBits(16);
+                streamCA.mPID = br->getBits(16) & 0x1fff;
+                ES_info_length -= 4;
+                streamCA.mPrivateData.assign(br->data(), br->data() + descriptor_length - 4);
+            } else if (info.mType == STREAMTYPE_PES_PRIVATE_DATA &&
+                       descriptor_tag == DESCRIPTOR_DVB_EXTENSION && descriptor_length >= 1) {
+                unsigned descTagExt = br->getBits(8);
+                ALOGV("      tag_ext = 0x%02x", descTagExt);
+                if (descTagExt == EXT_DESCRIPTOR_DVB_AC4) {
+                    info.mTypeExt = EXT_DESCRIPTOR_DVB_AC4;
+                }
+                ES_info_length -= descriptor_length;
+                descriptor_length--;
+                br->skipBits(descriptor_length * 8);
+            } else {
+                ES_info_length -= descriptor_length;
+                br->skipBits(descriptor_length * 8);
+            }
+        }
         if (hasStreamCA && !mParser->mCasManager->addStream(
-                mProgramNumber, elementaryPID, streamCA)) {
+                mProgramNumber, info.mPID, streamCA)) {
             return ERROR_MALFORMED;
         }
-        StreamInfo info;
-        info.mType = streamType;
-        info.mPID = elementaryPID;
         info.mCASystemId = hasProgramCA ? programCA.mSystemID :
                            hasStreamCA ? streamCA.mSystemID  : -1;
         infos.push(info);
-
-        infoBytesRemaining -= 5 + ES_info_length;
     }
 
     if (infoBytesRemaining != 0) {
@@ -602,7 +634,7 @@
 
         if (index < 0) {
             sp<Stream> stream = new Stream(
-                    this, info.mPID, info.mType, PCR_PID, info.mCASystemId);
+                    this, info.mPID, info.mType, info.mTypeExt, PCR_PID, info.mCASystemId);
 
             if (mSampleAesKeyItem != NULL) {
                 stream->signalNewSampleAesKey(mSampleAesKeyItem);
@@ -720,11 +752,13 @@
         Program *program,
         unsigned elementaryPID,
         unsigned streamType,
+        unsigned streamTypeExt,
         unsigned PCR_PID,
         int32_t CA_system_ID)
     : mProgram(program),
       mElementaryPID(elementaryPID),
       mStreamType(streamType),
+      mStreamTypeExt(streamTypeExt),
       mPCR_PID(PCR_PID),
       mExpectedContinuityCounter(-1),
       mPayloadStarted(false),
@@ -741,10 +775,12 @@
     ALOGV("new stream PID 0x%02x, type 0x%02x, scrambled %d, SampleEncrypted: %d",
             elementaryPID, streamType, mScrambled, mSampleEncrypted);
 
-    uint32_t flags =
-            (isVideo() && mScrambled) ? ElementaryStreamQueue::kFlag_ScrambledData :
-            (mSampleEncrypted) ? ElementaryStreamQueue::kFlag_SampleEncryptedData :
-            0;
+    uint32_t flags = 0;
+    if (((isVideo() || isAudio()) && mScrambled)) {
+        flags = ElementaryStreamQueue::kFlag_ScrambledData;
+    } else if (mSampleEncrypted) {
+        flags = ElementaryStreamQueue::kFlag_SampleEncryptedData;
+    }
 
     ElementaryStreamQueue::Mode mode = ElementaryStreamQueue::INVALID;
 
@@ -781,6 +817,16 @@
             mode = ElementaryStreamQueue::AC3;
             break;
 
+        case STREAMTYPE_EAC3:
+            mode = ElementaryStreamQueue::EAC3;
+            break;
+
+        case STREAMTYPE_PES_PRIVATE_DATA:
+            if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4) {
+                mode = ElementaryStreamQueue::AC4;
+            }
+            break;
+
         case STREAMTYPE_METADATA:
             mode = ElementaryStreamQueue::METADATA;
             break;
@@ -986,9 +1032,12 @@
         case STREAMTYPE_MPEG2_AUDIO_ADTS:
         case STREAMTYPE_LPCM_AC3:
         case STREAMTYPE_AC3:
+        case STREAMTYPE_EAC3:
         case STREAMTYPE_AAC_ENCRYPTED:
         case STREAMTYPE_AC3_ENCRYPTED:
             return true;
+        case STREAMTYPE_PES_PRIVATE_DATA:
+            return mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4;
 
         default:
             return false;
@@ -1395,7 +1444,7 @@
     // Perform the 1st pass descrambling if needed
     if (descrambleBytes > 0) {
         memcpy(mDescrambledBuffer->data(), mBuffer->data(), descrambleBytes);
-        mDescrambledBuffer->setRange(0, descrambleBytes);
+        mDescrambledBuffer->setRange(0, mBuffer->size());
 
         hidl_vec<SubSample> subSamples;
         subSamples.resize(descrambleSubSamples);
@@ -1412,10 +1461,9 @@
             }
         }
 
-        uint64_t srcOffset = 0, dstOffset = 0;
-        // If scrambled at PES-level, PES header should be skipped
+        // If scrambled at PES-level, PES header is in the clear
         if (pesScramblingControl != 0) {
-            srcOffset = dstOffset = pesOffset;
+            subSamples[0].numBytesOfClearData = pesOffset;
             subSamples[0].numBytesOfEncryptedData -= pesOffset;
         }
 
@@ -1431,9 +1479,9 @@
                 (ScramblingControl) sctrl,
                 subSamples,
                 mDescramblerSrcBuffer,
-                srcOffset,
+                0 /*srcOffset*/,
                 dstBuffer,
-                dstOffset,
+                0 /*dstOffset*/,
                 [&status, &bytesWritten, &detailedError] (
                         Status _status, uint32_t _bytesWritten,
                         const hidl_string& _detailedError) {
@@ -1450,9 +1498,21 @@
 
         ALOGV("[stream %d] descramble succeeded, %d bytes",
                 mElementaryPID, bytesWritten);
-        memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
+
+        // Set descrambleBytes to the returned result.
+        // Note that this might be smaller than the total length of input data.
+        // (e.g. when we're descrambling the PES header portion of a secure stream,
+        // the plugin might cut it off right after the PES header.)
+        descrambleBytes = bytesWritten;
     }
 
+    // |buffer| points to the buffer from which we'd parse the PES header.
+    // When the output stream is scrambled, it points to mDescrambledBuffer
+    // (unless all packets in this PES are actually clear, in which case,
+    // it points to mBuffer since we never copied into mDescrambledBuffer).
+    // When the output stream is clear, it points to mBuffer, and we'll
+    // copy all descrambled data back to mBuffer.
+    sp<ABuffer> buffer = mBuffer;
     if (mQueue->isScrambled()) {
         // Queue subSample info for scrambled queue
         sp<ABuffer> clearSizesBuffer = new ABuffer(mSubSamples.size() * 4);
@@ -1464,8 +1524,7 @@
         for (auto it = mSubSamples.begin();
                 it != mSubSamples.end(); it++, i++) {
             if ((it->transport_scrambling_mode == 0
-                    && pesScramblingControl == 0)
-                    || i < descrambleSubSamples) {
+                    && pesScramblingControl == 0)) {
                 clearSizePtr[i] = it->subSampleSize;
                 encSizePtr[i] = 0;
             } else {
@@ -1474,14 +1533,29 @@
             }
             isSync |= it->random_access_indicator;
         }
+
+        // If scrambled at PES-level, PES header is in the clear
+        if (pesScramblingControl != 0) {
+            clearSizePtr[0] = pesOffset;
+            encSizePtr[0] -= pesOffset;
+        }
         // Pass the original TS subsample size now. The PES header adjust
         // will be applied when the scrambled AU is dequeued.
+        // Note that if descrambleBytes is 0, it means this PES contains only
+        // clear TS packets, so leadingClearBytes is the entire buffer size.
         mQueue->appendScrambledData(
-                mBuffer->data(), mBuffer->size(), sctrl,
-                isSync, clearSizesBuffer, encSizesBuffer);
+                mBuffer->data(), mBuffer->size(),
+                (descrambleBytes > 0) ? descrambleBytes : mBuffer->size(),
+                sctrl, isSync, clearSizesBuffer, encSizesBuffer);
+
+        if (descrambleBytes > 0) {
+            buffer = mDescrambledBuffer;
+        }
+    } else {
+        memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
     }
 
-    ABitReader br(mBuffer->data(), mBuffer->size());
+    ABitReader br(buffer->data(), buffer->size());
     status_t err = parsePES(&br, event);
 
     if (err != OK) {
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 45ca06b..a31dc46 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -142,6 +142,7 @@
         STREAMTYPE_MPEG2_VIDEO          = 0x02,
         STREAMTYPE_MPEG1_AUDIO          = 0x03,
         STREAMTYPE_MPEG2_AUDIO          = 0x04,
+        STREAMTYPE_PES_PRIVATE_DATA     = 0x06,
         STREAMTYPE_MPEG2_AUDIO_ADTS     = 0x0f,
         STREAMTYPE_MPEG4_VIDEO          = 0x10,
         STREAMTYPE_METADATA             = 0x15,
@@ -153,6 +154,7 @@
         // Stream type 0x83 is non-standard,
         // it could be LPCM or TrueHD AC3
         STREAMTYPE_LPCM_AC3             = 0x83,
+        STREAMTYPE_EAC3                 = 0x87,
 
         //Sample Encrypted types
         STREAMTYPE_H264_ENCRYPTED       = 0xDB,
@@ -160,6 +162,20 @@
         STREAMTYPE_AC3_ENCRYPTED        = 0xC1,
     };
 
+    enum {
+        // From ISO/IEC 13818-1: 2007 (E), Table 2-29
+        DESCRIPTOR_CA                   = 0x09,
+
+        // DVB BlueBook A038 Table 12
+        DESCRIPTOR_DVB_EXTENSION        = 0x7F,
+    };
+
+    // DVB BlueBook A038 Table 109
+    enum {
+        EXT_DESCRIPTOR_DVB_AC4              = 0x15,
+        EXT_DESCRIPTOR_DVB_RESERVED_MAX     = 0x7F,
+    };
+
 protected:
     virtual ~ATSParser();
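An illustrative restatement of the PMT descriptor walk added in ATSParser.cpp above, written against raw ES_info bytes rather than ABitReader: a stream_type 0x06 (PES private data) entry is only treated as AC-4 when its descriptor loop carries a DVB extension descriptor (DESCRIPTOR_DVB_EXTENSION, 0x7F) whose first payload byte is the AC-4 extension tag (EXT_DESCRIPTOR_DVB_AC4, 0x15). The helper below is a sketch, not the parser's own code.

#include <stddef.h>
#include <stdint.h>

static bool esInfoDescribesAc4(const uint8_t *esInfo, size_t len) {
    size_t i = 0;
    while (i + 2 <= len) {
        const uint8_t tag = esInfo[i];
        const uint8_t descLen = esInfo[i + 1];
        if (i + 2 + descLen > len) {
            break;  // malformed descriptor loop
        }
        if (tag == 0x7F /* DESCRIPTOR_DVB_EXTENSION */ && descLen >= 1
                && esInfo[i + 2] == 0x15 /* EXT_DESCRIPTOR_DVB_AC4 */) {
            return true;
        }
        i += 2 + descLen;
    }
    return false;
}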
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index ece0692..9e154a3 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -226,6 +226,7 @@
         int32_t cryptoMode;
         if (buffer->meta()->findInt32("cryptoMode", &cryptoMode)) {
             int32_t cryptoKey;
+            int32_t pesOffset;
             sp<ABuffer> clearBytesBuffer, encBytesBuffer;
 
             CHECK(buffer->meta()->findInt32("cryptoKey", &cryptoKey));
@@ -233,6 +234,8 @@
                     && clearBytesBuffer != NULL);
             CHECK(buffer->meta()->findBuffer("encBytes", &encBytesBuffer)
                     && encBytesBuffer != NULL);
+            CHECK(buffer->meta()->findInt32("pesOffset", &pesOffset)
+                    && (pesOffset >= 0) && (pesOffset < 65536));
 
             bufmeta.setInt32(kKeyCryptoMode, cryptoMode);
 
@@ -240,6 +243,11 @@
             bufmeta.setData(kKeyCryptoIV, 0, array, 16);
 
             array[0] = (uint8_t) (cryptoKey & 0xff);
+            // array[1] contains PES header flag, which we don't use.
+            // array[2~3] contain the PES offset.
+            array[2] = (uint8_t) (pesOffset & 0xff);
+            array[3] = (uint8_t) ((pesOffset >> 8) & 0xff);
+
             bufmeta.setData(kKeyCryptoKey, 0, array, 16);
 
             bufmeta.setData(kKeyPlainSizes, 0,
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 0fa9fcb..fb8b9fd 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -86,6 +86,21 @@
     mCasSessionId = sessionId;
 }
 
+static int32_t readVariableBits(ABitReader &bits, int32_t nbits) {
+    int32_t value = 0;
+    int32_t more_bits = 1;
+
+    while (more_bits) {
+        value += bits.getBits(nbits);
+        more_bits = bits.getBits(1);
+        if (!more_bits)
+            break;
+        value++;
+        value <<= nbits;
+    }
+    return value;
+}
+
 // Parse AC3 header assuming the current ptr is start position of syncframe,
 // update metadata only applicable, and return the payload size
 static unsigned parseAC3SyncFrame(
@@ -195,8 +210,153 @@
     return payloadSize;
 }
 
-static bool IsSeeminglyValidAC3Header(const uint8_t *ptr, size_t size) {
-    return parseAC3SyncFrame(ptr, size, NULL) > 0;
+// Parse an EAC3 header assuming the current ptr is at the start of a syncframe,
+// update the metadata only if applicable, and return the payload size.
+// ATSC A/52:2012 E2.3.1
+static unsigned parseEAC3SyncFrame(
+    const uint8_t *ptr, size_t size, sp<MetaData> *metaData) {
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+    static const unsigned samplingRateTable[] = {48000, 44100, 32000};
+    static const unsigned samplingRateTable2[] = {24000, 22050, 16000};
+
+    ABitReader bits(ptr, size);
+    if (bits.numBitsLeft() < 16) {
+        ALOGE("Not enough bits left for further parsing");
+        return 0;
+    }
+    if (bits.getBits(16) != 0x0B77) {
+        ALOGE("No valid sync word in EAC3 header");
+        return 0;
+    }
+
+    // we parse up to bsid so there needs to be at least that many bits
+    if (bits.numBitsLeft() < 2 + 3 + 11 + 2 + 2 + 3 + 1 + 5) {
+        ALOGE("Not enough bits left for further parsing");
+        return 0;
+    }
+
+    unsigned strmtyp = bits.getBits(2);
+    if (strmtyp == 3) {
+        ALOGE("Incorrect strmtyp in EAC3 header");
+        return 0;
+    }
+
+    unsigned substreamid = bits.getBits(3);
+    // only the first independent stream is supported
+    if ((strmtyp == 0 || strmtyp == 2) && substreamid != 0)
+        return 0;
+
+    unsigned frmsiz = bits.getBits(11);
+    unsigned fscod = bits.getBits(2);
+
+    unsigned samplingRate = 0;
+    if (fscod == 0x3) {
+        unsigned fscod2 = bits.getBits(2);
+        if (fscod2 == 3) {
+            ALOGW("Incorrect fscod2 in EAC3 header");
+            return 0;
+        }
+        samplingRate = samplingRateTable2[fscod2];
+    } else {
+        samplingRate = samplingRateTable[fscod];
+        unsigned numblkscod __unused = bits.getBits(2);
+    }
+
+    unsigned acmod = bits.getBits(3);
+    unsigned lfeon = bits.getBits(1);
+    unsigned bsid = bits.getBits(5);
+    if (bsid < 11 || bsid > 16) {
+        ALOGW("Incorrect bsid in EAC3 header. Could be AC-3 or some unknown EAC3 format");
+        return 0;
+    }
+
+    // we currently only support the first independent stream
+    if (metaData != NULL && (strmtyp == 0 || strmtyp == 2)) {
+        unsigned channelCount = channelCountTable[acmod] + lfeon;
+        ALOGV("EAC3 channelCount = %d", channelCount);
+        ALOGV("EAC3 samplingRate = %d", samplingRate);
+        (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EAC3);
+        (*metaData)->setInt32(kKeyChannelCount, channelCount);
+        (*metaData)->setInt32(kKeySampleRate, samplingRate);
+        (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+    }
+
+    unsigned payloadSize = frmsiz + 1;
+    payloadSize <<= 1;  // convert from 16-bit words to bytes
+
+    return payloadSize;
+}
+
+// Parse an AC4 header assuming the current ptr is at the start of a syncframe,
+// and update frameSize and metadata.
+static status_t parseAC4SyncFrame(
+        const uint8_t *ptr, size_t size, unsigned &frameSize, sp<MetaData> *metaData) {
+    // ETSI TS 103 190-2 V1.1.1 (2015-09), Annex C
+    // The sync_word can be either 0xAC40 or 0xAC41.
+    static const int kSyncWordAC40 = 0xAC40;
+    static const int kSyncWordAC41 = 0xAC41;
+
+    size_t headerSize = 0;
+    ABitReader bits(ptr, size);
+    int32_t syncWord = bits.getBits(16);
+    if ((syncWord != kSyncWordAC40) && (syncWord != kSyncWordAC41)) {
+        ALOGE("Invalid syncword in AC4 header");
+        return ERROR_MALFORMED;
+    }
+    headerSize += 2;
+
+    frameSize = bits.getBits(16);
+    headerSize += 2;
+    if (frameSize == 0xFFFF) {
+        frameSize = bits.getBits(24);
+        headerSize += 3;
+    }
+
+    if (frameSize == 0) {
+        ALOGE("Invalid frame size in AC4 header");
+        return ERROR_MALFORMED;
+    }
+    frameSize += headerSize;
+    // If the sync_word is 0xAC41, a crc_word is also transmitted.
+    if (syncWord == kSyncWordAC41) {
+        frameSize += 2; // crc_word
+    }
+    ALOGV("AC4 frameSize = %u", frameSize);
+
+    // ETSI TS 103 190-2 V1.1.1 6.2.1.1
+    uint32_t bitstreamVersion = bits.getBits(2);
+    if (bitstreamVersion == 3) {
+        bitstreamVersion += readVariableBits(bits, 2);
+    }
+
+    bits.skipBits(10); // Sequence Counter
+
+    uint32_t bWaitFrames = bits.getBits(1);
+    if (bWaitFrames) {
+        uint32_t waitFrames = bits.getBits(3);
+        if (waitFrames > 0) {
+            bits.skipBits(2); // br_code;
+        }
+    }
+
+    // ETSI TS 103 190 V1.1.1 Table 82
+    bool fsIndex = bits.getBits(1);
+    uint32_t samplingRate = fsIndex ? 48000 : 44100;
+
+    if (metaData != NULL) {
+        ALOGV("dequeueAccessUnitAC4 Setting mFormat");
+        (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+        (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+        // [FIXME] AC4 channel count is defined per presentation. Provide a default channel count
+        // as stereo for the entire stream.
+        (*metaData)->setInt32(kKeyChannelCount, 2);
+        (*metaData)->setInt32(kKeySampleRate, samplingRate);
+    }
+    return OK;
+}
+
+static status_t IsSeeminglyValidAC4Header(const uint8_t *ptr, size_t size, unsigned &frameSize) {
+    return parseAC4SyncFrame(ptr, size, frameSize, NULL);
 }
 
 static bool IsSeeminglyValidADTSHeader(
@@ -279,7 +439,8 @@
         ALOGE("appending data after EOS");
         return ERROR_MALFORMED;
     }
-    if (mBuffer == NULL || mBuffer->size() == 0) {
+
+    if (!isScrambled() && (mBuffer == NULL || mBuffer->size() == 0)) {
         switch (mMode) {
             case H264:
             case MPEG_VIDEO:
@@ -390,12 +551,19 @@
             }
 
             case AC3:
+            case EAC3:
             {
                 uint8_t *ptr = (uint8_t *)data;
 
                 ssize_t startOffset = -1;
                 for (size_t i = 0; i < size; ++i) {
-                    if (IsSeeminglyValidAC3Header(&ptr[i], size - i)) {
+                    unsigned payloadSize = 0;
+                    if (mMode == AC3) {
+                        payloadSize = parseAC3SyncFrame(&ptr[i], size - i, NULL);
+                    } else if (mMode == EAC3) {
+                        payloadSize = parseEAC3SyncFrame(&ptr[i], size - i, NULL);
+                    }
+                    if (payloadSize > 0) {
                         startOffset = i;
                         break;
                     }
@@ -406,7 +574,7 @@
                 }
 
                 if (startOffset > 0) {
-                    ALOGI("found something resembling an AC3 syncword at "
+                    ALOGI("found something resembling an (E)AC3 syncword at "
                           "offset %zd",
                           startOffset);
                 }
@@ -416,6 +584,43 @@
                 break;
             }
 
+            case AC4:
+            {
+                uint8_t *ptr = (uint8_t *)data;
+                unsigned frameSize = 0;
+                ssize_t startOffset = -1;
+
+                // A valid AC4 stream should have a minimum of 7 bytes in its buffer.
+                // (Sync header 4 bytes + AC4 toc 3 bytes)
+                if (size < 7) {
+                    return ERROR_MALFORMED;
+                }
+                for (size_t i = 0; i < size; ++i) {
+                    if (IsSeeminglyValidAC4Header(&ptr[i], size - i, frameSize) == OK) {
+                        startOffset = i;
+                        break;
+                    }
+                }
+
+                if (startOffset < 0) {
+                    return ERROR_MALFORMED;
+                }
+
+                if (startOffset > 0) {
+                    ALOGI("found something resembling an AC4 syncword at "
+                          "offset %zd",
+                          startOffset);
+                }
+                if (frameSize != size - startOffset) {
+                    ALOGV("AC4 frame size is %u bytes, while the buffer size is %zd bytes.",
+                          frameSize, size - startOffset);
+                }
+
+                data = &ptr[startOffset];
+                size -= startOffset;
+                break;
+            }
+
             case MPEG_AUDIO:
             {
                 uint8_t *ptr = (uint8_t *)data;
@@ -494,6 +699,7 @@
 
 void ElementaryStreamQueue::appendScrambledData(
         const void *data, size_t size,
+        size_t leadingClearBytes,
         int32_t keyId, bool isSync,
         sp<ABuffer> clearSizes, sp<ABuffer> encSizes) {
     if (!isScrambled()) {
@@ -521,6 +727,7 @@
 
     ScrambledRangeInfo scrambledInfo;
     scrambledInfo.mLength = size;
+    scrambledInfo.mLeadingClearBytes = leadingClearBytes;
     scrambledInfo.mKeyId = keyId;
     scrambledInfo.mIsSync = isSync;
     scrambledInfo.mClearSizes = clearSizes;
@@ -533,7 +740,6 @@
 
 sp<ABuffer> ElementaryStreamQueue::dequeueScrambledAccessUnit() {
     size_t nextScan = mBuffer->size();
-    mBuffer->setRange(0, 0);
     int32_t pesOffset = 0, pesScramblingControl = 0;
     int64_t timeUs = fetchTimestamp(nextScan, &pesOffset, &pesScramblingControl);
     if (timeUs < 0ll) {
@@ -544,6 +750,7 @@
     // return scrambled unit
     int32_t keyId = pesScramblingControl, isSync = 0, scrambledLength = 0;
     sp<ABuffer> clearSizes, encSizes;
+    size_t leadingClearBytes;
     while (mScrambledRangeInfos.size() > mRangeInfos.size()) {
         auto it = mScrambledRangeInfos.begin();
         ALOGV("[stream %d] fetching scrambled range: size=%zu", mMode, it->mLength);
@@ -561,6 +768,7 @@
         clearSizes = it->mClearSizes;
         encSizes = it->mEncSizes;
         isSync = it->mIsSync;
+        leadingClearBytes = it->mLeadingClearBytes;
         mScrambledRangeInfos.erase(it);
     }
     if (scrambledLength == 0) {
@@ -568,26 +776,74 @@
         return NULL;
     }
 
-    // skip the PES header, and copy the rest into scrambled access unit
-    sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
-            mScrambledBuffer->data() + pesOffset,
-            scrambledLength - pesOffset);
-
-    // fix up first sample size after skipping the PES header
-    if (pesOffset > 0) {
-        int32_t &firstClearSize = *(int32_t*)clearSizes->data();
-        int32_t &firstEncSize = *(int32_t*)encSizes->data();
-        // Cut away the PES header
-        if (firstClearSize >= pesOffset) {
-            // This is for TS-level scrambling, we descrambled the first
-            // (or it was clear to begin with)
-            firstClearSize -= pesOffset;
-        } else if (firstEncSize >= pesOffset) {
-            // This can only be PES-level scrambling
-            firstEncSize -= pesOffset;
-        }
+    // Retrieve the leading clear bytes info, and use it to set the clear
+    // range on mBuffer. Note that the leading clear byte count includes the
+    // PES header portion, while mBuffer doesn't.
+    if ((int32_t)leadingClearBytes > pesOffset) {
+        mBuffer->setRange(0, leadingClearBytes - pesOffset);
+    } else {
+        mBuffer->setRange(0, 0);
     }
 
+    // Try to parse the format, and if unavailable set up a dummy format.
+    // Only the following modes are supported for scrambled content for now
+    // (this will be expanded later).
+    if (mFormat == NULL) {
+        mFormat = new MetaData;
+        switch (mMode) {
+            case H264:
+            {
+                if (!MakeAVCCodecSpecificData(
+                        *mFormat, mBuffer->data(), mBuffer->size())) {
+                    ALOGI("Creating dummy AVC format for scrambled content");
+
+                    mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+                    mFormat->setInt32(kKeyWidth, 1280);
+                    mFormat->setInt32(kKeyHeight, 720);
+                }
+                break;
+            }
+            case AAC:
+            {
+                if (!MakeAACCodecSpecificData(
+                        *mFormat, mBuffer->data(), mBuffer->size())) {
+                    ALOGI("Creating dummy AAC format for scrambled content");
+
+                    MakeAACCodecSpecificData(*mFormat,
+                            1 /*profile*/, 7 /*sampling_freq_index*/, 1 /*channel_config*/);
+                    mFormat->setInt32(kKeyIsADTS, true);
+                }
+
+                break;
+            }
+            case MPEG_VIDEO:
+            {
+                ALOGI("Creating dummy MPEG format for scrambled content");
+
+                mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
+                mFormat->setInt32(kKeyWidth, 1280);
+                mFormat->setInt32(kKeyHeight, 720);
+                break;
+            }
+            default:
+            {
+                ALOGE("Unknown mode for scrambled content");
+                return NULL;
+            }
+        }
+
+        // for MediaExtractor.CasInfo
+        mFormat->setInt32(kKeyCASystemID, mCASystemId);
+        mFormat->setData(kKeyCASessionID,
+                0, mCasSessionId.data(), mCasSessionId.size());
+    }
+
+    mBuffer->setRange(0, 0);
+
+    // copy into scrambled access unit
+    sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
+            mScrambledBuffer->data(), scrambledLength);
+
     scrambledAccessUnit->meta()->setInt64("timeUs", timeUs);
     if (isSync) {
         scrambledAccessUnit->meta()->setInt32("isSync", 1);
@@ -600,6 +856,7 @@
     scrambledAccessUnit->meta()->setInt32("cryptoKey", keyId);
     scrambledAccessUnit->meta()->setBuffer("clearBytes", clearSizes);
     scrambledAccessUnit->meta()->setBuffer("encBytes", encSizes);
+    scrambledAccessUnit->meta()->setInt32("pesOffset", pesOffset);
 
     memmove(mScrambledBuffer->data(),
             mScrambledBuffer->data() + scrambledLength,
@@ -614,7 +871,11 @@
 }
 
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnit() {
-    if ((mFlags & kFlag_AlignedData) && mMode == H264 && !isScrambled()) {
+    if (isScrambled()) {
+        return dequeueScrambledAccessUnit();
+    }
+
+    if ((mFlags & kFlag_AlignedData) && mMode == H264) {
         if (mRangeInfos.empty()) {
             return NULL;
         }
@@ -648,7 +909,10 @@
         case AAC:
             return dequeueAccessUnitAAC();
         case AC3:
-            return dequeueAccessUnitAC3();
+        case EAC3:
+            return dequeueAccessUnitEAC3();
+        case AC4:
+            return dequeueAccessUnitAC4();
         case MPEG_VIDEO:
             return dequeueAccessUnitMPEGVideo();
         case MPEG4_VIDEO:
@@ -666,34 +930,38 @@
     }
 }
 
-sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC3() {
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitEAC3() {
     unsigned syncStartPos = 0;  // in bytes
     unsigned payloadSize = 0;
     sp<MetaData> format = new MetaData;
 
-    ALOGV("dequeueAccessUnit_AC3[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+    ALOGV("dequeueAccessUnitEAC3[%d]: mBuffer %p(%zu)", mAUIndex,
+            mBuffer->data(), mBuffer->size());
 
     while (true) {
         if (syncStartPos + 2 >= mBuffer->size()) {
             return NULL;
         }
 
-        payloadSize = parseAC3SyncFrame(
-                mBuffer->data() + syncStartPos,
-                mBuffer->size() - syncStartPos,
-                &format);
+        uint8_t *ptr = mBuffer->data() + syncStartPos;
+        size_t size = mBuffer->size() - syncStartPos;
+        if (mMode == AC3) {
+            payloadSize = parseAC3SyncFrame(ptr, size, &format);
+        } else if (mMode == EAC3) {
+            payloadSize = parseEAC3SyncFrame(ptr, size, &format);
+        }
         if (payloadSize > 0) {
             break;
         }
 
-        ALOGV("dequeueAccessUnit_AC3[%d]: syncStartPos %u payloadSize %u",
+        ALOGV("dequeueAccessUnitEAC3[%d]: syncStartPos %u payloadSize %u",
                 mAUIndex, syncStartPos, payloadSize);
 
         ++syncStartPos;
     }
 
     if (mBuffer->size() < syncStartPos + payloadSize) {
-        ALOGV("Not enough buffer size for AC3");
+        ALOGV("Not enough buffer size for E/AC3");
         return NULL;
     }
 
@@ -701,7 +969,6 @@
         mFormat = format;
     }
 
-
     int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
     if (timeUs < 0ll) {
         ALOGE("negative timeUs");
@@ -710,7 +977,12 @@
 
     // Not decrypting if key info not available (e.g., scanner/extractor parsing ts files)
     if (mSampleDecryptor != NULL) {
-        mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+        if (mMode == AC3) {
+            mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+        } else if (mMode == EAC3) {
+            ALOGE("EAC3 AU is encrypted and decryption is not supported");
+            return NULL;
+        }
     }
     mAUIndex++;
 
@@ -730,6 +1002,69 @@
     return accessUnit;
 }
 
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC4() {
+    unsigned syncStartPos = 0;
+    unsigned payloadSize = 0;
+    sp<MetaData> format = new MetaData;
+    ALOGV("dequeueAccessUnit_AC4[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+
+    // A valid AC4 stream should have a minimum of 7 bytes in its buffer.
+    // (Sync header 4 bytes + AC4 toc 3 bytes)
+    if (mBuffer->size() < 7) {
+        return NULL;
+    }
+
+    while (true) {
+        if (syncStartPos + 2 >= mBuffer->size()) {
+            return NULL;
+        }
+
+        status_t status = parseAC4SyncFrame(
+                    mBuffer->data() + syncStartPos,
+                    mBuffer->size() - syncStartPos,
+                    payloadSize,
+                    &format);
+        if (status == OK) {
+            break;
+        }
+
+        ALOGV("dequeueAccessUnit_AC4[%d]: syncStartPos %u payloadSize %u",
+                mAUIndex, syncStartPos, payloadSize);
+
+        ++syncStartPos;
+    }
+
+    if (mBuffer->size() < syncStartPos + payloadSize) {
+        ALOGV("Not enough buffer size for AC4");
+        return NULL;
+    }
+
+    if (mFormat == NULL) {
+        mFormat = format;
+    }
+
+    int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+    if (timeUs < 0ll) {
+        ALOGE("negative timeUs");
+        return NULL;
+    }
+    mAUIndex++;
+
+    sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+    memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+    accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
+
+    memmove(
+            mBuffer->data(),
+            mBuffer->data() + syncStartPos + payloadSize,
+            mBuffer->size() - syncStartPos - payloadSize);
+
+    mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+    return accessUnit;
+}
+
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() {
     if (mBuffer->size() < 4) {
         return NULL;
@@ -851,25 +1186,11 @@
         bool protection_absent = bits.getBits(1) != 0;
 
         if (mFormat == NULL) {
-            unsigned profile = bits.getBits(2);
-            if (profile == 3u) {
-                ALOGE("profile should not be 3");
-                return NULL;
-            }
-            unsigned sampling_freq_index = bits.getBits(4);
-            bits.getBits(1);  // private_bit
-            unsigned channel_configuration = bits.getBits(3);
-            if (channel_configuration == 0u) {
-                ALOGE("channel_config should not be 0");
-                return NULL;
-            }
-            bits.skipBits(2);  // original_copy, home
-
             mFormat = new MetaData;
-            MakeAACCodecSpecificData(*mFormat,
-                    profile, sampling_freq_index, channel_configuration);
-
-            mFormat->setInt32(kKeyIsADTS, true);
+            if (!MakeAACCodecSpecificData(
+                    *mFormat, mBuffer->data() + offset, mBuffer->size() - offset)) {
+                return NULL;
+            }
 
             int32_t sampleRate;
             int32_t numChannels;
@@ -884,12 +1205,12 @@
 
             ALOGI("found AAC codec config (%d Hz, %d channels)",
                  sampleRate, numChannels);
-        } else {
-            // profile_ObjectType, sampling_frequency_index, private_bits,
-            // channel_configuration, original_copy, home
-            bits.skipBits(12);
         }
 
+        // profile_ObjectType, sampling_frequency_index, private_bits,
+        // channel_configuration, original_copy, home
+        bits.skipBits(12);
+
         // adts_variable_header
 
         // copyright_identification_bit, copyright_identification_start
@@ -1004,27 +1325,6 @@
 }
 
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
-    if (isScrambled()) {
-        if (mBuffer == NULL || mBuffer->size() == 0) {
-            return NULL;
-        }
-        if (mFormat == NULL) {
-            mFormat = new MetaData;
-            if (!MakeAVCCodecSpecificData(*mFormat, mBuffer->data(), mBuffer->size())) {
-                ALOGW("Creating dummy AVC format for scrambled content");
-                mFormat = new MetaData;
-                mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-                mFormat->setInt32(kKeyWidth, 1280);
-                mFormat->setInt32(kKeyHeight, 720);
-            }
-            // for MediaExtractor.CasInfo
-            mFormat->setInt32(kKeyCASystemID, mCASystemId);
-            mFormat->setData(kKeyCASessionID, 0,
-                    mCasSessionId.data(), mCasSessionId.size());
-        }
-        return dequeueScrambledAccessUnit();
-    }
-
     const uint8_t *data = mBuffer->data();
 
     size_t size = mBuffer->size();
@@ -1324,25 +1624,6 @@
 }
 
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitMPEGVideo() {
-    if (isScrambled()) {
-        if (mBuffer == NULL || mBuffer->size() == 0) {
-            return NULL;
-        }
-        if (mFormat == NULL) {
-            ALOGI("Creating dummy MPEG format for scrambled content");
-            mFormat = new MetaData;
-            mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
-            mFormat->setInt32(kKeyWidth, 1280);
-            mFormat->setInt32(kKeyHeight, 720);
-
-            // for MediaExtractor.CasInfo
-            mFormat->setInt32(kKeyCASystemID, mCASystemId);
-            mFormat->setData(kKeyCASessionID, 0,
-                    mCasSessionId.data(), mCasSessionId.size());
-        }
-        return dequeueScrambledAccessUnit();
-    }
-
     const uint8_t *data = mBuffer->data();
     size_t size = mBuffer->size();
 
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index ffcb502..3227f47 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -38,6 +38,8 @@
         H264,
         AAC,
         AC3,
+        EAC3,
+        AC4,
         MPEG_AUDIO,
         MPEG_VIDEO,
         MPEG4_VIDEO,
@@ -59,6 +61,7 @@
 
     void appendScrambledData(
             const void *data, size_t size,
+            size_t leadingClearBytes,
             int32_t keyId, bool isSync,
             sp<ABuffer> clearSizes, sp<ABuffer> encSizes);
 
@@ -84,8 +87,8 @@
     };
 
     struct ScrambledRangeInfo {
-        //int64_t mTimestampUs;
         size_t mLength;
+        size_t mLeadingClearBytes;
         int32_t mKeyId;
         int32_t mIsSync;
         sp<ABuffer> mClearSizes;
@@ -115,7 +118,8 @@
 
     sp<ABuffer> dequeueAccessUnitH264();
     sp<ABuffer> dequeueAccessUnitAAC();
-    sp<ABuffer> dequeueAccessUnitAC3();
+    sp<ABuffer> dequeueAccessUnitEAC3();
+    sp<ABuffer> dequeueAccessUnitAC4();
     sp<ABuffer> dequeueAccessUnitMPEGAudio();
     sp<ABuffer> dequeueAccessUnitMPEGVideo();
     sp<ABuffer> dequeueAccessUnitMPEG4Video();
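
The dequeue path above folds AC-3 and E-AC-3 into a single dequeueAccessUnitEAC3() that dispatches on mMode, while AC-4 gets its own handler keyed off a 4-byte sync header plus a 3-byte TOC. The parsers themselves are outside this hunk; the sketch below only illustrates, under the usual ETSI TS 102 366 layout, how an AC-3 frame can be told apart from an E-AC-3 frame at a 0x0B77 sync word via the shared 5-bit bsid field. It is not the code used by parseAC3SyncFrame()/parseEAC3SyncFrame().

#include <cstddef>
#include <cstdint>

enum class Ac3Variant { kNone, kAc3, kEac3 };

// Both bitstream variants carry a 5-bit bsid field at bit offset 40 of the
// frame; AC-3 uses bsid <= 8, E-AC-3 uses bsid 11..16 (ETSI TS 102 366).
static Ac3Variant classifyAc3SyncFrame(const uint8_t* data, size_t size) {
    if (size < 6 || data[0] != 0x0B || data[1] != 0x77) {
        return Ac3Variant::kNone;      // no sync word at this position
    }
    const uint8_t bsid = data[5] >> 3; // top 5 bits of byte 5
    if (bsid <= 8) {
        return Ac3Variant::kAc3;
    }
    if (bsid >= 11 && bsid <= 16) {
        return Ac3Variant::kEac3;
    }
    return Ac3Variant::kNone;          // reserved / unsupported bsid
}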
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 7d2c2dd..32113c2 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -1085,7 +1085,8 @@
         }
 
         case OMXBuffer::kBufferTypeANWBuffer: {
-            if (mPortMode[portIndex] != IOMX::kPortModePresetANWBuffer) {
+            if (mPortMode[portIndex] != IOMX::kPortModePresetANWBuffer
+                    && mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer) {
                 break;
             }
             return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
@@ -1609,12 +1610,15 @@
     }
     BufferMeta *buffer_meta = static_cast<BufferMeta *>(header->pAppPrivate);
 
+    // Invalidate buffers on the client side before calling OMX_FreeBuffer.
+    // Otherwise, pending client-side events might access the buffers after they are freed.
+    invalidateBufferID(buffer);
+
     OMX_ERRORTYPE err = OMX_FreeBuffer(mHandle, portIndex, header);
     CLOG_IF_ERROR(freeBuffer, err, "%s:%u %#x", portString(portIndex), portIndex, buffer);
 
     delete buffer_meta;
     buffer_meta = NULL;
-    invalidateBufferID(buffer);
 
     return StatusFromOMXError(err);
 }
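
The reordering above is the whole fix: the buffer ID is removed from the client-visible table before OMX_FreeBuffer() runs, so a stale event can no longer resolve the ID to freed memory. A minimal, purely illustrative sketch of that unregister-before-free ordering (HandleTable and Resource are invented for the example, not part of OMXNodeInstance):

#include <cstdint>
#include <map>
#include <memory>

struct Resource {};  // stands in for the OMX buffer header + BufferMeta

class HandleTable {
public:
    uint32_t add(std::unique_ptr<Resource> r) {
        const uint32_t id = mNextId++;
        mEntries[id] = std::move(r);
        return id;
    }
    // Teardown order mirrors the freeBuffer() change: drop the ID mapping
    // first, then destroy the resource, so a late lookup fails cleanly.
    void remove(uint32_t id) {
        auto it = mEntries.find(id);
        if (it == mEntries.end()) return;
        std::unique_ptr<Resource> doomed = std::move(it->second);
        mEntries.erase(it);   // 1) handle can no longer resolve
        doomed.reset();       // 2) only now release the backing resource
    }
    Resource* lookup(uint32_t id) const {
        auto it = mEntries.find(id);
        return it == mEntries.end() ? nullptr : it->second.get();
    }
private:
    std::map<uint32_t, std::unique_ptr<Resource>> mEntries;
    uint32_t mNextId = 1;
};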
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 895a4ce..5388ba7 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -28,7 +28,6 @@
 #include <cutils/properties.h>
 #include <media/DataSource.h>
 #include <media/IMediaHTTPService.h>
-#include <media/MediaExtractor.h>
 #include <media/MediaSource.h>
 #include <media/OMXBuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index d459cbd..665d51a 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -24,7 +24,7 @@
 
 #include <media/MediaHTTPConnection.h>
 #include <media/MediaHTTPService.h>
-#include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/ClearMediaHTTP.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/Utils.h>
@@ -41,7 +41,7 @@
       mFlags(flags),
       mNetLooper(new ALooper),
       mCancelled(false),
-      mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())) {
+      mHTTPDataSource(new ClearMediaHTTP(httpService->makeHTTPConnection())) {
     mNetLooper->setName("sdp net");
     mNetLooper->start(false /* runOnCallingThread */,
                       false /* canCallJava */,
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index ccddd6e..cff2803 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -99,6 +99,7 @@
     MTP_EVENT_STORE_ADDED,
     MTP_EVENT_STORE_REMOVED,
     MTP_EVENT_DEVICE_PROP_CHANGED,
+    MTP_EVENT_OBJECT_INFO_CHANGED,
 };
 
 MtpServer::MtpServer(IMtpDatabase* database, int controlFd, bool ptp,
@@ -259,6 +260,11 @@
     sendEvent(MTP_EVENT_OBJECT_REMOVED, handle);
 }
 
+void MtpServer::sendObjectInfoChanged(MtpObjectHandle handle) {
+    ALOGV("sendObjectInfoChanged %d\n", handle);
+    sendEvent(MTP_EVENT_OBJECT_INFO_CHANGED, handle);
+}
+
 void MtpServer::sendStoreAdded(MtpStorageID id) {
     ALOGV("sendStoreAdded %08X\n", id);
     sendEvent(MTP_EVENT_STORE_ADDED, id);
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index f6939d7..1f8799f 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -115,6 +115,7 @@
 
     void                sendObjectAdded(MtpObjectHandle handle);
     void                sendObjectRemoved(MtpObjectHandle handle);
+    void                sendObjectInfoChanged(MtpObjectHandle handle);
     void                sendDevicePropertyChanged(MtpDeviceProperty property);
 
 private:
diff --git a/media/mtp/PosixAsyncIO.cpp b/media/mtp/PosixAsyncIO.cpp
index e67c568..72c07cc 100644
--- a/media/mtp/PosixAsyncIO.cpp
+++ b/media/mtp/PosixAsyncIO.cpp
@@ -15,42 +15,109 @@
  */
 
 #include <android-base/logging.h>
-#include <condition_variable>
 #include <memory>
-#include <mutex>
+#include <pthread.h>
 #include <queue>
+#include <thread>
 #include <unistd.h>
 
 #include "PosixAsyncIO.h"
 
 namespace {
 
-void read_func(struct aiocb *aiocbp) {
-    aiocbp->ret = TEMP_FAILURE_RETRY(pread(aiocbp->aio_fildes,
-                aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
-    if (aiocbp->ret == -1) aiocbp->error = errno;
+std::thread gWorkerThread;
+std::deque<struct aiocb*> gWorkQueue;
+bool gSuspended = true;
+int gAiocbRefcount = 0;
+std::mutex gLock;
+std::condition_variable gWait;
+
+void work_func(void *) {
+    pthread_setname_np(pthread_self(), "AsyncIO work");
+    while (true) {
+        struct aiocb *aiocbp;
+        {
+            std::unique_lock<std::mutex> lk(gLock);
+            gWait.wait(lk, []{return gWorkQueue.size() > 0 || gSuspended;});
+            if (gSuspended)
+                return;
+            aiocbp = gWorkQueue.back();
+            gWorkQueue.pop_back();
+        }
+        CHECK(aiocbp->queued);
+        int ret;
+        if (aiocbp->read) {
+            ret = TEMP_FAILURE_RETRY(pread(aiocbp->aio_fildes,
+                    aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
+        } else {
+            ret = TEMP_FAILURE_RETRY(pwrite(aiocbp->aio_fildes,
+                    aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
+        }
+        {
+            std::unique_lock<std::mutex> lk(aiocbp->lock);
+            aiocbp->ret = ret;
+            if (aiocbp->ret == -1) {
+                aiocbp->error = errno;
+            }
+            aiocbp->queued = false;
+        }
+        aiocbp->cv.notify_all();
+    }
 }
 
-void write_func(struct aiocb *aiocbp) {
-    aiocbp->ret = TEMP_FAILURE_RETRY(pwrite(aiocbp->aio_fildes,
-                aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
-    if (aiocbp->ret == -1) aiocbp->error = errno;
+int aio_add(struct aiocb *aiocbp) {
+    CHECK(!aiocbp->queued);
+    aiocbp->queued = true;
+    {
+        std::unique_lock<std::mutex> lk(gLock);
+        gWorkQueue.push_front(aiocbp);
+    }
+    gWait.notify_one();
+    return 0;
 }
 
 } // end anonymous namespace
 
+aiocb::aiocb() {
+    this->ret = 0;
+    this->queued = false;
+    {
+        std::unique_lock<std::mutex> lk(gLock);
+        if (gAiocbRefcount == 0) {
+            CHECK(gWorkQueue.size() == 0);
+            CHECK(gSuspended);
+            gSuspended = false;
+            gWorkerThread = std::thread(work_func, nullptr);
+        }
+        gAiocbRefcount++;
+    }
+}
+
 aiocb::~aiocb() {
-    CHECK(!thread.joinable());
+    CHECK(!this->queued);
+    {
+        std::unique_lock<std::mutex> lk(gLock);
+        CHECK(!gSuspended);
+        if (gAiocbRefcount == 1) {
+            CHECK(gWorkQueue.size() == 0);
+            gSuspended = true;
+            lk.unlock();
+            gWait.notify_one();
+            gWorkerThread.join();
+            lk.lock();
+        }
+        gAiocbRefcount--;
+    }
 }
 
 int aio_read(struct aiocb *aiocbp) {
-    aiocbp->thread = std::thread(read_func, aiocbp);
-    return 0;
+    aiocbp->read = true;
+    return aio_add(aiocbp);
 }
 
 int aio_write(struct aiocb *aiocbp) {
-    aiocbp->thread = std::thread(write_func, aiocbp);
-    return 0;
+    aiocbp->read = false;
+    return aio_add(aiocbp);
 }
 
 int aio_error(const struct aiocb *aiocbp) {
@@ -64,7 +131,10 @@
 int aio_suspend(struct aiocb *aiocbp[], int n,
         const struct timespec *) {
     for (int i = 0; i < n; i++) {
-        aiocbp[i]->thread.join();
+        {
+            std::unique_lock<std::mutex> lk(aiocbp[i]->lock);
+            aiocbp[i]->cv.wait(lk, [aiocbp, i]{return !aiocbp[i]->queued;});
+        }
     }
     return 0;
 }
diff --git a/media/mtp/PosixAsyncIO.h b/media/mtp/PosixAsyncIO.h
index 590aaef..2bb5735 100644
--- a/media/mtp/PosixAsyncIO.h
+++ b/media/mtp/PosixAsyncIO.h
@@ -17,10 +17,11 @@
 #ifndef _POSIXASYNCIO_H
 #define _POSIXASYNCIO_H
 
+#include <condition_variable>
+#include <mutex>
 #include <sys/cdefs.h>
 #include <sys/types.h>
 #include <time.h>
-#include <thread>
 #include <unistd.h>
 
 /**
@@ -35,10 +36,15 @@
     size_t aio_nbytes;
 
     // Used internally
-    std::thread thread;
+    bool read;
+    bool queued;
     ssize_t ret;
     int error;
 
+    std::mutex lock;
+    std::condition_variable cv;
+
+    aiocb();
     ~aiocb();
 };
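
With the per-request std::thread gone, callers keep the same aio_* surface but all I/O now funnels through a single lazily started worker. A usage sketch, under the assumption that the remaining aiocb members follow the POSIX-style names in PosixAsyncIO.h (aio_fildes, aio_buf, aio_offset) and that an aio_return() accessor is declared alongside aio_error():

#include "PosixAsyncIO.h"

// Reads `count` bytes at `offset` from fd, blocking until the shared worker
// thread has completed the request. Returns -1 on queueing or I/O failure.
ssize_t readChunkAsync(int fd, void* buf, size_t count, off_t offset) {
    struct aiocb request;              // constructor starts the worker if needed
    request.aio_fildes = fd;
    request.aio_buf = buf;
    request.aio_nbytes = count;
    request.aio_offset = offset;

    if (aio_read(&request) != 0) {     // enqueue the pread on the worker thread
        return -1;
    }
    struct aiocb* list[1] = { &request };
    aio_suspend(list, 1, nullptr);     // wait on the per-request condition variable
    if (aio_error(&request) != 0) {    // errno recorded by the worker on failure
        return -1;
    }
    return aio_return(&request);       // bytes transferred
}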
 
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 6b20bca..42285f8 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -811,7 +811,13 @@
         size_t *encryptedbytes) {
 
     // size needed to store all the crypto data
-    size_t cryptosize = sizeof(AMediaCodecCryptoInfo) + sizeof(size_t) * numsubsamples * 2;
+    size_t cryptosize;
+    // Equivalent to sizeof(AMediaCodecCryptoInfo) + sizeof(size_t) * numsubsamples * 2,
+    // computed with overflow checks.
+    if (__builtin_mul_overflow(sizeof(size_t) * 2, numsubsamples, &cryptosize) ||
+            __builtin_add_overflow(cryptosize, sizeof(AMediaCodecCryptoInfo), &cryptosize)) {
+        ALOGE("crypto size overflow");
+        return NULL;
+    }
     AMediaCodecCryptoInfo *ret = (AMediaCodecCryptoInfo*) malloc(cryptosize);
     if (!ret) {
         ALOGE("couldn't allocate %zu bytes", cryptosize);
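
The crypto-info fix replaces the unchecked size expression with compiler-checked arithmetic, so an attacker-controlled numsubsamples can no longer wrap cryptosize and cause an under-allocation. A standalone sketch of the same __builtin_mul_overflow/__builtin_add_overflow pattern (Header and the function name are illustrative):

#include <cstdio>
#include <cstdlib>

struct Header { size_t count; };  // stand-in for AMediaCodecCryptoInfo

// Allocates sizeof(Header) + numElements * elementSize, refusing sizes that
// would overflow size_t instead of silently under-allocating.
void* allocWithTrailingArray(size_t numElements, size_t elementSize) {
    size_t total;
    if (__builtin_mul_overflow(numElements, elementSize, &total) ||
            __builtin_add_overflow(total, sizeof(Header), &total)) {
        fprintf(stderr, "size overflow: %zu x %zu\n", numElements, elementSize);
        return nullptr;
    }
    return malloc(total);
}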
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 6d10f1c..5597488 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -309,6 +309,7 @@
     }
     String8 defaultUrl;
     DrmPlugin::KeyRequestType keyRequestType;
+    mObj->mKeyRequest.clear();
     status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
             mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
             &keyRequestType);
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 38e12e3..f936118 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -36,13 +36,12 @@
 #ifndef _NDK_IMAGE_H
 #define _NDK_IMAGE_H
 
+#include <stdint.h>
 #include <sys/cdefs.h>
 
 #include "NdkMediaError.h"
 
-#if __ANDROID_API__ >= 26
 #include <android/hardware_buffer.h>
-#endif /* __ANDROID_API__ >= 26 */
 
 __BEGIN_DECLS
 
@@ -516,6 +515,8 @@
     int32_t bottom;
 } AImageCropRect;
 
+#if __ANDROID_API__ >= 24
+
 /**
  * Return the image back to the system and delete the AImage object from memory.
  *
@@ -712,6 +713,10 @@
         const AImage* image, int planeIdx,
         /*out*/uint8_t** data, /*out*/int* dataLength) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 26
+
 /**
  * Return the image back to the system and delete the AImage object from memory asynchronously.
  *
@@ -756,6 +761,8 @@
  */
 media_status_t AImage_getHardwareBuffer(const AImage* image, /*out*/AHardwareBuffer** buffer) __INTRODUCED_IN(26);
 
+#endif /* __ANDROID_API__ >= 26 */
+
 __END_DECLS
 
 #endif //_NDK_IMAGE_H
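
Wrapping the declarations in __ANDROID_API__ guards means NDK clients building with a low minSdkVersion simply never see symbols introduced later, instead of hitting link errors. A hedged sketch of the corresponding client-side pattern, using the AImage_getHardwareBuffer() entry point declared above:

#include <media/NdkImage.h>

// Returns true if a hardware buffer could be obtained for the image.
bool tryGetHardwareBuffer(const AImage* image) {
#if __ANDROID_API__ >= 26
    AHardwareBuffer* buffer = nullptr;
    return AImage_getHardwareBuffer(image, &buffer) == AMEDIA_OK;
#else
    (void)image;   // entry point not declared before API 26; fall back
    return false;
#endif
}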
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index eb1a44a..68de176 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -50,6 +50,8 @@
  */
 typedef struct AImageReader AImageReader;
 
+#if __ANDROID_API__ >= 24
+
 /**
  * Create a new reader for images of the desired size and format.
  *
@@ -296,6 +298,10 @@
 media_status_t AImageReader_setImageListener(
         AImageReader* reader, AImageReader_ImageListener* listener) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 26
+
 /**
  * AImageReader constructor similar to {@link AImageReader_new} that takes an additional parameter
  * for the consumer usage. All other parameters and the return values are identical to those passed
@@ -455,6 +461,8 @@
 media_status_t AImageReader_setBufferRemovedListener(
         AImageReader* reader, AImageReader_BufferRemovedListener* listener) __INTRODUCED_IN(26);
 
+#endif /* __ANDROID_API__ >= 26 */
+
 __END_DECLS
 
 #endif //_NDK_IMAGE_READER_H
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b329b39..9dc120d 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -121,6 +121,8 @@
       AMediaCodecOnAsyncError           onAsyncError;
 };
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Create codec by name. Use this if you know the exact codec you want to use.
  * When configuring, you will need to specify whether to use the codec as an
@@ -274,6 +276,8 @@
 media_status_t AMediaCodec_releaseOutputBufferAtTime(
         AMediaCodec *mData, size_t idx, int64_t timestampNs) __INTRODUCED_IN(21);
 
+#if __ANDROID_API__ >= 26
+
 /**
  * Creates a Surface that can be used as the input to encoder, in place of input buffers
  *
@@ -344,6 +348,10 @@
  */
 media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData) __INTRODUCED_IN(26);
 
+#endif /* __ANDROID_API__ >= 26 */
+
+#if __ANDROID_API__ >= 28
+
 /**
  * Get the component name. If the codec was created by createDecoderByType
  * or createEncoderByType, what component is chosen is not known beforehand.
@@ -405,6 +413,8 @@
  */
 bool AMediaCodecActionCode_isTransient(int32_t actionCode) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 typedef enum {
     AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
     AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
@@ -483,6 +493,8 @@
  */
 media_status_t AMediaCodecCryptoInfo_getEncryptedBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif //_NDK_MEDIA_CODEC_H
diff --git a/media/ndk/include/media/NdkMediaCrypto.h b/media/ndk/include/media/NdkMediaCrypto.h
index b673adc..bcdf9a0 100644
--- a/media/ndk/include/media/NdkMediaCrypto.h
+++ b/media/ndk/include/media/NdkMediaCrypto.h
@@ -47,6 +47,8 @@
 
 typedef uint8_t AMediaUUID[16];
 
+#if __ANDROID_API__ >= 21
+
 bool AMediaCrypto_isCryptoSchemeSupported(const AMediaUUID uuid) __INTRODUCED_IN(21);
 
 bool AMediaCrypto_requiresSecureDecoderComponent(const char *mime) __INTRODUCED_IN(21);
@@ -55,6 +57,8 @@
 
 void AMediaCrypto_delete(AMediaCrypto* crypto) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_CRYPTO_H
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 3a4373c..ea5ba0c 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -38,6 +38,8 @@
 struct AMediaDataSource;
 typedef struct AMediaDataSource AMediaDataSource;
 
+#if __ANDROID_API__ >= 28
+
 /*
  * AMediaDataSource's callbacks will be invoked on an implementation-defined thread
  * or thread pool. No guarantees are provided about which thread(s) will be used for
@@ -133,6 +135,8 @@
         AMediaDataSource*,
         AMediaDataSourceClose) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_DATASOURCE_H
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 24c0d6d..0209681 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -87,6 +87,8 @@
 typedef void (*AMediaDrmEventListener)(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         AMediaDrmEventType eventType, int extra, const uint8_t *data, size_t dataSize);
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Query if the given scheme identified by its UUID is supported on this device, and
  * whether the drm plugin is able to handle the media container format specified by mimeType.
@@ -459,6 +461,8 @@
         const char *macAlgorithm, uint8_t *keyId, const uint8_t *message, size_t messageSize,
         const uint8_t *signature, size_t signatureSize) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif //_NDK_MEDIA_DRM_H
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index 9f60891..6a1796f 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -49,6 +49,8 @@
 struct AMediaExtractor;
 typedef struct AMediaExtractor AMediaExtractor;
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Create new media extractor
  */
@@ -72,12 +74,16 @@
         const char *location) __INTRODUCED_IN(21);
         // TODO support headers
 
+#if __ANDROID_API__ >= 28
+
 /**
  * Set the custom data source implementation from which the extractor will read.
  */
 media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*,
         AMediaDataSource *src) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 /**
  * Return the number of tracks in the previously specified media file
  */
@@ -173,6 +179,8 @@
     AMEDIAEXTRACTOR_SAMPLE_FLAG_ENCRYPTED = 2,
 };
 
+#if __ANDROID_API__ >= 28
+
 /**
  * Returns the format of the extractor. The caller must free the returned format
  * using AMediaFormat_delete(format).
@@ -219,6 +227,10 @@
 media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex,
         AMediaFormat *fmt) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_EXTRACTOR_H
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index dd39acf..5f7804d 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -46,6 +46,8 @@
 struct AMediaFormat;
 typedef struct AMediaFormat AMediaFormat;
 
+#if __ANDROID_API__ >= 21
+
 AMediaFormat *AMediaFormat_new() __INTRODUCED_IN(21);
 media_status_t AMediaFormat_delete(AMediaFormat*) __INTRODUCED_IN(21);
 
@@ -88,73 +90,76 @@
 /**
  * XXX should these be ints/enums that we look up in a table as needed?
  */
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
-extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
-extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
-extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
-extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
-extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
-extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
-extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
-extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
-extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
-extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
-extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
-extern const char* AMEDIAFORMAT_KEY_CSD;
-extern const char* AMEDIAFORMAT_KEY_CSD_0;
-extern const char* AMEDIAFORMAT_KEY_CSD_1;
-extern const char* AMEDIAFORMAT_KEY_CSD_2;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_DURATION;
-extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
-extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS;
-extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
-extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
-extern const char* AMEDIAFORMAT_KEY_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
-extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
-extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
-extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
-extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
-extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
-extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
-extern const char* AMEDIAFORMAT_KEY_LATENCY;
-extern const char* AMEDIAFORMAT_KEY_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
-extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_MIME;
-extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA;
-extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
-extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
-extern const char* AMEDIAFORMAT_KEY_PRIORITY;
-extern const char* AMEDIAFORMAT_KEY_PROFILE;
-extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
-extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
-extern const char* AMEDIAFORMAT_KEY_ROTATION;
-extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_SEI;
-extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_STRIDE;
-extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID;
-extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
-extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_TIME_US;
-extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
-extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_BIT_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_0 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_1 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_2 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DURATION __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FRAME_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_HEIGHT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_IS_ADTS __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_LANGUAGE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_LATENCY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MIME __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PRIORITY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PROFILE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_ROTATION __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_SEI __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_STRIDE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TIME_US __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_WIDTH __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
+#if __ANDROID_API__ >= 28
 bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out) __INTRODUCED_IN(28);
 bool AMediaFormat_getRect(AMediaFormat*, const char *name,
         int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) __INTRODUCED_IN(28);
@@ -163,6 +168,7 @@
 void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value) __INTRODUCED_IN(28);
 void AMediaFormat_setRect(AMediaFormat*, const char* name,
         int32_t left, int32_t top, int32_t right, int32_t bottom) __INTRODUCED_IN(28);
+#endif /* __ANDROID_API__ >= 28 */
 
 __END_DECLS
 
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 75c70ed..7393867 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -53,6 +53,8 @@
     AMEDIAMUXER_OUTPUT_FORMAT_WEBM   = 1,
 } OutputFormat;
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Create new media muxer
  */
@@ -121,6 +123,8 @@
         size_t trackIdx, const uint8_t *data,
         const AMediaCodecBufferInfo *info) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_MUXER_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index fb56694..d828d6a 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -32,55 +32,66 @@
     AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
-    AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+    AMEDIAFORMAT_KEY_AAC_PROFILE; # var introduced=21
     AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
     AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
     AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
-    AMEDIAFORMAT_KEY_BIT_RATE; # var
+    AMEDIAFORMAT_KEY_BIT_RATE; # var introduced=21
     AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
-    AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
-    AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
-    AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+    AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var introduced=21
+    AMEDIAFORMAT_KEY_CHANNEL_MASK; # var introduced=21
+    AMEDIAFORMAT_KEY_COLOR_FORMAT; # var introduced=21
     AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
     AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_0; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_1; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_2; # var introduced=28
     AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
-    AMEDIAFORMAT_KEY_DURATION; # var
-    AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
-    AMEDIAFORMAT_KEY_FRAME_RATE; # var
+    AMEDIAFORMAT_KEY_DISPLAY_HEIGHT; # var introduced=28
+    AMEDIAFORMAT_KEY_DISPLAY_WIDTH; # var introduced=28
+    AMEDIAFORMAT_KEY_DURATION; # var introduced=21
+    AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var introduced=21
+    AMEDIAFORMAT_KEY_FRAME_RATE; # var introduced=21
     AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
     AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
     AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
-    AMEDIAFORMAT_KEY_HEIGHT; # var
+    AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
     AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
-    AMEDIAFORMAT_KEY_IS_ADTS; # var
-    AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
-    AMEDIAFORMAT_KEY_IS_DEFAULT; # var
-    AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
-    AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
-    AMEDIAFORMAT_KEY_LANGUAGE; # var
+    AMEDIAFORMAT_KEY_IS_ADTS; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_DEFAULT; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var introduced=21
+    AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
+    AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
     AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
     AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
-    AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
-    AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
-    AMEDIAFORMAT_KEY_MAX_WIDTH; # var
-    AMEDIAFORMAT_KEY_MIME; # var
+    AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
+    AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
+    AMEDIAFORMAT_KEY_MAX_WIDTH; # var introduced=21
+    AMEDIAFORMAT_KEY_MIME; # var introduced=21
+    AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
     AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
     AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
     AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
     AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
-    AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
-    AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+    AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
+    AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
     AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
-    AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+    AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
+    AMEDIAFORMAT_KEY_SEI; # var introduced=28
     AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
-    AMEDIAFORMAT_KEY_STRIDE; # var
+    AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
+    AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID; # var introduced=28
     AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
+    AMEDIAFORMAT_KEY_TIME_US; # var introduced=28
+    AMEDIAFORMAT_KEY_TRACK_INDEX; # var introduced=28
     AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
-    AMEDIAFORMAT_KEY_WIDTH; # var
+    AMEDIAFORMAT_KEY_WIDTH; # var introduced=21
     AMediaCodecActionCode_isRecoverable; # introduced=28
     AMediaCodecActionCode_isTransient; # introduced=28
     AMediaCodecCryptoInfo_delete;
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index de8e46a..f5b3f92 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -22,6 +22,7 @@
         "ProcessInfo.cpp",
         "SchedulingPolicyService.cpp",
         "ServiceUtilities.cpp",
+        "TimeCheck.cpp",
     ],
     shared_libs: [
         "libbinder",
@@ -31,12 +32,20 @@
         "libmemunreachable",
     ],
 
+    logtags: ["EventLogTags.logtags"],
+
     cflags: [
         "-Wall",
         "-Wextra",
         "-Werror",
     ],
 
+    product_variables: {
+        product_is_iot: {
+            cflags: ["-DTARGET_ANDROID_THINGS"],
+        },
+    },
+
     local_include_dirs: ["include"],
     export_include_dirs: ["include"],
 }
diff --git a/media/utils/EventLogTags.logtags b/media/utils/EventLogTags.logtags
new file mode 100644
index 0000000..67f0ea8
--- /dev/null
+++ b/media/utils/EventLogTags.logtags
@@ -0,0 +1,41 @@
+# The entries in this file map a sparse set of log tag numbers to tag names.
+# This is installed on the device, in /system/etc, and parsed by logcat.
+#
+# Tag numbers are decimal integers, from 0 to 2^31.  (Let's leave the
+# negative values alone for now.)
+#
+# Tag names are one or more ASCII letters and numbers or underscores, i.e.
+# "[A-Z][a-z][0-9]_".  Do not include spaces or punctuation (the former
+# impacts log readability, the latter makes regex searches more annoying).
+#
+# Tag numbers and names are separated by whitespace.  Blank lines and lines
+# starting with '#' are ignored.
+#
+# Optionally, after the tag names can be put a description for the value(s)
+# of the tag. Description are in the format
+#    (<name>|data type[|data unit])
+# Multiple values are separated by commas.
+#
+# The data type is a number from the following values:
+# 1: int
+# 2: long
+# 3: string
+# 4: list
+#
+# The data unit is a number taken from the following list:
+# 1: Number of objects
+# 2: Number of bytes
+# 3: Number of milliseconds
+# 4: Number of allocations
+# 5: Id
+# 6: Percent
+# Default value for data of type int/long is 2 (bytes).
+#
+# See system/core/logcat/event.logtags for the master copy of the tags.
+
+# 61000 - 61199 reserved for audioserver
+
+61000 audioserver_binder_timeout (command|3)
+
+# NOTE - the range 1000000-2000000 is reserved for partners and others who
+# want to define their own log tags without conflicting with the core platform.
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 0d50be0..1c54aec 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -158,6 +158,27 @@
     return ok;
 }
 
+bool modifyDefaultAudioEffectsAllowed() {
+    static const String16 sModifyDefaultAudioEffectsAllowed(
+            "android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkCallingPermission(sModifyDefaultAudioEffectsAllowed);
+
+#ifdef TARGET_ANDROID_THINGS
+    if (!ok) {
+        // Use a secondary permission on Android Things to allow a more lenient level of protection.
+        static const String16 sModifyDefaultAudioEffectsAndroidThingsAllowed(
+                "com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+        ok = PermissionCache::checkCallingPermission(
+                sModifyDefaultAudioEffectsAndroidThingsAllowed);
+    }
+    if (!ok) ALOGE("com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+#else
+    if (!ok) ALOGE("android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+#endif
+    return ok;
+}
+
 bool dumpAllowed() {
     static const String16 sDump("android.permission.DUMP");
     // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
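
modifyDefaultAudioEffectsAllowed() follows the same shape as the other checks in this file: a PermissionCache lookup, with a more lenient Android Things fallback behind TARGET_ANDROID_THINGS. A hedged sketch of the expected call-site pattern (the service method here is invented for illustration):

#include <mediautils/ServiceUtilities.h>
#include <utils/Errors.h>

namespace android {

status_t setDefaultEffectExample() {
    if (!modifyDefaultAudioEffectsAllowed()) {
        return PERMISSION_DENIED;  // caller lacks MODIFY_DEFAULT_AUDIO_EFFECTS
    }
    // ... update the default effect configuration here ...
    return NO_ERROR;
}

}  // namespace android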
diff --git a/media/libmedia/TimeCheck.cpp b/media/utils/TimeCheck.cpp
similarity index 91%
rename from media/libmedia/TimeCheck.cpp
rename to media/utils/TimeCheck.cpp
index dab5d4f..59cf4ef 100644
--- a/media/libmedia/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -15,7 +15,9 @@
  */
 
 
+#include <utils/Log.h>
 #include <media/TimeCheck.h>
+#include <media/EventLog.h>
 
 namespace android {
 
@@ -81,7 +83,10 @@
             status = mCond.waitRelative(mMutex, waitTimeNs);
         }
     }
-    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "TimeCheck timeout for %s", tag);
+    if (status != NO_ERROR) {
+        LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+        LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
+    }
     return true;
 }
 
diff --git a/media/utils/include/mediautils/EventLog.h b/media/utils/include/mediautils/EventLog.h
new file mode 100644
index 0000000..553d3bd
--- /dev/null
+++ b/media/utils/include/mediautils/EventLog.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_AUDIO_EVENT_LOG_H_
+#define ANDROID_AUDIO_EVENT_LOG_H_
+
+namespace android {
+
+// keep values in sync with frameworks/av/media/utils/EventLogTags.logtags
+enum {
+  LOGTAG_AUDIO_BINDER_TIMEOUT = 61000,
+};
+
+}  // namespace android
+
+#endif  // ANDROID_AUDIO_EVENT_LOG_H_
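
Three pieces cooperate here: the 61000 entry in EventLogTags.logtags describes the payload ("(command|3)" means one string value), the enum above keeps the numeric tag available to C++, and TimeCheck.cpp writes the event with liblog's LOG_EVENT_STRING(). A hedged sketch mirroring that write path and its includes:

#include <utils/Log.h>          // pulls in the liblog event macros, as in TimeCheck.cpp
#include <media/EventLog.h>     // LOGTAG_AUDIO_BINDER_TIMEOUT (61000)

namespace android {

void logBinderTimeoutExample(const char* tag) {
    // Writes event tag 61000 with a single string payload; visible via
    // `adb logcat -b events`, decoded using the installed EventLogTags file.
    LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
}

}  // namespace android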
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 0911744..98f54c2 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -68,6 +68,7 @@
 bool captureHotwordAllowed(pid_t pid, uid_t uid);
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
+bool modifyDefaultAudioEffectsAllowed();
 bool dumpAllowed();
 bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
 status_t checkIMemory(const sp<IMemory>& iMemory);
diff --git a/media/libmedia/include/media/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
similarity index 100%
rename from media/libmedia/include/media/TimeCheck.h
rename to media/utils/include/mediautils/TimeCheck.h
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index c0aa477..2c26ba4 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -42,6 +42,7 @@
 
 LOCAL_STATIC_LIBRARIES := \
     libcpustats \
+    libjsoncpp \
     libsndfile \
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 53a4ce9..ed2b3c0 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -20,9 +20,11 @@
 //#define LOG_NDEBUG 0
 
 #include "Configuration.h"
+#include <algorithm>    // std::any_of
 #include <dirent.h>
 #include <math.h>
 #include <signal.h>
+#include <string>
 #include <sys/time.h>
 #include <sys/resource.h>
 
@@ -56,13 +58,14 @@
 
 #include <audio_utils/primitives.h>
 
+#include <json/json.h>
+
 #include <powermanager/PowerManager.h>
 
 #include <media/IMediaLogService.h>
 #include <media/MemoryLeakTrackUtil.h>
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
-#include <media/AudioParameter.h>
 #include <mediautils/BatteryNotifier.h>
 #include <mediautils/ServiceUtilities.h>
 #include <private/android_filesystem_config.h>
@@ -306,7 +309,7 @@
         *sessionId = actualSessionId;
     } else {
         if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
-            AudioSystem::releaseOutput(io, streamType, actualSessionId);
+            AudioSystem::releaseOutput(portId);
         } else {
             AudioSystem::releaseInput(portId);
         }
@@ -434,6 +437,18 @@
     if (!dumpAllowed()) {
         dumpPermissionDenial(fd, args);
     } else {
+        // XXX This is sort of hacky for now.
+        const bool formatJson = std::any_of(args.begin(), args.end(),
+                [](const String16 &arg) { return arg == String16("--json"); });
+        if (formatJson) {
+            Json::Value root = getJsonDump();
+            Json::FastWriter writer;
+            std::string rootStr = writer.write(root);
+            // XXX consider buffering if the string happens to be too long.
+            dprintf(fd, "%s", rootStr.c_str());
+            return NO_ERROR;
+        }
+
         // get state of hardware lock
         bool hardwareLocked = dumpTryLock(mHardwareLock);
         if (!hardwareLocked) {
@@ -443,7 +458,7 @@
             mHardwareLock.unlock();
         }
 
-        bool locked = dumpTryLock(mLock);
+        const bool locked = dumpTryLock(mLock);
 
         // failed to lock - AudioFlinger is probably deadlocked
         if (!locked) {
@@ -501,6 +516,15 @@
 
         mPatchPanel.dump(fd);
 
+        // dump external setParameters
+        auto dumpLogger = [fd](SimpleLog& logger, const char* name) {
+            dprintf(fd, "\n%s setParameters:\n", name);
+            logger.dump(fd, "    " /* prefix */);
+        };
+        dumpLogger(mRejectedSetParameterLog, "Rejected");
+        dumpLogger(mAppSetParameterLog, "App");
+        dumpLogger(mSystemSetParameterLog, "System");
+
         BUFLOG_RESET;
 
         if (locked) {
@@ -545,6 +569,32 @@
     return NO_ERROR;
 }
 
+Json::Value AudioFlinger::getJsonDump()
+{
+    Json::Value root(Json::objectValue);
+    const bool locked = dumpTryLock(mLock);
+
+    // failed to lock - AudioFlinger is probably deadlocked
+    if (!locked) {
+        root["deadlock_message"] = kDeadlockedString;
+    }
+    // FIXME risky to access data structures without a lock held?
+
+    Json::Value playbackThreads = Json::arrayValue;
+    // dump playback threads
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        playbackThreads.append(mPlaybackThreads.valueAt(i)->getJsonDump());
+    }
+
+    if (locked) {
+        mLock.unlock();
+    }
+
+    root["playback_threads"] = playbackThreads;
+
+    return root;
+}
+
 sp<AudioFlinger::Client> AudioFlinger::registerPid(pid_t pid)
 {
     Mutex::Autolock _cl(mClientLock);
@@ -777,7 +827,7 @@
 
 Exit:
     if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
-        AudioSystem::releaseOutput(output.outputId, streamType, sessionId);
+        AudioSystem::releaseOutput(portId);
     }
     *status = lStatus;
     return trackHandle;
@@ -1144,6 +1194,23 @@
     }
 }
 
+// forwardParametersToDownstreamPatches_l() must be called with AudioFlinger::mLock held
+void AudioFlinger::forwardParametersToDownstreamPatches_l(
+        audio_io_handle_t upStream, const String8& keyValuePairs,
+        std::function<bool(const sp<PlaybackThread>&)> useThread)
+{
+    std::vector<PatchPanel::SoftwarePatch> swPatches;
+    if (mPatchPanel.getDownstreamSoftwarePatches(upStream, &swPatches) != OK) return;
+    ALOGV_IF(!swPatches.empty(), "%s found %zu downstream patches for stream ID %d",
+            __func__, swPatches.size(), upStream);
+    for (const auto& swPatch : swPatches) {
+        sp<PlaybackThread> downStream = checkPlaybackThread_l(swPatch.getPlaybackThreadHandle());
+        if (downStream != NULL && (useThread == nullptr || useThread(downStream))) {
+            downStream->setParameters(keyValuePairs);
+        }
+    }
+}
+
 // Filter reserved keys from setParameters() before forwarding to audio HAL or acting upon.
 // Some keys are used for audio routing and audio path configuration and should be reserved for use
 // by audio policy and audio flinger for functional, privacy and security reasons.
@@ -1170,16 +1237,33 @@
 
     AudioParameter param = AudioParameter(keyValuePairs);
     String8 value;
+    AudioParameter rejectedParam;
     for (auto& key : kReservedParameters) {
         if (param.get(key, value) == NO_ERROR) {
-            ALOGW("%s: filtering key %s value %s from uid %d",
-                  __func__, key.string(), value.string(), callingUid);
+            rejectedParam.add(key, value);
             param.remove(key);
         }
     }
+    logFilteredParameters(param.size() + rejectedParam.size(), keyValuePairs,
+                          rejectedParam.size(), rejectedParam.toString(), callingUid);
     keyValuePairs = param.toString();
 }
 
+void AudioFlinger::logFilteredParameters(size_t originalKVPSize, const String8& originalKVPs,
+                                         size_t rejectedKVPSize, const String8& rejectedKVPs,
+                                         uid_t callingUid) {
+    auto prefix = String8::format("UID %5d", callingUid);
+    auto suffix = String8::format("%zu KVP received: %s", originalKVPSize, originalKVPs.c_str());
+    if (rejectedKVPSize != 0) {
+        auto error = String8::format("%zu KVP rejected: %s", rejectedKVPSize, rejectedKVPs.c_str());
+        ALOGW("%s: %s, %s, %s", __func__, prefix.c_str(), error.c_str(), suffix.c_str());
+        mRejectedSetParameterLog.log("%s, %s, %s", prefix.c_str(), error.c_str(), suffix.c_str());
+    } else {
+        auto& logger = (isServiceUid(callingUid) ? mSystemSetParameterLog : mAppSetParameterLog);
+        logger.log("%s, %s", prefix.c_str(), suffix.c_str());
+    }
+}
+
 status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
 {
     ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d calling uid %d",
@@ -1259,7 +1343,9 @@
         }
     }
     if (thread != 0) {
-        return thread->setParameters(filteredKeyValuePairs);
+        status_t result = thread->setParameters(filteredKeyValuePairs);
+        forwardParametersToDownstreamPatches_l(thread->id(), filteredKeyValuePairs);
+        return result;
     }
     return BAD_VALUE;
 }
@@ -1817,6 +1903,10 @@
 
         mHardwareStatus = AUDIO_HW_IDLE;
     }
+    if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_MSD) == 0) {
+        // An MSD module is inserted before hardware modules in order to mix encoded streams.
+        flags = static_cast<AudioHwDevice::Flags>(flags | AudioHwDevice::AHWD_IS_INSERT);
+    }
 
     audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
     mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
@@ -1960,7 +2050,10 @@
         if (sessions & ThreadBase::TRACK_SESSION) {
             AudioParameter param = AudioParameter();
             param.addInt(String8(AudioParameter::keyStreamHwAvSync), value);
-            thread->setParameters(param.toString());
+            String8 keyValuePairs = param.toString();
+            thread->setParameters(keyValuePairs);
+            forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+                    [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
             break;
         }
     }
@@ -2006,7 +2099,10 @@
         ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
         AudioParameter param = AudioParameter();
         param.addInt(String8(AudioParameter::keyStreamHwAvSync), syncId);
-        thread->setParameters(param.toString());
+        String8 keyValuePairs = param.toString();
+        thread->setParameters(keyValuePairs);
+        forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+                [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
     }
 }
 
@@ -2096,6 +2192,7 @@
                       *output, thread.get());
             }
             mPlaybackThreads.add(*output, thread);
+            mPatchPanel.notifyStreamOpened(outHwDev, *output);
             return thread;
         }
     }
@@ -2231,6 +2328,7 @@
         const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
         ioDesc->mIoHandle = output;
         ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+        mPatchPanel.notifyStreamClosed(output);
     }
     // The thread entity (active unit of execution) is no longer running here,
     // but the ThreadBase container still exists.
@@ -2877,16 +2975,74 @@
 }
 
 status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid,
-        effect_descriptor_t *descriptor) const
+                                           const effect_uuid_t *pTypeUuid,
+                                           uint32_t preferredTypeFlag,
+                                           effect_descriptor_t *descriptor) const
 {
+    if (pUuid == NULL || pTypeUuid == NULL || descriptor == NULL) {
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock _l(mLock);
-    if (mEffectsFactoryHal.get()) {
-        return mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
-    } else {
+
+    if (!mEffectsFactoryHal.get()) {
         return -ENODEV;
     }
-}
 
+    status_t status = NO_ERROR;
+    if (!EffectsFactoryHalInterface::isNullUuid(pUuid)) {
+        // If uuid is specified, request effect descriptor from that.
+        status = mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
+    } else if (!EffectsFactoryHalInterface::isNullUuid(pTypeUuid)) {
+        // If uuid is not specified, look for an available implementation
+        // of the required type instead.
+
+        // Use a temporary descriptor to avoid modifying |descriptor| in the failure case.
+        effect_descriptor_t desc;
+        desc.flags = 0; // prevent compiler warning
+
+        uint32_t numEffects = 0;
+        status = mEffectsFactoryHal->queryNumberEffects(&numEffects);
+        if (status < 0) {
+            ALOGW("getEffectDescriptor() error %d from FactoryHal queryNumberEffects", status);
+            return status;
+        }
+
+        bool found = false;
+        for (uint32_t i = 0; i < numEffects; i++) {
+            status = mEffectsFactoryHal->getDescriptor(i, &desc);
+            if (status < 0) {
+                ALOGW("getEffectDescriptor() error %d from FactoryHal getDescriptor", status);
+                continue;
+            }
+            if (memcmp(&desc.type, pTypeUuid, sizeof(effect_uuid_t)) == 0) {
+                // If matching type found save effect descriptor.
+                found = true;
+                *descriptor = desc;
+
+                // If there's no preferred flag or this descriptor matches the preferred
+                // flag, success! If this descriptor doesn't match the preferred
+                // flag, continue enumeration in case a better matching version of this
+                // effect type is available. Note that this means if no effect with a
+                // correct flag is found, the descriptor returned will correspond to the
+                // last effect that at least had a matching type uuid (if any).
+                if (preferredTypeFlag == EFFECT_FLAG_TYPE_MASK ||
+                    (desc.flags & EFFECT_FLAG_TYPE_MASK) == preferredTypeFlag) {
+                    break;
+                }
+            }
+        }
+
+        if (!found) {
+            status = NAME_NOT_FOUND;
+            ALOGW("getEffectDescriptor(): Effect not found by type.");
+        }
+    } else {
+        status = BAD_VALUE;
+        ALOGE("getEffectDescriptor(): Either uuid or type uuid must be non-null UUIDs.");
+    }
+    return status;
+}
 
 sp<IEffect> AudioFlinger::createEffect(
         effect_descriptor_t *pDesc,
@@ -2940,60 +3096,15 @@
     }
 
     {
-        if (!EffectsFactoryHalInterface::isNullUuid(&pDesc->uuid)) {
-            // if uuid is specified, request effect descriptor
-            lStatus = mEffectsFactoryHal->getDescriptor(&pDesc->uuid, &desc);
-            if (lStatus < 0) {
-                ALOGW("createEffect() error %d from EffectGetDescriptor", lStatus);
-                goto Exit;
-            }
-        } else {
-            // if uuid is not specified, look for an available implementation
-            // of the required type in effect factory
-            if (EffectsFactoryHalInterface::isNullUuid(&pDesc->type)) {
-                ALOGW("createEffect() no effect type");
-                lStatus = BAD_VALUE;
-                goto Exit;
-            }
-            uint32_t numEffects = 0;
-            effect_descriptor_t d;
-            d.flags = 0; // prevent compiler warning
-            bool found = false;
-
-            lStatus = mEffectsFactoryHal->queryNumberEffects(&numEffects);
-            if (lStatus < 0) {
-                ALOGW("createEffect() error %d from EffectQueryNumberEffects", lStatus);
-                goto Exit;
-            }
-            for (uint32_t i = 0; i < numEffects; i++) {
-                lStatus = mEffectsFactoryHal->getDescriptor(i, &desc);
-                if (lStatus < 0) {
-                    ALOGW("createEffect() error %d from EffectQueryEffect", lStatus);
-                    continue;
-                }
-                if (memcmp(&desc.type, &pDesc->type, sizeof(effect_uuid_t)) == 0) {
-                    // If matching type found save effect descriptor. If the session is
-                    // 0 and the effect is not auxiliary, continue enumeration in case
-                    // an auxiliary version of this effect type is available
-                    found = true;
-                    d = desc;
-                    if (sessionId != AUDIO_SESSION_OUTPUT_MIX ||
-                            (desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
-                        break;
-                    }
-                }
-            }
-            if (!found) {
-                lStatus = BAD_VALUE;
-                ALOGW("createEffect() effect not found");
-                goto Exit;
-            }
-            // For same effect type, chose auxiliary version over insert version if
-            // connect to output mix (Compliance to OpenSL ES)
-            if (sessionId == AUDIO_SESSION_OUTPUT_MIX &&
-                    (d.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) {
-                desc = d;
-            }
+        // Get the full effect descriptor from the uuid/type.
+        // If the session is the output mix, prefer an auxiliary effect,
+        // otherwise no preference.
+        uint32_t preferredType = (sessionId == AUDIO_SESSION_OUTPUT_MIX ?
+                                  EFFECT_FLAG_TYPE_AUXILIARY : EFFECT_FLAG_TYPE_MASK);
+        lStatus = getEffectDescriptor(&pDesc->uuid, &pDesc->type, preferredType, &desc);
+        if (lStatus < 0) {
+            ALOGW("createEffect() error %d from getEffectDescriptor", lStatus);
+            goto Exit;
         }
 
         // Do not allow auxiliary effects on a session different from 0 (output mix)
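Note on the refactored lookup above: createEffect() now delegates to the extended getEffectDescriptor(), which enumerates the factory when only a type UUID is given. The standalone sketch below (not part of the patch; assumes <cstring>, <vector> and <hardware/audio_effect.h>, with invented names pickDescriptor and "all") restates the selection rule it implements: remember the last descriptor whose type UUID matches, but stop early on an exact preferred-flag match.

    static bool pickDescriptor(const std::vector<effect_descriptor_t>& all,
                               const effect_uuid_t& type, uint32_t preferredTypeFlag,
                               effect_descriptor_t* out) {
        bool found = false;
        for (const auto& d : all) {
            if (memcmp(&d.type, &type, sizeof(effect_uuid_t)) != 0) continue;
            found = true;
            *out = d;                                  // remember the last type match
            if (preferredTypeFlag == EFFECT_FLAG_TYPE_MASK ||
                    (d.flags & EFFECT_FLAG_TYPE_MASK) == preferredTypeFlag) {
                break;                                 // exact flavor match wins immediately
            }
        }
        return found;
    }

As in the hunk above, output-mix callers pass EFFECT_FLAG_TYPE_AUXILIARY as the preferred flag, all others pass EFFECT_FLAG_TYPE_MASK (no preference).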
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index a59c13e..9c36479 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -23,6 +23,9 @@
 #include <mutex>
 #include <deque>
 #include <map>
+#include <memory>
+#include <set>
+#include <string>
 #include <vector>
 #include <stdint.h>
 #include <sys/types.h>
@@ -62,7 +65,9 @@
 #include <media/LinearMap.h>
 #include <media/VolumeShaper.h>
 
+#include <audio_utils/clock.h>
 #include <audio_utils/SimpleLog.h>
+#include <audio_utils/TimestampVerifier.h>
 
 #include "FastCapture.h"
 #include "FastMixer.h"
@@ -75,6 +80,7 @@
 
 #include <powermanager/IPowerManager.h>
 
+#include <json/json.h>
 #include <media/nblog/NBLog.h>
 #include <private/media/AudioEffectShared.h>
 #include <private/media/AudioTrackShared.h>
@@ -110,6 +116,7 @@
     static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
 
     virtual     status_t    dump(int fd, const Vector<String16>& args);
+                Json::Value getJsonDump();
 
     // IAudioFlinger interface, in binder opcode order
     virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
@@ -203,6 +210,8 @@
     virtual status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor) const;
 
     virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
+                                         const effect_uuid_t *pTypeUuid,
+                                         uint32_t preferredTypeFlag,
                                          effect_descriptor_t *descriptor) const;
 
     virtual sp<IEffect> createEffect(
@@ -676,6 +685,9 @@
                 bool            updateOrphanEffectChains(const sp<EffectModule>& effect);
 
                 void broacastParametersToRecordThreads_l(const String8& keyValuePairs);
+                void forwardParametersToDownstreamPatches_l(
+                        audio_io_handle_t upStream, const String8& keyValuePairs,
+                        std::function<bool(const sp<PlaybackThread>&)> useThread = nullptr);
 
     // AudioStreamIn is immutable, so their fields are const.
     // For emphasis, we could also make all pointers to them be "const *",
@@ -800,6 +812,9 @@
     status_t    checkStreamType(audio_stream_type_t stream) const;
 
     void        filterReservedParameters(String8& keyValuePairs, uid_t callingUid);
+    void        logFilteredParameters(size_t originalKVPSize, const String8& originalKVPs,
+                                      size_t rejectedKVPSize, const String8& rejectedKVPs,
+                                      uid_t callingUid);
 
 public:
     // These methods read variables atomically without mLock,
@@ -820,7 +835,11 @@
     PatchPanel mPatchPanel;
     sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
 
-    bool        mSystemReady;
+    bool       mSystemReady;
+
+    SimpleLog  mRejectedSetParameterLog;
+    SimpleLog  mAppSetParameterLog;
+    SimpleLog  mSystemSetParameterLog;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index eb826c6..d4299b0 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -35,6 +35,9 @@
     enum Flags {
         AHWD_CAN_SET_MASTER_VOLUME  = 0x1,
         AHWD_CAN_SET_MASTER_MUTE    = 0x2,
+        // Means that this isn't a terminal module, and software patches
+        // are used to transport audio data further.
+        AHWD_IS_INSERT              = 0x4,
     };
 
     AudioHwDevice(audio_module_handle_t handle,
@@ -55,6 +58,10 @@
         return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
     }
 
+    bool isInsert() const {
+        return (0 != (mFlags & AHWD_IS_INSERT));
+    }
+
     audio_module_handle_t handle() const { return mHandle; }
     const char *moduleName() const { return mModuleName; }
     sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
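Note on AHWD_IS_INSERT above: it tags a hardware module whose outputs are not terminal, so audio must be carried further by software patches. A hypothetical registration sketch follows; the handle value, module name and halDevice variable are invented, only the constructor shape follows the AudioHwDevice usage earlier in this patch.

    const audio_module_handle_t handle = 10;                  // invented handle
    AudioHwDevice::Flags flags = static_cast<AudioHwDevice::Flags>(
            AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME | AudioHwDevice::AHWD_IS_INSERT);
    AudioHwDevice *dev = new AudioHwDevice(handle, "msd" /* invented */, halDevice, flags);
    if (dev->isInsert()) {
        // streams opened on this module are tracked via PatchPanel::notifyStreamOpened()
    }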
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
index ede8e3f..34cd821 100644
--- a/services/audioflinger/Configuration.h
+++ b/services/audioflinger/Configuration.h
@@ -27,7 +27,7 @@
 //#define AUDIO_WATCHDOG
 
 // uncomment to display CPU load adjusted for CPU frequency
-//#define CPU_FREQUENCY_STATISTICS
+//#define CPU_FREQUENCY_STATISTICS
 
 // uncomment to enable fast threads to take performance samples for later statistical analysis
 #define FAST_THREAD_STATISTICS
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 786c4af..c4c742e 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -50,6 +50,8 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+#define DEFAULT_OUTPUT_SAMPLE_RATE 48000
+
 namespace android {
 
 // ----------------------------------------------------------------------------
@@ -263,7 +265,9 @@
         }
         break;
     case STOPPING:
-        if (stop_l() == NO_ERROR) {
+        // volume control for offload and direct threads must take effect immediately.
+        if (stop_l() == NO_ERROR
+            && !(isVolumeControl() && isOffloadedOrDirect())) {
             mDisableWaitCnt = mMaxDisableWaitCnt;
         } else {
             mDisableWaitCnt = 1; // will cause immediate transition to IDLE
@@ -547,7 +551,14 @@
 
     mConfig.inputCfg.format = EFFECT_BUFFER_FORMAT;
     mConfig.outputCfg.format = EFFECT_BUFFER_FORMAT;
-    mConfig.inputCfg.samplingRate = thread->sampleRate();
+
+    // Don't use the thread's sample rate if a non-offloaded effect runs on an offload thread.
+    if ((thread->type() == ThreadBase::OFFLOAD) && !isOffloaded()) {
+        mConfig.inputCfg.samplingRate = DEFAULT_OUTPUT_SAMPLE_RATE;
+        ALOGV("Overriding effect input sample rate to 48kHz");
+    } else {
+        mConfig.inputCfg.samplingRate = thread->sampleRate();
+    }
     mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
     mConfig.inputCfg.bufferProvider.cookie = NULL;
     mConfig.inputCfg.bufferProvider.getBuffer = NULL;
@@ -778,6 +789,16 @@
     }
     status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
+
+    if (isVolumeControl() && isOffloadedOrDirect()) {
+        sp<EffectChain> chain = mChain.promote();
+        // We have the EffectChain and EffectModule lock, permit a reentrant call to setVolume:
+        // resetVolume_l --> setVolume_l --> EffectModule::setVolume
+        mSetVolumeReentrantTid = gettid();
+        if (chain != 0) chain->resetVolume_l();
+        mSetVolumeReentrantTid = INVALID_PID;
+    }
+
     status_t status = mEffectInterface->command(EFFECT_CMD_DISABLE,
                                                 0,
                                                 NULL,
@@ -993,6 +1014,16 @@
     }
 }
 
+bool AudioFlinger::EffectModule::isOffloadedOrDirect() const
+{
+    return (mThreadType == ThreadBase::OFFLOAD || mThreadType == ThreadBase::DIRECT);
+}
+
+bool AudioFlinger::EffectModule::isVolumeControlEnabled() const
+{
+    return (isVolumeControl() && (isOffloadedOrDirect() ? isEnabled() : isProcessEnabled()));
+}
+
 void AudioFlinger::EffectModule::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
     ALOGVV("setInBuffer %p",(&buffer));
 
@@ -1091,7 +1122,7 @@
 
 status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
 {
-    Mutex::Autolock _l(mLock);
+    AutoLockReentrant _l(mLock, mSetVolumeReentrantTid);
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
@@ -1122,6 +1153,18 @@
     return status;
 }
 
+void AudioFlinger::EffectChain::setVolumeForOutput_l(uint32_t left, uint32_t right)
+{
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread != 0 &&
+        (thread->type() == ThreadBase::OFFLOAD || thread->type() == ThreadBase::DIRECT)) {
+        PlaybackThread *t = (PlaybackThread *)thread.get();
+        float vol_l = (float)left / (1 << 24);
+        float vol_r = (float)right / (1 << 24);
+        t->setVolumeForOutput_l(vol_l, vol_r);
+    }
+}
+
 status_t AudioFlinger::EffectModule::setDevice(audio_devices_t device)
 {
     if (device == AUDIO_DEVICE_NONE) {
@@ -2200,8 +2243,7 @@
 
     // first update volume controller
     for (size_t i = size; i > 0; i--) {
-        if (mEffects[i - 1]->isProcessEnabled() &&
-            (mEffects[i - 1]->desc().flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL) {
+        if (mEffects[i - 1]->isVolumeControlEnabled()) {
             ctrlIdx = i - 1;
             hasControl = true;
             break;
@@ -2247,6 +2289,8 @@
     *left = newLeft;
     *right = newRight;
 
+    setVolumeForOutput_l(*left, *right);
+
     return hasControl;
 }
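Note on setVolumeForOutput_l() above: effect chain volumes travel as 8.24 fixed point, while offload/direct outputs take float gains. A standalone sketch of the conversion (not patch code; assumes <cstdint>):

    static inline float q8_24_to_float(uint32_t gain) {
        return static_cast<float>(gain) / static_cast<float>(1 << 24);
    }
    // q8_24_to_float(1u << 24) == 1.0f (unity gain), q8_24_to_float(1u << 23) == 0.5f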
 
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 2327bb9..e04ee8e 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -85,6 +85,8 @@
     status_t    setEnabled_l(bool enabled);
     bool isEnabled() const;
     bool isProcessEnabled() const;
+    bool isOffloadedOrDirect() const;
+    bool isVolumeControlEnabled() const;
 
     void        setInBuffer(const sp<EffectBufferHalInterface>& buffer);
     int16_t     *inBuffer() const {
@@ -95,7 +97,8 @@
         return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
     }
     void        setChain(const wp<EffectChain>& chain) { mChain = chain; }
-    void        setThread(const wp<ThreadBase>& thread) { mThread = thread; }
+    void        setThread(const wp<ThreadBase>& thread)
+                    { mThread = thread; mThreadType = thread.promote()->type(); }
     const wp<ThreadBase>& thread() { return mThread; }
 
     status_t addHandle(EffectHandle *handle);
@@ -128,6 +131,9 @@
                         { return (mDescriptor.flags & EFFECT_FLAG_HW_ACC_MASK) == 0; }
     bool             isProcessImplemented() const
                         { return (mDescriptor.flags & EFFECT_FLAG_NO_PROCESS) == 0; }
+    bool             isVolumeControl() const
+                        { return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
+                            == EFFECT_FLAG_VOLUME_CTRL; }
     status_t         setOffloaded(bool offloaded, audio_io_handle_t io);
     bool             isOffloaded() const;
     void             addEffectToHal_l();
@@ -150,6 +156,7 @@
 
 mutable Mutex               mLock;      // mutex for process, commands and handles list protection
     wp<ThreadBase>      mThread;    // parent thread
+    ThreadBase::type_t  mThreadType; // parent thread type
     wp<EffectChain>     mChain;     // parent effect chain
     const int           mId;        // this instance unique ID
     const audio_session_t mSessionId; // audio session ID
@@ -176,6 +183,24 @@
     uint32_t mInChannelCountRequested;
     uint32_t mOutChannelCountRequested;
 #endif
+
+    class AutoLockReentrant {
+    public:
+        AutoLockReentrant(Mutex& mutex, pid_t allowedTid)
+            : mMutex(gettid() == allowedTid ? nullptr : &mutex)
+        {
+            if (mMutex != nullptr) mMutex->lock();
+        }
+        ~AutoLockReentrant() {
+            if (mMutex != nullptr) mMutex->unlock();
+        }
+    private:
+        Mutex * const mMutex;
+    };
+
+    static constexpr pid_t INVALID_PID = (pid_t)-1;
+    // this tid is allowed to call setVolume() without acquiring the mutex.
+    pid_t mSetVolumeReentrantTid = INVALID_PID;
 };
 
 // The EffectHandle class implements the IEffect interface. It provides resources
@@ -403,6 +428,8 @@
 
     void setThread(const sp<ThreadBase>& thread);
 
+    void setVolumeForOutput_l(uint32_t left, uint32_t right);
+
              wp<ThreadBase> mThread;     // parent mixer thread
     mutable  Mutex mLock;        // mutex protecting effect list
              Vector< sp<EffectModule> > mEffects; // list of effect modules
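Note on AutoLockReentrant above: the thread that stored its tid in mSetVolumeReentrantTid may re-enter EffectModule::setVolume() without deadlocking on mLock, while every other thread still serializes on it. A hedged usage sketch with invented wrapper names (only the guard semantics mirror the patch):

    void beginReentrantSection() {
        mSetVolumeReentrantTid = gettid();     // this thread may now skip the lock
    }
    void endReentrantSection() {
        mSetVolumeReentrantTid = INVALID_PID;
    }
    void setVolumeLike() {
        AutoLockReentrant _l(mLock, mSetVolumeReentrantTid);   // no-op for the marked tid
        // ... adjust volume; runs under mLock, or re-entrantly on the marked thread ...
    }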
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index d063772..dd84bf2 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -20,6 +20,7 @@
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 
 #include "Configuration.h"
+#include <audio_utils/format.h>
 #include <linux/futex.h>
 #include <sys/syscall.h>
 #include <media/AudioBufferProvider.h>
@@ -161,7 +162,21 @@
     const FastCaptureState * const current = (const FastCaptureState *) mCurrent;
     FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) mDumpState;
     const FastCaptureState::Command command = mCommand;
-    const size_t frameCount = current->mFrameCount;
+    size_t frameCount = current->mFrameCount;
+    AudioBufferProvider* fastPatchRecordBufferProvider = current->mFastPatchRecordBufferProvider;
+    AudioBufferProvider::Buffer patchBuffer;
+
+    if (fastPatchRecordBufferProvider != 0) {
+        patchBuffer.frameCount = ~0;
+        status_t status = fastPatchRecordBufferProvider->getNextBuffer(&patchBuffer);
+        if (status != NO_ERROR) {
+            frameCount = 0;
+        } else if (patchBuffer.frameCount < frameCount) {
+            // TODO: Make sure that it doesn't cause any issues if we just get a small available
+            // buffer from the buffer provider.
+            frameCount = patchBuffer.frameCount;
+        }
+    }
 
     if ((command & FastCaptureState::READ) /*&& isWarm*/) {
         ALOG_ASSERT(mInputSource != NULL);
@@ -176,6 +191,7 @@
             mTotalNativeFramesRead += framesRead;
             dumpState->mFramesRead = mTotalNativeFramesRead;
             mReadBufferState = framesRead;
+            patchBuffer.frameCount = framesRead;
         } else {
             dumpState->mReadErrors++;
             mReadBufferState = 0;
@@ -193,11 +209,18 @@
         }
         if (mReadBufferState > 0) {
             ssize_t framesWritten = mPipeSink->write(mReadBuffer, mReadBufferState);
-            // FIXME This supports at most one fast capture client.
-            //       To handle multiple clients this could be converted to an array,
-            //       or with a lot more work the control block could be shared by all clients.
             audio_track_cblk_t* cblk = current->mCblk;
-            if (cblk != NULL && framesWritten > 0) {
+            if (fastPatchRecordBufferProvider != 0) {
+                // This indicates the fast track is a patch record; update the cblk by
+                // calling releaseBuffer().
+                memcpy_by_audio_format(patchBuffer.raw, current->mFastPatchRecordFormat,
+                        mReadBuffer, mFormat.mFormat, framesWritten * mFormat.mChannelCount);
+                patchBuffer.frameCount = framesWritten;
+                fastPatchRecordBufferProvider->releaseBuffer(&patchBuffer);
+            } else if (cblk != NULL && framesWritten > 0) {
+                // FIXME This supports at most one fast capture client.
+                //       To handle multiple clients this could be converted to an array,
+                //       or with a lot more work the control block could be shared by all clients.
                 int32_t rear = cblk->u.mStreaming.mRear;
                 android_atomic_release_store(framesWritten + rear, &cblk->u.mStreaming.mRear);
                 cblk->mServer += framesWritten;
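Note on the fast patch record path above: the capture loop now negotiates frames with the patch record's buffer provider instead of only bumping the legacy cblk. A simplified, hedged sketch of that handshake; provider, inputSource, readBuffer, halBurstFrames, patchFormat, halFormat and channelCount are invented placeholders, and <algorithm> plus <audio_utils/format.h> are assumed.

    AudioBufferProvider::Buffer buf;
    buf.frameCount = ~0;                              // request whatever is available
    if (provider->getNextBuffer(&buf) == NO_ERROR) {
        const size_t frames = std::min(buf.frameCount, halBurstFrames);
        const ssize_t read = inputSource->read(readBuffer, frames);
        if (read > 0) {
            // convert from the HAL capture format into the patch record's format
            memcpy_by_audio_format(buf.raw, patchFormat,
                                   readBuffer, halFormat, read * channelCount);
            buf.frameCount = read;
        } else {
            buf.frameCount = 0;                       // nothing captured this cycle
        }
        provider->releaseBuffer(&buf);                // advances the patch record's cblk
    }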
diff --git a/services/audioflinger/FastCaptureState.h b/services/audioflinger/FastCaptureState.h
index 9bca2d4..d287232 100644
--- a/services/audioflinger/FastCaptureState.h
+++ b/services/audioflinger/FastCaptureState.h
@@ -18,6 +18,7 @@
 #define ANDROID_AUDIO_FAST_CAPTURE_STATE_H
 
 #include <media/nbaio/NBAIO.h>
+#include <media/AudioBufferProvider.h>
 #include "FastThreadState.h"
 #include <private/media/AudioTrackShared.h>
 
@@ -37,6 +38,10 @@
     size_t          mFrameCount;        // number of frames per fast capture buffer
     audio_track_cblk_t* mCblk;          // control block for the single fast client, or NULL
 
+    audio_format_t  mFastPatchRecordFormat = AUDIO_FORMAT_INVALID;
+    AudioBufferProvider* mFastPatchRecordBufferProvider = nullptr;   // a reference to a patch
+                                                                     // record in fast mode
+
     // Extends FastThreadState::Command
     static const Command
         // The following commands also process configuration changes, and can be "or"ed:
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index caeede9..a42d6b3 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -32,7 +32,7 @@
 #include <utils/Trace.h>
 #include <system/audio.h>
 #ifdef FAST_THREAD_STATISTICS
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
 #ifdef CPU_FREQUENCY_STATISTICS
 #include <cpustats/ThreadCpuUsage.h>
 #endif
@@ -336,13 +336,15 @@
 {
     // TODO: pass an ID parameter to indicate which time series we want to write to in NBLog.cpp
     // Or: pass both of these into a single call with a boolean
+    const FastMixerState * const current = (const FastMixerState *) mCurrent;
+    FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
+
     if (mIsWarm) {
         LOG_HIST_TS();
     } else {
+        dumpState->mTimestampVerifier.discontinuity();
         LOG_AUDIO_STATE();
     }
-    const FastMixerState * const current = (const FastMixerState *) mCurrent;
-    FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
     const FastMixerState::Command command = mCommand;
     const size_t frameCount = current->mFrameCount;
 
@@ -477,39 +479,47 @@
         mAttemptedWrite = true;
         // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
 
-        ExtendedTimestamp timestamp; // local
-        status_t status = mOutputSink->getTimestamp(timestamp);
-        if (status == NO_ERROR) {
-            const int64_t totalNativeFramesPresented =
-                    timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
-            if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
-                mNativeFramesWrittenButNotPresented =
-                    mTotalNativeFramesWritten - totalNativeFramesPresented;
-                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+        if (mIsWarm) {
+            ExtendedTimestamp timestamp; // local
+            status_t status = mOutputSink->getTimestamp(timestamp);
+            if (status == NO_ERROR) {
+                dumpState->mTimestampVerifier.add(
+                        timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+                        mSampleRate);
+                const int64_t totalNativeFramesPresented =
                         timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
-                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
-                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
-                // We don't compensate for server - kernel time difference and
-                // only update latency if we have valid info.
-                dumpState->mLatencyMs =
-                        (double)mNativeFramesWrittenButNotPresented * 1000 / mSampleRate;
+                if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
+                    mNativeFramesWrittenButNotPresented =
+                        mTotalNativeFramesWritten - totalNativeFramesPresented;
+                    mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+                            timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+                            timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+                    // We don't compensate for server - kernel time difference and
+                    // only update latency if we have valid info.
+                    dumpState->mLatencyMs =
+                            (double)mNativeFramesWrittenButNotPresented * 1000 / mSampleRate;
+                } else {
+                    // HAL reported that more frames were presented than were written
+                    mNativeFramesWrittenButNotPresented = 0;
+                    status = INVALID_OPERATION;
+                }
             } else {
-                // HAL reported that more frames were presented than were written
-                mNativeFramesWrittenButNotPresented = 0;
-                status = INVALID_OPERATION;
+                dumpState->mTimestampVerifier.error();
             }
-        }
-        if (status == NO_ERROR) {
-            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
-                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
-        } else {
-            // fetch server time if we can't get timestamp
-            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
-                    systemTime(SYSTEM_TIME_MONOTONIC);
-            // clear out kernel cached position as this may get rapidly stale
-            // if we never get a new valid timestamp
-            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
-            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+            if (status == NO_ERROR) {
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+                        mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+            } else {
+                // fetch server time if we can't get timestamp
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+                        systemTime(SYSTEM_TIME_MONOTONIC);
+                // clear out kernel cached position as this may get rapidly stale
+                // if we never get a new valid timestamp
+                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+            }
         }
     }
 }
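Note on the TimestampVerifier wiring above: the fast mixer only feeds it while warm, flags failed HAL queries as errors, and marks cold restarts as discontinuities. A hedged sketch of the call pattern, restricted to the methods this patch exercises; kernelFrames, kernelTimeNs, sampleRate and fd are invented placeholders.

    TimestampVerifier<int64_t /* frames */, int64_t /* timeNs */> verifier;
    verifier.add(kernelFrames, kernelTimeNs, sampleRate);   // per valid HAL timestamp
    verifier.error();                                       // HAL timestamp query failed
    verifier.discontinuity();                               // output restarted from cold
    dprintf(fd, "  Timestamp stats: %s\n", verifier.toString().c_str());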
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index b3a2520..2abfbfb 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -19,11 +19,13 @@
 
 #include "Configuration.h"
 #ifdef FAST_THREAD_STATISTICS
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
 #ifdef CPU_FREQUENCY_STATISTICS
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 #endif
+#include <json/json.h>
+#include <string>
 #include <utils/Debug.h>
 #include <utils/Log.h>
 #include "FastMixerDumpState.h"
@@ -73,16 +75,17 @@
                 mNumTracks, mWriteErrors, mUnderruns, mOverruns,
                 mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
                 mixPeriodSec * 1e3, mLatencyMs);
+    dprintf(fd, "  FastMixer Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
 #ifdef FAST_THREAD_STATISTICS
     // find the interval of valid samples
-    uint32_t bounds = mBounds;
-    uint32_t newestOpen = bounds & 0xFFFF;
+    const uint32_t bounds = mBounds;
+    const uint32_t newestOpen = bounds & 0xFFFF;
     uint32_t oldestClosed = bounds >> 16;
 
     //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
     uint32_t n;
     __builtin_sub_overflow(newestOpen, oldestClosed, &n);
-    n = n & 0xFFFF;
+    n &= 0xFFFF;
 
     if (n > mSamplingN) {
         ALOGE("too many samples %u", n);
@@ -90,9 +93,9 @@
     }
     // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
     // and adjusted CPU load in MHz normalized for CPU clock frequency
-    CentralTendencyStatistics wall, loadNs;
+    audio_utils::Statistics<double> wall, loadNs;
 #ifdef CPU_FREQUENCY_STATISTICS
-    CentralTendencyStatistics kHz, loadMHz;
+    audio_utils::Statistics<double> kHz, loadMHz;
     uint32_t previousCpukHz = 0;
 #endif
     // Assuming a normal distribution for cycle times, three standard deviations on either side of
@@ -107,18 +110,18 @@
         if (tail != NULL) {
             tail[j] = wallNs;
         }
-        wall.sample(wallNs);
+        wall.add(wallNs);
         uint32_t sampleLoadNs = mLoadNs[i];
-        loadNs.sample(sampleLoadNs);
+        loadNs.add(sampleLoadNs);
 #ifdef CPU_FREQUENCY_STATISTICS
         uint32_t sampleCpukHz = mCpukHz[i];
         // skip bad kHz samples
         if ((sampleCpukHz & ~0xF) != 0) {
-            kHz.sample(sampleCpukHz >> 4);
+            kHz.add(sampleCpukHz >> 4);
             if (sampleCpukHz == previousCpukHz) {
                 double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
                 double adjMHz = megacycles / mixPeriodSec;  // _not_ wallNs * 1e9
-                loadMHz.sample(adjMHz);
+                loadMHz.add(adjMHz);
             }
         }
         previousCpukHz = sampleCpukHz;
@@ -126,42 +129,42 @@
     }
     if (n) {
         dprintf(fd, "  Simple moving statistics over last %.1f seconds:\n",
-                    wall.n() * mixPeriodSec);
+                    wall.getN() * mixPeriodSec);
         dprintf(fd, "    wall clock time in ms per mix cycle:\n"
                     "      mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
-                    wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
-                    wall.stddev()*1e-6);
+                    wall.getMean()*1e-6, wall.getMin()*1e-6, wall.getMax()*1e-6,
+                    wall.getStdDev()*1e-6);
         dprintf(fd, "    raw CPU load in us per mix cycle:\n"
                     "      mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
-                    loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
-                    loadNs.stddev()*1e-3);
+                    loadNs.getMean()*1e-3, loadNs.getMin()*1e-3, loadNs.getMax()*1e-3,
+                    loadNs.getStdDev()*1e-3);
     } else {
         dprintf(fd, "  No FastMixer statistics available currently\n");
     }
 #ifdef CPU_FREQUENCY_STATISTICS
     dprintf(fd, "  CPU clock frequency in MHz:\n"
                 "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
-                kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
+                kHz.getMean()*1e-3, kHz.getMin()*1e-3, kHz.getMax()*1e-3, kHz.getStdDev()*1e-3);
     dprintf(fd, "  adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
                 "    mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
-                loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
+                loadMHz.getMean(), loadMHz.getMin(), loadMHz.getMax(), loadMHz.getStdDev());
 #endif
     if (tail != NULL) {
         qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
         // assume same number of tail samples on each side, left and right
         uint32_t count = n / kTailDenominator;
-        CentralTendencyStatistics left, right;
+        audio_utils::Statistics<double> left, right;
         for (uint32_t i = 0; i < count; ++i) {
-            left.sample(tail[i]);
-            right.sample(tail[n - (i + 1)]);
+            left.add(tail[i]);
+            right.add(tail[n - (i + 1)]);
         }
         dprintf(fd, "  Distribution of mix cycle times in ms for the tails "
                     "(> ~3 stddev outliers):\n"
                     "    left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
                     "    right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
-                    left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
-                    right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
-                    right.stddev()*1e-6);
+                    left.getMean()*1e-6, left.getMin()*1e-6, left.getMax()*1e-6, left.getStdDev()*1e-6,
+                    right.getMean()*1e-6, right.getMin()*1e-6, right.getMax()*1e-6,
+                    right.getStdDev()*1e-6);
         delete[] tail;
     }
 #endif
@@ -203,4 +206,49 @@
     }
 }
 
+Json::Value FastMixerDumpState::getJsonDump() const
+{
+    Json::Value root(Json::objectValue);
+    if (mCommand == FastMixerState::INITIAL) {
+        root["status"] = "uninitialized";
+        return root;
+    }
+#ifdef FAST_THREAD_STATISTICS
+    // find the interval of valid samples
+    const uint32_t bounds = mBounds;
+    const uint32_t newestOpen = bounds & 0xFFFF;
+    uint32_t oldestClosed = bounds >> 16;
+
+    //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+    uint32_t n;
+    __builtin_sub_overflow(newestOpen, oldestClosed, &n);
+    n &= 0xFFFF;
+
+    if (n > mSamplingN) {
+        ALOGE("too many samples %u", n);
+        n = mSamplingN;
+    }
+    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
+    // and adjusted CPU load in MHz normalized for CPU clock frequency
+    Json::Value jsonWall(Json::arrayValue);
+    Json::Value jsonLoadNs(Json::arrayValue);
+    // loop over all the samples
+    for (uint32_t j = 0; j < n; ++j) {
+        size_t i = oldestClosed++ & (mSamplingN - 1);
+        uint32_t wallNs = mMonotonicNs[i];
+        jsonWall.append(wallNs);
+        uint32_t sampleLoadNs = mLoadNs[i];
+        jsonLoadNs.append(sampleLoadNs);
+    }
+    if (n) {
+        root["wall_clock_time_ns"] = jsonWall;
+        root["raw_cpu_load_ns"] = jsonLoadNs;
+        root["status"] = "ok";
+    } else {
+        root["status"] = "unavailable";
+    }
+#endif
+    return root;
+}
+
 }   // android
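Note on the statistics switch above: audio_utils::Statistics<double> replaces CentralTendencyStatistics, with add()/getN()/getMean()/getMin()/getMax()/getStdDev() in place of sample()/n()/mean()/minimum()/maximum()/stddev(). A minimal standalone sketch; the sample values are invented and <cstdio> plus <audio_utils/Statistics.h> are assumed.

    audio_utils::Statistics<double> wallNs;
    for (double sample : {5.1e6, 4.9e6, 5.3e6}) {    // wall-clock cycle times in ns
        wallNs.add(sample);
    }
    printf("n=%lld mean=%.2f min=%.2f max=%.2f stddev=%.2f (ms)\n",
           (long long) wallNs.getN(),
           wallNs.getMean() * 1e-6, wallNs.getMin() * 1e-6,
           wallNs.getMax() * 1e-6, wallNs.getStdDev() * 1e-6);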
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index aed6bc5..69c2e4e 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -18,6 +18,9 @@
 #define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
 
 #include <stdint.h>
+#include <string>
+#include <audio_utils/TimestampVerifier.h>
+#include <json/json.h>
 #include "Configuration.h"
 #include "FastThreadDumpState.h"
 #include "FastMixerState.h"
@@ -64,7 +67,8 @@
     FastMixerDumpState();
     /*virtual*/ ~FastMixerDumpState();
 
-    void dump(int fd) const;    // should only be called on a stable copy, not the original
+    void dump(int fd) const;             // should only be called on a stable copy, not the original
+    Json::Value getJsonDump() const;     // should only be called on a stable copy, not the original
 
     double   mLatencyMs = 0.;   // measured latency, default of 0 if no valid timestamp read.
     uint32_t mWriteSequence;    // incremented before and after each write()
@@ -75,6 +79,9 @@
     size_t   mFrameCount;
     uint32_t mTrackMask;        // mask of active tracks
     FastTrackDump   mTracks[FastMixerState::kMaxFastTracks];
+
+    // For timestamp statistics.
+    TimestampVerifier<int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
 };
 
 }   // android
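Note on getJsonDump() above: it returns a jsoncpp object with a "status" field plus per-cycle arrays. A hedged consumer sketch; dumpState stands for an invented stable copy of FastMixerDumpState, and the writer is plain jsoncpp rather than anything added by this patch.

    Json::Value root = dumpState.getJsonDump();
    if (root["status"].asString() == "ok") {
        const Json::Value& wall = root["wall_clock_time_ns"];
        for (Json::ArrayIndex i = 0; i < wall.size(); ++i) {
            // wall[i].asUInt() is one mix-cycle wall-clock time in ns
        }
    }
    Json::FastWriter writer;
    std::string serialized = writer.write(root);     // single-line JSON string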
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
index dc15487..e587026 100644
--- a/services/audioflinger/FastThread.cpp
+++ b/services/audioflinger/FastThread.cpp
@@ -339,6 +339,7 @@
                     // these stores #1, #2, #3 are not atomic with respect to each other,
                     // or with respect to store #4 below
                     mDumpState->mMonotonicNs[i] = monotonicNs;
+                    LOG_MONOTONIC_CYCLE_TIME(monotonicNs);
                     mDumpState->mLoadNs[i] = loadNs;
 #ifdef CPU_FREQUENCY_STATISTICS
                     mDumpState->mCpukHz[i] = kHz;
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 42a5a90..7b165a1 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -83,6 +83,16 @@
     return mPatchPanel.listAudioPatches(num_patches, patches);
 }
 
+status_t AudioFlinger::PatchPanel::SoftwarePatch::getLatencyMs_l(double *latencyMs) const
+{
+    const auto& iter = mPatchPanel.mPatches.find(mPatchHandle);
+    if (iter != mPatchPanel.mPatches.end()) {
+        return iter->second.getLatencyMs(latencyMs);
+    } else {
+        return BAD_VALUE;
+    }
+}
+
 /* List connected audio ports and their attributes */
 status_t AudioFlinger::PatchPanel::listAudioPorts(unsigned int *num_ports __unused,
                                 struct audio_port *ports __unused)
@@ -110,9 +120,7 @@
     status_t status = NO_ERROR;
     audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
 
-    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            (patch->num_sinks == 0 && patch->num_sources != 2) ||
-            patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+    if (!audio_patch_is_valid(patch) || (patch->num_sinks == 0 && patch->num_sources != 2)) {
         return BAD_VALUE;
     }
     // limit number of sources to 1 for now or 2 sources for special cross hw module case.
@@ -161,21 +169,21 @@
                 }
             }
             mPatches.erase(iter);
+            removeSoftwarePatchFromInsertedModules(*handle);
         }
     }
 
     Patch newPatch{*patch};
+    audio_module_handle_t insertedModule = AUDIO_MODULE_HANDLE_NONE;
 
     switch (patch->sources[0].type) {
         case AUDIO_PORT_TYPE_DEVICE: {
             audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
-            ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(srcModule);
-            if (index < 0) {
-                ALOGW("%s() bad src hw module %d", __func__, srcModule);
+            AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(srcModule);
+            if (!audioHwDevice) {
                 status = BAD_VALUE;
                 goto exit;
             }
-            AudioHwDevice *audioHwDevice = mAudioFlinger.mAudioHwDevs.valueAt(index);
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 // support only one sink if connection to a mix or across HW modules
                 if ((patch->sinks[i].type == AUDIO_PORT_TYPE_MIX ||
@@ -227,9 +235,19 @@
                     audio_devices_t device = patch->sinks[0].ext.device.type;
                     String8 address = String8(patch->sinks[0].ext.device.address);
                     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-                    audio_output_flags_t flags =
-                            patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
-                            patch->sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
+                    audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+                        config.sample_rate = patch->sinks[0].sample_rate;
+                    }
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+                        config.channel_mask = patch->sinks[0].channel_mask;
+                    }
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+                        config.format = patch->sinks[0].format;
+                    }
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+                        flags = patch->sinks[0].flags.output;
+                    }
                     sp<ThreadBase> thread = mAudioFlinger.openOutput_l(
                                                             patch->sinks[0].ext.device.hw_module,
                                                             &output,
@@ -287,6 +305,9 @@
                 if (status != NO_ERROR) {
                     goto exit;
                 }
+                if (audioHwDevice->isInsert()) {
+                    insertedModule = audioHwDevice->handle();
+                }
             } else {
                 if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                     sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(
@@ -366,6 +387,9 @@
         *handle = (audio_patch_handle_t) mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
         newPatch.mHalHandle = halHandle;
         mPatches.insert(std::make_pair(*handle, std::move(newPatch)));
+        if (insertedModule != AUDIO_MODULE_HANDLE_NONE) {
+            addSoftwarePatchToInsertedModules(insertedModule, *handle);
+        }
         ALOGV("%s() added new patch handle %d halHandle %d", __func__, *handle, halHandle);
     } else {
         newPatch.clearConnections(this);
@@ -407,14 +431,14 @@
     // use a pseudo LCM between input and output framecount
     size_t playbackFrameCount = mPlayback.thread()->frameCount();
     int playbackShift = __builtin_ctz(playbackFrameCount);
-    size_t recordFramecount = mRecord.thread()->frameCount();
-    int shift = __builtin_ctz(recordFramecount);
+    size_t recordFrameCount = mRecord.thread()->frameCount();
+    int shift = __builtin_ctz(recordFrameCount);
     if (playbackShift < shift) {
         shift = playbackShift;
     }
-    size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
-    ALOGV("%s() playframeCount %zu recordFramecount %zu frameCount %zu",
-            __func__, playbackFrameCount, recordFramecount, frameCount);
+    size_t frameCount = (playbackFrameCount * recordFrameCount) >> shift;
+    ALOGV("%s() playframeCount %zu recordFrameCount %zu frameCount %zu",
+            __func__, playbackFrameCount, recordFrameCount, frameCount);
 
     // create a special record track to capture from record thread
     uint32_t channelCount = mPlayback.thread()->channelCount();
@@ -431,6 +455,17 @@
     }
     audio_input_flags_t inputFlags = mAudioPatch.sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
             mAudioPatch.sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
+    if (sampleRate == mRecord.thread()->sampleRate() &&
+            inChannelMask == mRecord.thread()->channelMask() &&
+            mRecord.thread()->fastTrackAvailable() &&
+            mRecord.thread()->hasFastCapture()) {
+        // Create a fast track if the record thread has fast capture to get better performance.
+        // Only enable fast mode when no resampling is needed.
+        inputFlags = (audio_input_flags_t) (inputFlags | AUDIO_INPUT_FLAG_FAST);
+    } else {
+        // Fast mode is not available in this case.
+        inputFlags = (audio_input_flags_t) (inputFlags & ~AUDIO_INPUT_FLAG_FAST);
+    }
     sp<RecordThread::PatchRecord> tempRecordTrack = new (std::nothrow) RecordThread::PatchRecord(
                                              mRecord.thread().get(),
                                              sampleRate,
@@ -447,12 +482,21 @@
 
     audio_output_flags_t outputFlags = mAudioPatch.sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
             mAudioPatch.sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
+    audio_stream_type_t streamType = AUDIO_STREAM_PATCH;
+    if (mAudioPatch.num_sources == 2 && mAudioPatch.sources[1].type == AUDIO_PORT_TYPE_MIX) {
+        // "reuse one existing output mix" case
+        streamType = mAudioPatch.sources[1].ext.mix.usecase.stream;
+    }
+    if (mPlayback.thread()->hasFastMixer()) {
+        // Create a fast track if the playback thread has a fast mixer to get better performance.
+        outputFlags = (audio_output_flags_t) (outputFlags | AUDIO_OUTPUT_FLAG_FAST);
+    }
 
     // create a special playback track to render to playback thread.
     // this track is given the same buffer as the PatchRecord buffer
     sp<PlaybackThread::PatchTrack> tempPatchTrack = new (std::nothrow) PlaybackThread::PatchTrack(
                                            mPlayback.thread().get(),
-                                           mAudioPatch.sources[1].ext.mix.usecase.stream,
+                                           streamType,
                                            sampleRate,
                                            outChannelMask,
                                            format,
@@ -503,31 +547,59 @@
     // reverse due to internal biases).
     //
     // TODO: is this stable enough? Consider a PatchTrack synchronized version of this.
-    double recordServerLatencyMs;
-    if (recordTrack->getServerLatencyMs(&recordServerLatencyMs) != OK) return INVALID_OPERATION;
 
-    double playbackTrackLatencyMs;
-    if (playbackTrack->getTrackLatencyMs(&playbackTrackLatencyMs) != OK) return INVALID_OPERATION;
+    // For PCM tracks get server latency.
+    if (audio_is_linear_pcm(recordTrack->format())) {
+        double recordServerLatencyMs, playbackTrackLatencyMs;
+        if (recordTrack->getServerLatencyMs(&recordServerLatencyMs) == OK
+                && playbackTrack->getTrackLatencyMs(&playbackTrackLatencyMs) == OK) {
+            *latencyMs = recordServerLatencyMs + playbackTrackLatencyMs;
+            return OK;
+        }
+    }
 
-    *latencyMs = recordServerLatencyMs + playbackTrackLatencyMs;
-    return OK;
+    // See if kernel latencies are available.
+    // If so, do a frame diff and time difference computation to estimate
+    // the total patch latency. This requires that the frame counts reported by the
+    // HAL are matched properly in the case of record overruns and playback underruns.
+    ThreadBase::TrackBase::FrameTime recordFT{}, playFT{};
+    recordTrack->getKernelFrameTime(&recordFT);
+    playbackTrack->getKernelFrameTime(&playFT);
+    if (recordFT.timeNs > 0 && playFT.timeNs > 0) {
+        const int64_t frameDiff = recordFT.frames - playFT.frames;
+        const int64_t timeDiffNs = recordFT.timeNs - playFT.timeNs;
+
+        // It is possible that the patch track and patch record have a large time disparity because
+        // one thread runs while the other is stopped.  We arbitrarily choose the maximum timestamp
+        // time difference based on how often we expect the timestamps to update in normal operation
+        // (typically no more than 50 ms).
+        //
+        // If the timestamps aren't sampled close enough, the patch latency is not
+        // considered valid.
+        //
+        // TODO: change this based on more experiments.
+        constexpr int64_t maxValidTimeDiffNs = 200 * NANOS_PER_MILLISECOND;
+        if (std::abs(timeDiffNs) < maxValidTimeDiffNs) {
+            *latencyMs = frameDiff * 1e3 / recordTrack->sampleRate()
+                   - timeDiffNs * 1e-6;
+            return OK;
+        }
+    }
+
+    return INVALID_OPERATION;
 }
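Note on the kernel-time estimate above, as a worked sketch with invented numbers: suppose the record side reports frame 48000 at t = 1000 ms and the playback side reports frame 43200 at t = 995 ms, at 48 kHz.

    const int64_t frameDiff = 48000 - 43200;                 // 4800 frames still in flight
    const int64_t timeDiffNs = 5 * NANOS_PER_MILLISECOND;    // record timestamp is 5 ms newer
    const double latencyMs = frameDiff * 1e3 / 48000         // 100 ms of buffered audio
                           - timeDiffNs * 1e-6;              // minus the sampling skew -> 95 ms

Both timestamps fall inside the 200 ms validity window, so the patch would report 95 ms.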
 
-String8 AudioFlinger::PatchPanel::Patch::dump(audio_patch_handle_t myHandle)
+String8 AudioFlinger::PatchPanel::Patch::dump(audio_patch_handle_t myHandle) const
 {
-    String8 result;
-
     // TODO: Consider table dump form for patches, just like tracks.
-    result.appendFormat("Patch %d: thread %p => thread %p",
-            myHandle, mRecord.thread().get(), mPlayback.thread().get());
+    String8 result = String8::format("Patch %d: thread %p => thread %p",
+            myHandle, mRecord.const_thread().get(), mPlayback.const_thread().get());
 
     // add latency if it exists
     double latencyMs;
     if (getLatencyMs(&latencyMs) == OK) {
         result.appendFormat("  latency: %.2lf", latencyMs);
     }
-
-    result.append("\n");
     return result;
 }
 
@@ -598,6 +670,7 @@
     }
 
     mPatches.erase(iter);
+    removeSoftwarePatchFromInsertedModules(handle);
     return status;
 }
 
@@ -609,35 +682,114 @@
     return NO_ERROR;
 }
 
-sp<DeviceHalInterface> AudioFlinger::PatchPanel::findHwDeviceByModule(audio_module_handle_t module)
+status_t AudioFlinger::PatchPanel::getDownstreamSoftwarePatches(
+        audio_io_handle_t stream,
+        std::vector<AudioFlinger::PatchPanel::SoftwarePatch> *patches) const
+{
+    for (const auto& module : mInsertedModules) {
+        if (module.second.streams.count(stream)) {
+            for (const auto& patchHandle : module.second.sw_patches) {
+                const auto& patch_iter = mPatches.find(patchHandle);
+                if (patch_iter != mPatches.end()) {
+                    const Patch &patch = patch_iter->second;
+                    patches->emplace_back(*this, patchHandle,
+                            patch.mPlayback.const_thread()->id(),
+                            patch.mRecord.const_thread()->id());
+                } else {
+                    ALOGE("Stale patch handle in the cache: %d", patchHandle);
+                }
+            }
+            return OK;
+        }
+    }
+    // The stream is not associated with any of the inserted modules.
+    return BAD_VALUE;
+}
+
+void AudioFlinger::PatchPanel::notifyStreamOpened(
+        AudioHwDevice *audioHwDevice, audio_io_handle_t stream)
+{
+    if (audioHwDevice->isInsert()) {
+        mInsertedModules[audioHwDevice->handle()].streams.insert(stream);
+    }
+}
+
+void AudioFlinger::PatchPanel::notifyStreamClosed(audio_io_handle_t stream)
+{
+    for (auto& module : mInsertedModules) {
+        module.second.streams.erase(stream);
+    }
+}
+
+AudioHwDevice* AudioFlinger::PatchPanel::findAudioHwDeviceByModule(audio_module_handle_t module)
 {
     if (module == AUDIO_MODULE_HANDLE_NONE) return nullptr;
     ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(module);
     if (index < 0) {
+        ALOGW("%s() bad hw module %d", __func__, module);
         return nullptr;
     }
-    return mAudioFlinger.mAudioHwDevs.valueAt(index)->hwDevice();
+    return mAudioFlinger.mAudioHwDevs.valueAt(index);
 }
 
-void AudioFlinger::PatchPanel::dump(int fd)
+sp<DeviceHalInterface> AudioFlinger::PatchPanel::findHwDeviceByModule(audio_module_handle_t module)
 {
+    AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(module);
+    return audioHwDevice ? audioHwDevice->hwDevice() : nullptr;
+}
+
+void AudioFlinger::PatchPanel::addSoftwarePatchToInsertedModules(
+        audio_module_handle_t module, audio_patch_handle_t handle)
+{
+    mInsertedModules[module].sw_patches.insert(handle);
+}
+
+void AudioFlinger::PatchPanel::removeSoftwarePatchFromInsertedModules(
+        audio_patch_handle_t handle)
+{
+    for (auto& module : mInsertedModules) {
+        module.second.sw_patches.erase(handle);
+    }
+}
+
+void AudioFlinger::PatchPanel::dump(int fd) const
+{
+    String8 patchPanelDump;
+    const char *indent = "  ";
+
     // Only dump software patches.
     bool headerPrinted = false;
-    for (auto& iter : mPatches) {
+    for (const auto& iter : mPatches) {
         if (iter.second.isSoftware()) {
             if (!headerPrinted) {
-                String8 header("\nSoftware patches:\n");
-                write(fd, header.string(), header.size());
+                patchPanelDump += "\nSoftware patches:\n";
                 headerPrinted = true;
             }
-            String8 patchDump("  ");
-            patchDump.append(iter.second.dump(iter.first));
-            write(fd, patchDump.string(), patchDump.size());
+            patchPanelDump.appendFormat("%s%s\n", indent, iter.second.dump(iter.first).string());
         }
     }
-    if (headerPrinted) {
-        String8 trailing("\n");
-        write(fd, trailing.string(), trailing.size());
+
+    headerPrinted = false;
+    for (const auto& module : mInsertedModules) {
+        if (!module.second.streams.empty() || !module.second.sw_patches.empty()) {
+            if (!headerPrinted) {
+                patchPanelDump += "\nTracked inserted modules:\n";
+                headerPrinted = true;
+            }
+            String8 moduleDump = String8::format("Module %d: I/O handles: ", module.first);
+            for (const auto& stream : module.second.streams) {
+                moduleDump.appendFormat("%d ", stream);
+            }
+            moduleDump.append("; SW Patches: ");
+            for (const auto& patch : module.second.sw_patches) {
+                moduleDump.appendFormat("%d ", patch);
+            }
+            patchPanelDump.appendFormat("%s%s\n", indent, moduleDump.string());
+        }
+    }
+
+    if (!patchPanelDump.isEmpty()) {
+        write(fd, patchPanelDump.string(), patchPanelDump.size());
     }
 }
 
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 5d6bf00..2d9bd8e 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -19,9 +19,32 @@
     #error This header file should only be included from AudioFlinger.h
 #endif
 
+
 // PatchPanel is concealed within AudioFlinger, their lifetimes are the same.
 class PatchPanel {
 public:
+    class SoftwarePatch {
+      public:
+        SoftwarePatch(const PatchPanel &patchPanel, audio_patch_handle_t patchHandle,
+                audio_io_handle_t playbackThreadHandle, audio_io_handle_t recordThreadHandle)
+                : mPatchPanel(patchPanel), mPatchHandle(patchHandle),
+                  mPlaybackThreadHandle(playbackThreadHandle),
+                  mRecordThreadHandle(recordThreadHandle) {}
+        SoftwarePatch(const SoftwarePatch&) = default;
+        SoftwarePatch& operator=(const SoftwarePatch&) = default;
+
+        // Must be called under AudioFlinger::mLock
+        status_t getLatencyMs_l(double *latencyMs) const;
+        audio_patch_handle_t getPatchHandle() const { return mPatchHandle; };
+        audio_io_handle_t getPlaybackThreadHandle() const { return mPlaybackThreadHandle; };
+        audio_io_handle_t getRecordThreadHandle() const { return mRecordThreadHandle; };
+      private:
+        const PatchPanel &mPatchPanel;
+        const audio_patch_handle_t mPatchHandle;
+        const audio_io_handle_t mPlaybackThreadHandle;
+        const audio_io_handle_t mRecordThreadHandle;
+    };
+
     explicit PatchPanel(AudioFlinger* audioFlinger) : mAudioFlinger(*audioFlinger) {}
 
     /* List connected audio ports and their attributes */
@@ -42,7 +65,16 @@
     status_t listAudioPatches(unsigned int *num_patches,
                                       struct audio_patch *patches);
 
-    void dump(int fd);
+    // Retrieves all currently established software patches for a stream
+    // opened on an intermediate module.
+    status_t getDownstreamSoftwarePatches(audio_io_handle_t stream,
+            std::vector<SoftwarePatch> *patches) const;
+
+    // Notifies patch panel about all opened and closed streams.
+    void notifyStreamOpened(AudioHwDevice *audioHwDevice, audio_io_handle_t stream);
+    void notifyStreamClosed(audio_io_handle_t stream);
+
+    void dump(int fd) const;
 
 private:
     template<typename ThreadType, typename TrackType>
@@ -65,6 +97,7 @@
         audio_patch_handle_t handle() const { return mHandle; }
         sp<ThreadType> thread() { return mThread; }
         sp<TrackType> track() { return mTrack; }
+        sp<const ThreadType> const_thread() const { return mThread; }
         sp<const TrackType> const_track() const { return mTrack; }
 
         void closeConnections(PatchPanel *panel) {
@@ -122,7 +155,7 @@
         // returns the latency of the patch (from record to playback).
         status_t getLatencyMs(double *latencyMs) const;
 
-        String8 dump(audio_patch_handle_t myHandle);
+        String8 dump(audio_patch_handle_t myHandle) const;
 
         // Note that audio_patch::id is only unique within a HAL module
         struct audio_patch              mAudioPatch;
@@ -138,8 +171,38 @@
         Endpoint<RecordThread, RecordThread::PatchRecord> mRecord;
     };
 
+    AudioHwDevice* findAudioHwDeviceByModule(audio_module_handle_t module);
     sp<DeviceHalInterface> findHwDeviceByModule(audio_module_handle_t module);
+    void addSoftwarePatchToInsertedModules(
+            audio_module_handle_t module, audio_patch_handle_t handle);
+    void removeSoftwarePatchFromInsertedModules(audio_patch_handle_t handle);
 
     AudioFlinger &mAudioFlinger;
     std::map<audio_patch_handle_t, Patch> mPatches;
+
+    // This map allows going from a thread to "downstream" software patches
+    // when a processing module is inserted in between. Example:
+    //
+    //  from map value.streams                               map key
+    //  [Mixer thread] --> [Virtual output device] --> [Processing module] ---\
+    //      [Hardware module] <-- [Physical output device] <-- [S/W Patch] <--/
+    //                                                 from map value.sw_patches
+    //
+    // This allows the mixer thread to look up the threads of the software patch
+    // for propagating timing info, parameters, etc.
+    //
+    // The current assumptions are:
+    //   1) The processing module acts as a mixer with several outputs which
+    //      represent differently downmixed and / or encoded versions of the same
+    //      mixed stream. There is no 1:1 correspondence between the input streams
+    //      and the software patches, but rather an N:N correspondence between
+    //      a group of streams and a group of patches.
+    //   2) There are only a couple of inserted processing modules in the system,
+    //      so when looking for a stream or patch handle we can iterate over
+    //      all modules.
+    struct ModuleConnections {
+        std::set<audio_io_handle_t> streams;
+        std::set<audio_patch_handle_t> sw_patches;
+    };
+    std::map<audio_module_handle_t, ModuleConnections> mInsertedModules;
 };
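A minimal usage sketch of the new downstream-patch query, mirroring the call pattern
introduced later in the Threads.cpp hunks (illustrative only, not literal patch code;
mAudioFlinger, mPatchPanel and id() are the members/accessors used there, and error
handling is trimmed):

    std::vector<PatchPanel::SoftwarePatch> swPatches;
    double latencyMs = 0.;
    // getLatencyMs_l() must be called under AudioFlinger::mLock, hence tryLock().
    if (mAudioFlinger->mLock.tryLock() == NO_ERROR) {
        if (mAudioFlinger->mPatchPanel.getDownstreamSoftwarePatches(id(), &swPatches) == OK
                && !swPatches.empty()
                && swPatches[0].getLatencyMs_l(&latencyMs) == OK) {
            ALOGV("downstream patch %d has latency %lf ms",
                    swPatches[0].getPatchHandle(), latencyMs);
        }
        mAudioFlinger->mLock.unlock();
    }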
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index c673a6f..b5f61e7 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -23,6 +23,8 @@
 #include "Configuration.h"
 #include <math.h>
 #include <fcntl.h>
+#include <memory>
+#include <string>
 #include <linux/futex.h>
 #include <sys/stat.h>
 #include <sys/syscall.h>
@@ -40,6 +42,7 @@
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
 #include <audio_utils/minifloat.h>
+#include <json/json.h>
 #include <system/audio_effects/effect_ns.h>
 #include <system/audio_effects/effect_aec.h>
 #include <system/audio.h>
@@ -71,7 +74,7 @@
 #endif
 
 #ifdef DEBUG_CPU_USAGE
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 
@@ -333,9 +336,9 @@
 #ifdef DEBUG_CPU_USAGE
 private:
     ThreadCpuUsage mCpuUsage;           // instantaneous thread CPU usage in wall clock ns
-    CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
+    audio_utils::Statistics<double> mWcStats; // statistics on thread CPU usage in wall clock ns
 
-    CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
+    audio_utils::Statistics<double> mHzStats; // statistics on thread CPU usage in cycles
 
     int mCpuNum;                        // thread's current CPU number
     int mCpukHz;                        // frequency of thread's current CPU in kHz
@@ -361,7 +364,7 @@
 
     // record sample for wall clock statistics
     if (valid) {
-        mWcStats.sample(wcNs);
+        mWcStats.add(wcNs);
     }
 
     // get the current CPU number
@@ -380,26 +383,26 @@
 
     // if no change in CPU number or frequency, then record sample for cycle statistics
     if (valid && mCpukHz > 0) {
-        double cycles = wcNs * cpukHz * 0.000001;
-        mHzStats.sample(cycles);
+        const double cycles = wcNs * cpukHz * 0.000001;
+        mHzStats.add(cycles);
     }
 
-    unsigned n = mWcStats.n();
+    const unsigned n = mWcStats.getN();
     // mCpuUsage.elapsed() is expensive, so don't call it every loop
     if ((n & 127) == 1) {
-        long long elapsed = mCpuUsage.elapsed();
+        const long long elapsed = mCpuUsage.elapsed();
         if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
-            double perLoop = elapsed / (double) n;
-            double perLoop100 = perLoop * 0.01;
-            double perLoop1k = perLoop * 0.001;
-            double mean = mWcStats.mean();
-            double stddev = mWcStats.stddev();
-            double minimum = mWcStats.minimum();
-            double maximum = mWcStats.maximum();
-            double meanCycles = mHzStats.mean();
-            double stddevCycles = mHzStats.stddev();
-            double minCycles = mHzStats.minimum();
-            double maxCycles = mHzStats.maximum();
+            const double perLoop = elapsed / (double) n;
+            const double perLoop100 = perLoop * 0.01;
+            const double perLoop1k = perLoop * 0.001;
+            const double mean = mWcStats.getMean();
+            const double stddev = mWcStats.getStdDev();
+            const double minimum = mWcStats.getMin();
+            const double maximum = mWcStats.getMax();
+            const double meanCycles = mHzStats.getMean();
+            const double stddevCycles = mHzStats.getStdDev();
+            const double minCycles = mHzStats.getMin();
+            const double maxCycles = mHzStats.getMax();
             mCpuUsage.resetElapsed();
             mWcStats.reset();
             mHzStats.reset();
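For reference, the audio_utils::Statistics<double> calls this hunk relies on (and that the
downstream-latency code added further below also uses), gathered into one hedged sketch.
Illustrative only: the members above show the class is default-constructible, and the
0.999 alpha mirrors the downstreamLatencyStatMs declaration later in this file.

    audio_utils::Statistics<double> stats(0.999 /* alpha */);
    stats.add(1.5);                    // accumulate one sample
    stats.add(2.5);
    if (stats.getN() > 0) {
        ALOGV("n=%lld mean=%lf stddev=%lf min=%lf max=%lf",
                (long long)stats.getN(), stats.getMean(), stats.getStdDev(),
                stats.getMin(), stats.getMax());
    }
    stats.reset();                     // start a fresh measurement window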
@@ -853,6 +856,16 @@
     dprintf(fd, "  Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
     dprintf(fd, "  Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
 
+    // Dump timestamp statistics for the Thread types that support it.
+    if (mType == RECORD
+            || mType == MIXER
+            || mType == DUPLICATING
+            || mType == DIRECT
+            || mType == OFFLOAD) {
+        dprintf(fd, "  Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
+        dprintf(fd, "  Timestamp corrected: %s\n", isTimestampCorrectionEnabled() ? "yes" : "no");
+    }
+
     if (locked) {
         mLock.unlock();
     }
@@ -1720,10 +1733,21 @@
         if (mOutput->audioHwDev->canSetMasterMute()) {
             mMasterMute = false;
         }
+        mIsMsdDevice = strcmp(
+                mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
     }
 
     readOutputParameters_l();
 
+    // TODO: We may also want to match on address in addition to device type for
+    // AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+    if (type == MIXER || type == DIRECT) {
+        mTimestampCorrectedDevices = (audio_devices_t)property_get_int64(
+                "audio.timestamp.corrected_output_devices",
+                (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_OUT_BUS // turn on by default for MSD
+                                       : AUDIO_DEVICE_NONE));
+    }
+
     // ++ operator does not compile
     for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_FOR_POLICY_CNT;
             stream = (audio_stream_type_t) (stream + 1)) {
@@ -1752,6 +1776,11 @@
     mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
 }
 
+Json::Value AudioFlinger::PlaybackThread::getJsonDump() const
+{
+    return Json::Value(Json::objectValue);
+}
+
 void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
@@ -2303,6 +2332,11 @@
     return mStreamTypes[stream].volume;
 }
 
+void AudioFlinger::PlaybackThread::setVolumeForOutput_l(float left, float right) const
+{
+    mOutput->stream->setVolume(left, right);
+}
+
 // addTrack_l() must be called with ThreadBase::mLock held
 status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
 {
@@ -2315,15 +2349,13 @@
         if (track->isExternalTrack()) {
             TrackBase::track_state state = track->mState;
             mLock.unlock();
-            status = AudioSystem::startOutput(mId, track->streamType(),
-                                              track->sessionId());
+            status = AudioSystem::startOutput(track->portId());
             mLock.lock();
             // abort track was stopped/paused while we released the lock
             if (state != track->mState) {
                 if (status == NO_ERROR) {
                     mLock.unlock();
-                    AudioSystem::stopOutput(mId, track->streamType(),
-                                            track->sessionId());
+                    AudioSystem::stopOutput(track->portId());
                     mLock.lock();
                 }
                 return INVALID_OPERATION;
@@ -2474,6 +2506,11 @@
     Mutex::Autolock _l(mLock);
     // reject out of sequence requests
     if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
+        // Register discontinuity when HW drain is completed because that can cause
+        // the timestamp frame position to reset to 0 for direct and offload threads.
+        // (Out of sequence requests are ignored, since the discontinuity would be handled
+        // elsewhere, e.g. in flush).
+        mTimestampVerifier.discontinuity();
         mDrainSequence &= ~1;
         mWaitWorkCV.signal();
     }
@@ -2798,15 +2835,13 @@
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             if (track->isExternalTrack()) {
-                AudioSystem::stopOutput(mId, track->streamType(),
-                                        track->sessionId());
+                AudioSystem::stopOutput(track->portId());
 #ifdef ADD_BATTERY_DATA
                 // to track the speaker usage
                 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
 #endif
                 if (track->isTerminated()) {
-                    AudioSystem::releaseOutput(mId, track->streamType(),
-                                               track->sessionId());
+                    AudioSystem::releaseOutput(track->portId());
                 }
             }
         }
@@ -3182,6 +3217,17 @@
 
     checkSilentMode_l();
 
+    // DIRECT and OFFLOAD threads should reset frame count to zero on stop/flush
+    // TODO: add confirmation checks:
+    // 1) Do DIRECT threads with linear PCM format really reset to 0?
+    // 2) Is the frame count really valid if not linear PCM?
+    // 3) Are all 64 bits of position returned, not just the lowest 32 bits?
+    if (mType == OFFLOAD || mType == DIRECT) {
+        mTimestampVerifier.setDiscontinuityMode(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
+    }
+    audio_utils::Statistics<double> downstreamLatencyStatMs(0.999 /* alpha */);
+    audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+
     while (!exitPending())
     {
         // Log merge requests are performed during AudioFlinger binder transactions, but
@@ -3192,6 +3238,46 @@
 
         Vector< sp<EffectChain> > effectChains;
 
+        // If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
+        //
+        // Note: we access outDevice() outside of mLock.
+        if (isMsdDevice() && (outDevice() & AUDIO_DEVICE_OUT_BUS) != 0) {
+            // Here, we try for the AF lock, but do not block on it, as the
+            // latency value is informational only.
+            if (mAudioFlinger->mLock.tryLock() == NO_ERROR) {
+                std::vector<PatchPanel::SoftwarePatch> swPatches;
+                double latencyMs;
+                status_t status = INVALID_OPERATION;
+                audio_patch_handle_t downstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+                if (mAudioFlinger->mPatchPanel.getDownstreamSoftwarePatches(id(), &swPatches) == OK
+                        && swPatches.size() > 0) {
+                    status = swPatches[0].getLatencyMs_l(&latencyMs);
+                    downstreamPatchHandle = swPatches[0].getPatchHandle();
+                }
+                if (downstreamPatchHandle != lastDownstreamPatchHandle) {
+                    downstreamLatencyStatMs.reset();
+                    lastDownstreamPatchHandle = downstreamPatchHandle;
+                }
+                if (status == OK) {
+                    // verify downstream latency (we assume a max reasonable
+                    // latency of 1 second).
+                    if (latencyMs >= 0. && latencyMs <= 1000.) {
+                        ALOGV("new downstream latency %lf ms", latencyMs);
+                        downstreamLatencyStatMs.add(latencyMs);
+                    } else {
+                        ALOGD("out of range downstream latency %lf ms", latencyMs);
+                    }
+                }
+                mAudioFlinger->mLock.unlock();
+            }
+        } else {
+            if (lastDownstreamPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
+                // our device is no longer AUDIO_DEVICE_OUT_BUS, reset patch handle and stats.
+                downstreamLatencyStatMs.reset();
+                lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+            }
+        }
+
         { // scope for mLock
 
             Mutex::Autolock _l(mLock);
@@ -3205,12 +3291,49 @@
                 logString = NULL;
             }
 
+            // Collect timestamp statistics for the Playback Thread types that support it.
+            if (mType == MIXER
+                    || mType == DUPLICATING
+                    || mType == DIRECT
+                    || mType == OFFLOAD) { // no indentation
             // Gather the framesReleased counters for all active tracks,
             // and associate with the sink frames written out.  We need
             // this to convert the sink timestamp to the track timestamp.
             bool kernelLocationUpdate = false;
             ExtendedTimestamp timestamp; // use private copy to fetch
-            if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
+            if (mStandby) {
+                mTimestampVerifier.discontinuity();
+            } else if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
+                mTimestampVerifier.add(timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+                        mSampleRate);
+
+                if (isTimestampCorrectionEnabled()) {
+                    ALOGV("TS_BEFORE: %d %lld %lld", id(),
+                            (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+                            (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
+                    auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
+                    timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+                            = correctedTimestamp.mFrames;
+                    timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
+                            = correctedTimestamp.mTimeNs;
+                    ALOGV("TS_AFTER: %d %lld %lld", id(),
+                            (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+                            (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
+
+                    // Note: Downstream latency is only added if timestamp correction is enabled.
+                    if (downstreamLatencyStatMs.getN() > 0) { // we have latency info.
+                        const int64_t newPosition =
+                                timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+                                - int64_t(downstreamLatencyStatMs.getMean() * mSampleRate * 1e-3);
+                        // prevent retrograde
+                        timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = max(
+                                newPosition,
+                                (mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+                                        - mSuspendedFrames));
+                    }
+                }
+
                 // We always fetch the timestamp here because often the downstream
                 // sink will block while writing.
 
@@ -3241,7 +3364,10 @@
                         + mSuspendedFrames; // add frames discarded when suspended
                 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
                         timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+            } else {
+                mTimestampVerifier.error();
             }
+
             // mFramesWritten for non-offloaded tracks are contiguous
             // even after standby() is called. This is useful for the track frame
             // to sink frame mapping.
@@ -3274,6 +3400,7 @@
                     }
                 }
             }
+            } // if (mType ... ) { // no indentation
 #if 0
             // logFormat example
             if (z % 100 == 0) {
@@ -3314,7 +3441,7 @@
 
                 continue;
             }
-            if ((!mActiveTracks.size() && systemTime() > mStandbyTimeNs) ||
+            if ((mActiveTracks.isEmpty() && systemTime() > mStandbyTimeNs) ||
                                    isSuspended()) {
                 // put audio hardware into standby after short delay
                 if (shouldStandby_l()) {
@@ -3328,7 +3455,7 @@
                     mStandby = true;
                 }
 
-                if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
+                if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
                     // we're about to wait, flush the binder command buffer
                     IPCThreadState::self()->flushCommands();
 
@@ -5077,9 +5204,8 @@
         // while we are dumping it.  It may be inconsistent, but it won't mutate!
         // This is a large object so we place it on the heap.
         // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
-        const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+        const std::unique_ptr<FastMixerDumpState> copy(new FastMixerDumpState(mFastMixerDumpState));
         copy->dump(fd);
-        delete copy;
 
 #ifdef STATE_QUEUE_DUMP
         // Similar for state queue
@@ -5102,6 +5228,22 @@
     }
 }
 
+Json::Value AudioFlinger::MixerThread::getJsonDump() const
+{
+    Json::Value root;
+    if (hasFastMixer()) {
+        // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+        // while we are dumping it.  It may be inconsistent, but it won't mutate!
+        // This is a large object so we place it on the heap.
+        // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+        const std::unique_ptr<FastMixerDumpState> copy(new FastMixerDumpState(mFastMixerDumpState));
+        root["fastmixer_stats"] = copy->getJsonDump();
+    } else {
+        root["fastmixer_stats"] = "no_fastmixer";
+    }
+    return root;
+}
+
 uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
 {
     return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2;
@@ -5180,20 +5322,20 @@
             mLeftVolFloat = left;
             mRightVolFloat = right;
 
-            // Convert volumes from float to 8.24
-            uint32_t vl = (uint32_t)(left * (1 << 24));
-            uint32_t vr = (uint32_t)(right * (1 << 24));
-
             // Delegate volume control to effect in track effect chain if needed
             // only one effect chain can be present on DirectOutputThread, so if
             // there is one, the track is connected to it
             if (!mEffectChains.isEmpty()) {
-                mEffectChains[0]->setVolume_l(&vl, &vr);
-                left = (float)vl / (1 << 24);
-                right = (float)vr / (1 << 24);
+                // if effect chain exists, volume is handled by it.
+                // Convert volumes from float to 8.24
+                uint32_t vl = (uint32_t)(left * (1 << 24));
+                uint32_t vr = (uint32_t)(right * (1 << 24));
+                // Direct/Offload effect chains set output volume in setVolume_l().
+                (void)mEffectChains[0]->setVolume_l(&vl, &vr);
+            } else {
+                // otherwise we directly set the volume.
+                setVolumeForOutput_l(left, right);
             }
-            status_t result = mOutput->stream->setVolume(left, right);
-            ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
         }
     }
 }
@@ -5601,6 +5743,7 @@
     mOutput->flush();
     mHwPaused = false;
     mFlushPending = false;
+    mTimestampVerifier.discontinuity(); // DIRECT and OFFLOADED flush resets frame count.
 }
 
 int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
@@ -5935,6 +6078,14 @@
                     track->presentationComplete(framesWritten, audioHALFrames);
                     track->reset();
                     tracksToRemove->add(track);
+                    // DIRECT and OFFLOADED stop resets frame counts.
+                    if (!mUseAsyncWrite) {
+                        // If we don't get explicit drain notification we must
+                        // register discontinuity regardless of whether this is
+                        // the previous (!last) or the upcoming (last) track
+                        // to avoid skipping the discontinuity.
+                        mTimestampVerifier.discontinuity();
+                    }
                 }
             } else {
                 // No buffers for this track. Give it a few chances to
@@ -6303,8 +6454,20 @@
     snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
     mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
 
+    if (mInput != nullptr && mInput->audioHwDev != nullptr) {
+        mIsMsdDevice = strcmp(
+                mInput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
+    }
+
     readInputParameters_l();
 
+    // TODO: We may also want to match on address in addition to device type for
+    // AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_IN_REMOTE_SUBMIX
+    mTimestampCorrectedDevices = (audio_devices_t)property_get_int64(
+            "audio.timestamp.corrected_input_devices",
+            (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_IN_BUS // turn on by default for MSD
+                                   : AUDIO_DEVICE_NONE));
+
     // create an NBAIO source for the HAL input stream, and negotiate
     mInputSource = new AudioStreamInSource(input->stream);
     size_t numCounterOffers = 0;
@@ -6591,7 +6754,7 @@
             }
 
             // sleep if there are no active tracks to process
-            if (activeTracks.size() == 0) {
+            if (activeTracks.isEmpty()) {
                 if (sleepUs == 0) {
                     sleepUs = kRecordThreadSleepUs;
                 }
@@ -6642,6 +6805,14 @@
                 }
                 didModify = true;
             }
+            AudioBufferProvider* abp = (fastTrack != 0 && fastTrack->isPatchTrack()) ?
+                    reinterpret_cast<AudioBufferProvider*>(fastTrack.get()) : nullptr;
+            if (state->mFastPatchRecordBufferProvider != abp) {
+                state->mFastPatchRecordBufferProvider = abp;
+                state->mFastPatchRecordFormat = fastTrack == 0 ?
+                        AUDIO_FORMAT_INVALID : fastTrack->format();
+                didModify = true;
+            }
             sq->end(didModify);
             if (didModify) {
                 sq->push(block);
@@ -6667,8 +6838,7 @@
 
         // If an NBAIO source is present, use it to read the normal capture's data
         if (mPipeSource != 0) {
-            size_t framesToRead = mBufferSize / mFrameSize;
-            framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
+            size_t framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
 
             // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
             // to the full buffer point (clearing the overflow condition).  Upon OVERRUN error,
@@ -6730,8 +6900,24 @@
         // Update server timestamp with kernel stats
         if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
             int64_t position, time;
-            int ret = mInput->stream->getCapturePosition(&position, &time);
-            if (ret == NO_ERROR) {
+            if (mStandby) {
+                mTimestampVerifier.discontinuity();
+            } else if (mInput->stream->getCapturePosition(&position, &time) == NO_ERROR
+                    && time > mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]) {
+
+                mTimestampVerifier.add(position, time, mSampleRate);
+
+                // Correct timestamps
+                if (isTimestampCorrectionEnabled()) {
+                    ALOGV("TS_BEFORE: %d %lld %lld",
+                            id(), (long long)time, (long long)position);
+                    auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
+                    position = correctedTimestamp.mFrames;
+                    time = correctedTimestamp.mTimeNs;
+                    ALOGV("TS_AFTER: %d %lld %lld",
+                            id(), (long long)time, (long long)position);
+                }
+
                 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
                 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
                 // Note: In general record buffers should tend to be empty in
@@ -6739,6 +6925,8 @@
                 //
                 // Also, it is not advantageous to call get_presentation_position during the read
                 // as the read obtains a lock, preventing the timestamp call from executing.
+            } else {
+                mTimestampVerifier.error();
             }
         }
         // Use this to track timestamp information
@@ -6754,6 +6942,7 @@
             goto unlock;
         }
         ALOG_ASSERT(framesRead > 0);
+        mFramesRead += framesRead;
 
 #ifdef TEE_SINK
         (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
@@ -7012,6 +7201,12 @@
         goto Exit;
     }
 
+    if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
+        ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
     if (*pSampleRate == 0) {
         *pSampleRate = mSampleRate;
     }
@@ -7372,7 +7567,8 @@
     audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
     dprintf(fd, "  AudioStreamIn: %p flags %#x (%s)\n",
             input, flags, inputFlagsToString(flags).c_str());
-    if (mActiveTracks.size() == 0) {
+    dprintf(fd, "  Frames read: %lld\n", (long long)mFramesRead);
+    if (mActiveTracks.isEmpty()) {
         dprintf(fd, "  No active record clients\n");
     }
 
@@ -7381,7 +7577,8 @@
         (void)input->stream->dump(fd);
     }
 
-    const double latencyMs = - mTimestamp.getOutputServerLatencyMs(mSampleRate);
+    const double latencyMs = audio_is_linear_pcm(mFormat)
+            ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
     if (latencyMs != 0.) {
         dprintf(fd, "  NormalRecord latency ms: %.2lf\n", latencyMs);
     } else {
@@ -7395,9 +7592,8 @@
     // while we are dumping it.  It may be inconsistent, but it won't mutate!
     // This is a large object so we place it on the heap.
     // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
-    const FastCaptureDumpState *copy = new FastCaptureDumpState(mFastCaptureDumpState);
+    std::unique_ptr<FastCaptureDumpState> copy(new FastCaptureDumpState(mFastCaptureDumpState));
     copy->dump(fd);
-    delete copy;
 }
 
 void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -7726,10 +7922,15 @@
 {
     status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
-    mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
-    LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d", mChannelCount, FCC_8);
     mFormat = mHALFormat;
-    LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+    mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+    if (audio_is_linear_pcm(mFormat)) {
+        LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d",
+                mChannelCount, FCC_8);
+    } else {
+        // Encoded streams can have more than FCC_8 channels.
+        ALOGI("HAL format %#x is not linear pcm", mFormat);
+    }
     result = mInput->stream->getFrameSize(&mFrameSize);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
     result = mInput->stream->getBufferSize(&mBufferSize);
@@ -7834,7 +8035,7 @@
 status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
 {
     // only one chain per input thread
-    if (mEffectChains.size() != 0) {
+    if (!mEffectChains.isEmpty()) {
         ALOGW("addEffectChain_l() already one chain %p on thread %p", chain.get(), this);
         return INVALID_OPERATION;
     }
@@ -8047,7 +8248,7 @@
     }
     // This will decrement references and may cause the destruction of this thread.
     if (isOutput()) {
-        AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+        AudioSystem::releaseOutput(mPortId);
     } else {
         AudioSystem::releaseInput(mPortId);
     }
@@ -8161,7 +8362,7 @@
 
     bool silenced = false;
     if (isOutput()) {
-        ret = AudioSystem::startOutput(mId, streamType(), mSessionId);
+        ret = AudioSystem::startOutput(portId);
     } else {
         ret = AudioSystem::startInput(portId, &silenced);
     }
@@ -8170,10 +8371,10 @@
     // abort if start is rejected by audio policy manager
     if (ret != NO_ERROR) {
         ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
-        if (mActiveTracks.size() != 0) {
+        if (!mActiveTracks.isEmpty()) {
             mLock.unlock();
             if (isOutput()) {
-                AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+                AudioSystem::releaseOutput(portId);
             } else {
                 AudioSystem::releaseInput(portId);
             }
@@ -8245,8 +8446,8 @@
 
     mLock.unlock();
     if (isOutput()) {
-        AudioSystem::stopOutput(mId, streamType(), track->sessionId());
-        AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
+        AudioSystem::stopOutput(track->portId());
+        AudioSystem::releaseOutput(track->portId());
     } else {
         AudioSystem::stopInput(track->portId());
         AudioSystem::releaseInput(track->portId());
@@ -8271,7 +8472,7 @@
     if (mHalStream == 0) {
         return NO_INIT;
     }
-    if (mActiveTracks.size() != 0) {
+    if (!mActiveTracks.isEmpty()) {
         return INVALID_OPERATION;
     }
     mHalStream->standby();
@@ -8709,7 +8910,7 @@
     dprintf(fd, "  Attributes: content type %d usage %d source %d\n",
             mAttr.content_type, mAttr.usage, mAttr.source);
     dprintf(fd, "  Session: %d port Id: %d\n", mSessionId, mPortId);
-    if (mActiveTracks.size() == 0) {
+    if (mActiveTracks.isEmpty()) {
         dprintf(fd, "  No active clients\n");
     }
 }
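The timestamp bookkeeping added above repeats the same TimestampVerifier pattern for
playback and record threads. Condensed into one hedged sketch (the function and parameter
names below are illustrative; mTimestampVerifier, mSampleRate and
isTimestampCorrectionEnabled() are the members declared in the Threads.h hunks):

    // One call per mix/read cycle, after attempting to fetch a HAL timestamp.
    void onHalTimestampCycle(bool standby, bool gotHalTimestamp,
            int64_t *frames, int64_t *timeNs) {
        if (standby) {
            mTimestampVerifier.discontinuity();  // position may jump after standby
        } else if (gotHalTimestamp) {
            mTimestampVerifier.add(*frames, *timeNs, mSampleRate);
            if (isTimestampCorrectionEnabled()) {
                const auto corrected = mTimestampVerifier.getLastCorrectedTimestamp();
                *frames = corrected.mFrames;     // propagate the corrected values
                *timeNs = corrected.mTimeNs;
            }
        } else {
            mTimestampVerifier.error();          // counted and reported via toString()
        }
    }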
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index b691ca9..dce3d2e 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -393,6 +393,10 @@
 
                         void        broadcast_l();
 
+                virtual bool        isTimestampCorrectionEnabled() const { return false; }
+
+                bool                isMsdDevice() const { return mIsMsdDevice; }
+
     mutable     Mutex                   mLock;
 
 protected:
@@ -499,6 +503,10 @@
                 sp<NBLog::Writer>       mNBLogWriter;
                 bool                    mSystemReady;
                 ExtendedTimestamp       mTimestamp;
+                TimestampVerifier< // For timestamp statistics.
+                        int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
+                audio_devices_t         mTimestampCorrectedDevices = AUDIO_DEVICE_NONE;
+                bool                    mIsMsdDevice = false;
                 // A condition that must be evaluated by the thread loop has changed and
                 // we must not wait for async write callback in the thread loop before evaluating it
                 bool                    mSignalPending;
@@ -552,6 +560,9 @@
                     size_t          size() const {
                         return mActiveTracks.size();
                     }
+                    bool            isEmpty() const {
+                        return mActiveTracks.isEmpty();
+                    }
                     ssize_t         indexOf(const sp<T>& item) {
                         return mActiveTracks.indexOf(item);
                     }
@@ -655,6 +666,8 @@
     virtual             ~PlaybackThread();
 
                 void        dump(int fd, const Vector<String16>& args);
+                // Returns audio performance related data in JSON format as a Json::Value.
+    virtual     Json::Value getJsonDump() const;
 
     // Thread virtuals
     virtual     bool        threadLoop();
@@ -721,6 +734,8 @@
     virtual     void        setStreamMute(audio_stream_type_t stream, bool muted);
     virtual     float       streamVolume(audio_stream_type_t stream) const;
 
+                void        setVolumeForOutput_l(float left, float right) const;
+
                 sp<Track>   createTrack_l(
                                 const sp<AudioFlinger::Client>& client,
                                 audio_stream_type_t streamType,
@@ -808,6 +823,11 @@
                                        && mTracks.size() < PlaybackThread::kMaxTracks;
                             }
 
+                bool        isTimestampCorrectionEnabled() const override {
+                                const audio_devices_t device =
+                                        mOutDevice & mTimestampCorrectedDevices;
+                                return audio_is_output_devices(device) && popcount(device) > 0;
+                            }
 protected:
     // updated by readOutputParameters_l()
     size_t                          mNormalFrameCount;  // normal mixer and effects
@@ -1100,6 +1120,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
     virtual     void        dumpInternals(int fd, const Vector<String16>& args);
+                Json::Value getJsonDump() const override;
 
     virtual     bool        isTrackAllowed_l(
                                     audio_channel_mask_t channelMask, audio_format_t format,
@@ -1221,6 +1242,23 @@
     virtual     bool        hasFastMixer() const { return false; }
 
     virtual     int64_t     computeWaitTimeNs_l() const override;
+
+    status_t    threadloop_getHalTimestamp_l(ExtendedTimestamp *timestamp) const override {
+                    // For DIRECT and OFFLOAD threads, query the output sink directly.
+                    if (mOutput != nullptr) {
+                        uint64_t uposition64;
+                        struct timespec time;
+                        if (mOutput->getPresentationPosition(
+                                &uposition64, &time) == OK) {
+                            timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+                                    = (int64_t)uposition64;
+                            timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
+                                    = audio_utils_ns_from_timespec(&time);
+                            return NO_ERROR;
+                        }
+                    }
+                    return INVALID_OPERATION;
+                }
 };
 
 class OffloadThread : public DirectOutputThread {
@@ -1504,6 +1542,13 @@
 
             void        updateMetadata_l() override;
 
+            bool        fastTrackAvailable() const { return mFastTrackAvail; }
+
+            bool        isTimestampCorrectionEnabled() const override {
+                            // checks popcount for exactly one device.
+                            return audio_is_input_device(
+                                    mInDevice & mTimestampCorrectedDevices);
+                        }
 private:
             // Enter standby if not already in standby, and set mStandby flag
             void    standbyIfNotAlreadyInStandby();
@@ -1573,6 +1618,8 @@
             bool                                mFastTrackAvail;    // true if fast track available
             // common state to all record threads
             std::atomic_bool                    mBtNrecSuspended;
+
+            int64_t                             mFramesRead = 0;    // continuous running counter.
 };
 
 class MmapThread : public ThreadBase
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 95da9d7..a43cb75 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -187,6 +187,19 @@
                             return status;
                         }
 
+           // TODO: Consider making this external.
+           struct FrameTime {
+               int64_t frames;
+               int64_t timeNs;
+           };
+
+           // KernelFrameTime is updated per "mix" period even for non-PCM tracks.
+           void         getKernelFrameTime(FrameTime *ft) const {
+                           *ft = mKernelFrameTime.load();
+                        }
+
+           audio_format_t format() const { return mFormat; }
+
 protected:
     DISALLOW_COPY_AND_ASSIGN(TrackBase);
 
@@ -198,8 +211,6 @@
     // but putting it in TrackBase avoids the complexity of virtual inheritance
     virtual size_t  framesReady() const { return SIZE_MAX; }
 
-    audio_format_t format() const { return mFormat; }
-
     uint32_t channelCount() const { return mChannelCount; }
 
     audio_channel_mask_t channelMask() const { return mChannelMask; }
@@ -307,6 +318,7 @@
     bool                mServerLatencySupported = false;
     std::atomic<bool>   mServerLatencyFromTrack{}; // latency from track or server timestamp.
     std::atomic<double> mServerLatencyMs{};        // last latency pushed from server thread.
+    std::atomic<FrameTime> mKernelFrameTime{};     // last frame time on kernel side.
 };
 
 // PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
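A small self-contained illustration of the publish/consume pattern behind mKernelFrameTime
(sketch only; the global below stands in for the TrackBase member, and std::atomic over a
16-byte trivially copyable struct may not be lock-free, falling back to an internal lock):

    #include <atomic>
    #include <cstdint>

    struct FrameTime {
        int64_t frames;
        int64_t timeNs;
    };

    std::atomic<FrameTime> gKernelFrameTime{};      // cf. TrackBase::mKernelFrameTime

    void publish(int64_t frames, int64_t timeNs) {  // server/mixer thread side
        gKernelFrameTime.store(FrameTime{frames, timeNs});
    }

    FrameTime snapshot() {                          // reader side, cf. getKernelFrameTime()
        return gKernelFrameTime.load();             // both fields observed consistently
    }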
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 22e610e..78e6c6c 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -486,7 +486,7 @@
             wasActive = playbackThread->destroyTrack_l(this);
         }
         if (isExternalTrack() && !wasActive) {
-            AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, mSessionId);
+            AudioSystem::releaseOutput(mPortId);
         }
     }
 }
@@ -1258,6 +1258,16 @@
 void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
         int64_t trackFramesReleased, int64_t sinkFramesWritten,
         uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
+    // Make the kernel frame time available.
+    const FrameTime ft{
+            timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+            timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
+    // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
+    mKernelFrameTime.store(ft);
+    if (!audio_is_linear_pcm(mFormat)) {
+        return;
+    }
+
     //update frame map
     mFrameMap.push(trackFramesReleased, sinkFramesWritten);
 
@@ -1720,7 +1730,7 @@
         thread->mFastTrackAvail = false;
     } else {
         // TODO: only Normal Record has timestamps (Fast Record does not).
-        mServerLatencySupported = true;
+        mServerLatencySupported = audio_is_linear_pcm(mFormat);
     }
 #ifdef TEE_SINK
     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
@@ -1886,6 +1896,16 @@
         int64_t trackFramesReleased, int64_t sourceFramesRead,
         uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
 {
+    // Make the kernel frame time available.
+    const FrameTime ft{
+            timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+            timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
+    // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
+    mKernelFrameTime.store(ft);
+    if (!audio_is_linear_pcm(mFormat)) {
+        return;
+    }
+
     ExtendedTimestamp local = timestamp;
 
     // Convert HAL frames to server-side track frames at track sample rate.
diff --git a/services/audioflinger/TypedLogger.h b/services/audioflinger/TypedLogger.h
index 38c3c02..0fea42a 100644
--- a/services/audioflinger/TypedLogger.h
+++ b/services/audioflinger/TypedLogger.h
@@ -97,6 +97,11 @@
 #define LOG_AUDIO_STATE() do { NBLog::Writer *x = tlNBLogWriter; if (x != nullptr) \
         x->logEventHistTs(NBLog::EVENT_AUDIO_STATE, hash(__FILE__, __LINE__)); } while(0)
 
+// Record a typed entry that represents a thread's cycle time in nanoseconds.
+// Parameter ns should be of type uint32_t.
+#define LOG_MONOTONIC_CYCLE_TIME(ns) do { NBLog::Writer *x = tlNBLogWriter; if (x != nullptr) \
+        x->logMonotonicCycleTime(ns); } while (0)
+
 namespace android {
 extern "C" {
 extern thread_local NBLog::Writer *tlNBLogWriter;
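A hedged usage sketch for the new macro (illustrative only: the surrounding timing helper
is an assumption, while systemTime() is the existing utils/Timers.h monotonic clock and
the macro itself is defined above and is a no-op when tlNBLogWriter is unset):

    void threadLoopCycle() {
        const nsecs_t cycleStartNs = systemTime(SYSTEM_TIME_MONOTONIC);

        // ... one mixer/record cycle of work ...

        const nsecs_t cycleNs = systemTime(SYSTEM_TIME_MONOTONIC) - cycleStartNs;
        LOG_MONOTONIC_CYCLE_TIME((uint32_t)cycleNs);
    }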
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 4812b1f..d4c49d9 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -124,17 +124,11 @@
                                         audio_port_handle_t *selectedDeviceId,
                                         audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the output starts being used by corresponding stream.
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session) = 0;
+    virtual status_t startOutput(audio_port_handle_t portId) = 0;
     // indicates to the audio policy manager that the output stops being used by corresponding stream.
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session) = 0;
+    virtual status_t stopOutput(audio_port_handle_t portId) = 0;
     // releases the output.
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session) = 0;
+    virtual void releaseOutput(audio_port_handle_t portId) = 0;
 
     // request an input appropriate for record from the supplied device with supplied parameters.
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
@@ -147,16 +141,13 @@
                                      input_type_t *inputType,
                                      audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the input starts being used.
-    virtual status_t startInput(audio_io_handle_t input,
-                                audio_session_t session,
+    virtual status_t startInput(audio_port_handle_t portId,
                                 bool silenced,
                                 concurrency_type__mask_t *concurrency) = 0;
     // indicates to the audio policy manager that the input stops being used.
-    virtual status_t stopInput(audio_io_handle_t input,
-                               audio_session_t session) = 0;
+    virtual status_t stopInput(audio_port_handle_t portId) = 0;
     // releases the input.
-    virtual void releaseInput(audio_io_handle_t input,
-                              audio_session_t session) = 0;
+    virtual void releaseInput(audio_port_handle_t portId) = 0;
 
     //
     // volume control functions
@@ -235,9 +226,9 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle,
+                                      audio_port_handle_t *portId,
                                       uid_t uid) = 0;
-    virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
+    virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
@@ -324,11 +315,6 @@
     // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
     virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
 
-    // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
-    // over a telephony device during a phone call.
-    virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0;
-    virtual status_t stopTone() = 0;
-
     // set down link audio volume.
     virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
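To make the interface change concrete, a hedged sketch of the client lifecycle as
AudioFlinger now drives it, keyed purely by audio_port_handle_t (illustrative only; the
AudioSystem call sites updated in Threads.cpp and Tracks.cpp above follow this shape,
with locking and error paths omitted here):

    void runExternalTrack(audio_port_handle_t portId) {
        if (AudioSystem::startOutput(portId) != NO_ERROR) {
            return;                          // start rejected by audio policy
        }
        // ... track is active and mixed ...
        AudioSystem::stopOutput(portId);     // output no longer used by this client
        AudioSystem::releaseOutput(portId);  // drop the policy-side reference
    }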
 
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index b3611c4..9b8f095 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -18,10 +18,10 @@
     src/EffectDescriptor.cpp \
     src/SoundTriggerSession.cpp \
     src/SessionRoute.cpp \
-    src/AudioSourceDescriptor.cpp \
     src/VolumeCurve.cpp \
     src/TypeConverter.cpp \
-    src/AudioSession.cpp
+    src/AudioSession.cpp \
+    src/ClientDescriptor.cpp
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
index 9f3fc0c..555412e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
@@ -34,12 +34,4 @@
     virtual void setPatchHandle(audio_patch_handle_t handle) = 0;
 };
 
-class AudioIODescriptorUpdateListener
-{
-public:
-    virtual ~AudioIODescriptorUpdateListener() {};
-
-    virtual void onIODescriptorUpdate() const = 0;
-};
-
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 85f3b86..44662e5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -19,6 +19,7 @@
 #include "AudioIODescriptorInterface.h"
 #include "AudioPort.h"
 #include "AudioSession.h"
+#include "ClientDescriptor.h"
 #include <utils/Errors.h>
 #include <system/audio.h>
 #include <utils/SortedVector.h>
@@ -66,6 +67,8 @@
     AudioSessionCollection getAudioSessions(bool activeOnly) const;
     size_t getAudioSessionCount(bool activeOnly) const;
     audio_source_t getHighestPrioritySource(bool activeOnly) const;
+    void changeRefCount(audio_session_t session, int delta);
+
 
     // implementation of AudioIODescriptorInterface
     audio_config_base_t getConfig() const override;
@@ -79,14 +82,20 @@
                   audio_input_flags_t flags,
                   audio_io_handle_t *input);
     // Called when a stream is about to be started.
-    // Note: called after AudioSession::changeActiveCount(1)
+    // Note: called after changeRefCount(session, 1)
     status_t start();
     // Called after a stream is stopped
-    // Note: called after AudioSession::changeActiveCount(-1)
+    // Note: called after changeRefCount(session, -1)
     void stop();
     void close();
 
-private:
+    RecordClientMap& clients() { return mClients; }
+    RecordClientVector getClientsForSession(audio_session_t session);
+
+ private:
+
+    void updateSessionRecordingConfiguration(int event, const sp<AudioSession>& audioSession);
+
     audio_patch_handle_t          mPatchHandle;
     audio_port_handle_t           mId;
     // audio sessions attached to this input
@@ -99,6 +108,9 @@
     // We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
     SortedVector<audio_session_t> mPreemptedSessions;
     AudioPolicyClientInterface *mClientInterface;
+    uint32_t mGlobalRefCount;  // non-session-specific ref count
+
+    RecordClientMap mClients;
 };
 
 class AudioInputCollection :
@@ -122,6 +134,8 @@
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
+    sp<AudioInputDescriptor> getInputForClient(audio_port_handle_t portId);
+
     status_t dump(int fd) const;
 };
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 57d1cfa..ff0201a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -17,15 +17,14 @@
 #pragma once
 
 #include <sys/types.h>
-
-#include "AudioPort.h"
-#include <RoutingStrategy.h>
 #include <utils/Errors.h>
 #include <utils/Timers.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
+#include <RoutingStrategy.h>
 #include "AudioIODescriptorInterface.h"
-#include "AudioSourceDescriptor.h"
+#include "AudioPort.h"
+#include "ClientDescriptor.h"
 
 namespace android {
 
@@ -79,7 +78,9 @@
     audio_patch_handle_t getPatchHandle() const override;
     void setPatchHandle(audio_patch_handle_t handle) override;
 
-    sp<AudioPort>       mPort;
+    TrackClientMap& clients() { return mClients; }
+
+    sp<AudioPort> mPort;
     audio_devices_t mDevice;                   // current device this output is routed to
     uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
     nsecs_t mStopTime[AUDIO_STREAM_CNT];
@@ -92,6 +93,7 @@
 protected:
     audio_patch_handle_t mPatchHandle;
     audio_port_handle_t mId;
+    TrackClientMap mClients;
 };
 
 // Audio output driven by a software mixer in audio flinger.
@@ -156,7 +158,7 @@
 class HwAudioOutputDescriptor: public AudioOutputDescriptor
 {
 public:
-    HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+    HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                             AudioPolicyClientInterface *clientInterface);
     virtual ~HwAudioOutputDescriptor() {}
 
@@ -173,7 +175,7 @@
                            const struct audio_port_config *srcConfig = NULL) const;
     virtual void toAudioPort(struct audio_port *port) const;
 
-    const sp<AudioSourceDescriptor> mSource;
+    const sp<SourceClientDescriptor> mSource;
 
 };
 
@@ -227,6 +229,8 @@
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
+    sp<SwAudioOutputDescriptor> getOutputForClient(audio_port_handle_t portId);
+
     status_t dump(int fd) const;
 };
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
index 4226ff2..a1ee708 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
@@ -16,93 +16,84 @@
 
 #pragma once
 
-#include "policy.h"
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
+#include <vector>
+
 #include <system/audio.h>
-#include <cutils/config_utils.h>
+#include <utils/RefBase.h>
+#include <utils/SortedVector.h>
+#include <utils/String8.h>
+
+#include "policy.h"
 
 namespace android {
 
 typedef SortedVector<uint32_t> SampleRateVector;
-typedef SortedVector<audio_channel_mask_t> ChannelsVector;
 typedef Vector<audio_format_t> FormatVector;
 
 template <typename T>
-bool operator == (const SortedVector<T> &left, const SortedVector<T> &right);
+bool operator== (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+    if (left.size() != right.size()) {
+        return false;
+    }
+    for (size_t index = 0; index < right.size(); index++) {
+        if (left[index] != right[index]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+template <typename T>
+bool operator!= (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+    return !(left == right);
+}
+
+class ChannelsVector : public SortedVector<audio_channel_mask_t>
+{
+public:
+    ChannelsVector() = default;
+    ChannelsVector(const ChannelsVector&) = default;
+    ChannelsVector(const SortedVector<audio_channel_mask_t>& sv) :
+            SortedVector<audio_channel_mask_t>(sv) {}
+    ChannelsVector& operator=(const ChannelsVector&) = default;
+
+    // Applies audio_channel_mask_out_to_in to all elements and returns the result.
+    ChannelsVector asInMask() const;
+    // Applies audio_channel_mask_in_to_out to all elements and returns the result.
+    ChannelsVector asOutMask() const;
+};
 
 class AudioProfile : public virtual RefBase
 {
 public:
     static sp<AudioProfile> createFullDynamic();
 
-    AudioProfile(audio_format_t format,
-                 audio_channel_mask_t channelMasks,
-                 uint32_t samplingRate) :
-        mName(String8("")),
-        mFormat(format)
-    {
-        mChannelMasks.add(channelMasks);
-        mSamplingRates.add(samplingRate);
-    }
-
+    AudioProfile(audio_format_t format, audio_channel_mask_t channelMasks, uint32_t samplingRate);
     AudioProfile(audio_format_t format,
                  const ChannelsVector &channelMasks,
-                 const SampleRateVector &samplingRateCollection) :
-        mName(String8("")),
-        mFormat(format),
-        mChannelMasks(channelMasks),
-        mSamplingRates(samplingRateCollection)
-    {}
+                 const SampleRateVector &samplingRateCollection);
 
     audio_format_t getFormat() const { return mFormat; }
-
-    void setChannels(const ChannelsVector &channelMasks)
-    {
-        if (mIsDynamicChannels) {
-            mChannelMasks = channelMasks;
-        }
-    }
     const ChannelsVector &getChannels() const { return mChannelMasks; }
-
-    void setSampleRates(const SampleRateVector &sampleRates)
-    {
-        if (mIsDynamicRate) {
-            mSamplingRates = sampleRates;
-        }
-    }
     const SampleRateVector &getSampleRates() const { return mSamplingRates; }
+    void setChannels(const ChannelsVector &channelMasks);
+    void setSampleRates(const SampleRateVector &sampleRates);
 
+    void clear();
     bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
-
-    void clear()
-    {
-        if (mIsDynamicChannels) {
-            mChannelMasks.clear();
-        }
-        if (mIsDynamicRate) {
-            mSamplingRates.clear();
-        }
-    }
-
-    inline bool supportsChannels(audio_channel_mask_t channels) const
+    bool supportsChannels(audio_channel_mask_t channels) const
     {
         return mChannelMasks.indexOf(channels) >= 0;
     }
-    inline bool supportsRate(uint32_t rate) const
-    {
-        return mSamplingRates.indexOf(rate) >= 0;
-    }
+    bool supportsRate(uint32_t rate) const { return mSamplingRates.indexOf(rate) >= 0; }
 
     status_t checkExact(uint32_t rate, audio_channel_mask_t channels, audio_format_t format) const;
-
     status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
                                         audio_channel_mask_t &updatedChannelMask,
                                         audio_port_type_t portType,
                                         audio_port_role_t portRole) const;
-
     status_t checkCompatibleSamplingRate(uint32_t samplingRate,
                                          uint32_t &updatedSamplingRate) const;
 
@@ -138,213 +129,48 @@
 class AudioProfileVector : public Vector<sp<AudioProfile> >
 {
 public:
-    ssize_t add(const sp<AudioProfile> &profile)
-    {
-        ssize_t index = Vector::add(profile);
-        // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
-        // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
-        // [](const audio_format_t *format1, const audio_format_t *format2) {
-        //     return compareFormats(*format1, *format2);
-        // }
-        sort(compareFormats);
-        return index;
-    }
-
+    ssize_t add(const sp<AudioProfile> &profile);
     // This API is intended to be used by the policy manager once retrieving capabilities
     // for a profile with dynamic format, rate and channels attributes
-    ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd)
-    {
-        // Check valid profile to add:
-        if (!profileToAdd->hasValidFormat()) {
-            return -1;
-        }
-        if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
-            FormatVector formats;
-            formats.add(profileToAdd->getFormat());
-            setFormats(FormatVector(formats));
-            return 0;
-        }
-        if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
-            setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
-            return 0;
-        }
-        if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
-            setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
-            return 0;
-        }
-        // Go through the list of profile to avoid duplicates
-        for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
-            const sp<AudioProfile> &profile = itemAt(profileIndex);
-            if (profile->isValid() && profile == profileToAdd) {
-                // Nothing to do
-                return profileIndex;
-            }
-        }
-        profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
-        return add(profileToAdd);
-    }
-
-    sp<AudioProfile> getFirstValidProfile() const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->isValid()) {
-                return itemAt(i);
-            }
-        }
-        return 0;
-    }
-
-    bool hasValidProfile() const { return getFirstValidProfile() != 0; }
+    ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd);
 
     status_t checkExactProfile(uint32_t samplingRate, audio_channel_mask_t channelMask,
                                audio_format_t format) const;
-
     status_t checkCompatibleProfile(uint32_t &samplingRate, audio_channel_mask_t &channelMask,
                                     audio_format_t &format,
                                     audio_port_type_t portType,
                                     audio_port_role_t portRole) const;
+    void clearProfiles();
+    // Assuming that this profile vector contains input profiles,
+    // find the best matching config from 'outputProfiles', according to
+    // the given preferences for audio formats and channel masks.
+    // Note: std::vector is used here (rather than the specialized containers
+    //       for formats and channels) so that the caller-provided preference
+    //       order is preserved; those containers may impose their own ordering.
+    status_t findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
+            const std::vector<audio_format_t>& preferredFormats, // order: most pref -> least pref
+            const std::vector<audio_channel_mask_t>& preferredOutputChannels,
+            bool preferHigherSamplingRates,
+            audio_config_base *bestOutputConfig) const;
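+    // Illustrative call only (names are placeholders, not part of this change):
+    // pick a playback config compatible with these capture profiles, preferring
+    // 16-bit stereo and the highest common sample rate:
+    //   audio_config_base_t cfg = AUDIO_CONFIG_BASE_INITIALIZER;
+    //   status_t res = inputProfiles.findBestMatchingOutputConfig(outputProfiles,
+    //           {AUDIO_FORMAT_PCM_16_BIT}, {AUDIO_CHANNEL_OUT_STEREO},
+    //           true /*preferHigherSamplingRates*/, &cfg);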
 
-    FormatVector getSupportedFormats() const
-    {
-        FormatVector supportedFormats;
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->hasValidFormat()) {
-                supportedFormats.add(itemAt(i)->getFormat());
-            }
-        }
-        return supportedFormats;
-    }
+    sp<AudioProfile> getFirstValidProfile() const;
+    sp<AudioProfile> getFirstValidProfileFor(audio_format_t format) const;
+    bool hasValidProfile() const { return getFirstValidProfile() != 0; }
 
-    bool hasDynamicProfile() const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->isDynamic()) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    bool hasDynamicFormat() const
-    {
-        return getProfileFor(gDynamicFormat) != 0;
-    }
-
-    bool hasDynamicChannelsFor(audio_format_t format) const
-    {
-       for (size_t i = 0; i < size(); i++) {
-           sp<AudioProfile> profile = itemAt(i);
-           if (profile->getFormat() == format && profile->isDynamicChannels()) {
-               return true;
-           }
-       }
-       return false;
-    }
-
-    bool hasDynamicRateFor(audio_format_t format) const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            sp<AudioProfile> profile = itemAt(i);
-            if (profile->getFormat() == format && profile->isDynamicRate()) {
-                return true;
-            }
-        }
-        return false;
-    }
+    FormatVector getSupportedFormats() const;
+    bool hasDynamicChannelsFor(audio_format_t format) const;
+    bool hasDynamicFormat() const { return getProfileFor(gDynamicFormat) != 0; }
+    bool hasDynamicProfile() const;
+    bool hasDynamicRateFor(audio_format_t format) const;
 
     // One audio profile will be added for each format supported by Audio HAL
-    void setFormats(const FormatVector &formats)
-    {
-        // Only allow to change the format of dynamic profile
-        sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
-        if (dynamicFormatProfile == 0) {
-            return;
-        }
-        for (size_t i = 0; i < formats.size(); i++) {
-            sp<AudioProfile> profile = new AudioProfile(formats[i],
-                                                        dynamicFormatProfile->getChannels(),
-                                                        dynamicFormatProfile->getSampleRates());
-            profile->setDynamicFormat(true);
-            profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
-            profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
-            add(profile);
-        }
-    }
+    void setFormats(const FormatVector &formats);
 
-    void clearProfiles()
-    {
-        for (size_t i = size(); i != 0; ) {
-            sp<AudioProfile> profile = itemAt(--i);
-            if (profile->isDynamicFormat() && profile->hasValidFormat()) {
-                removeAt(i);
-                continue;
-            }
-            profile->clear();
-        }
-    }
-
-    void dump(int fd, int spaces) const
-    {
-        const size_t SIZE = 256;
-        char buffer[SIZE];
-
-        snprintf(buffer, SIZE, "%*s- Profiles:\n", spaces, "");
-        write(fd, buffer, strlen(buffer));
-        for (size_t i = 0; i < size(); i++) {
-            snprintf(buffer, SIZE, "%*sProfile %zu:", spaces + 4, "", i);
-            write(fd, buffer, strlen(buffer));
-            itemAt(i)->dump(fd, spaces + 8);
-        }
-    }
+    void dump(int fd, int spaces) const;
 
 private:
-    void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format)
-    {
-        for (size_t i = 0; i < size(); i++) {
-            sp<AudioProfile> profile = itemAt(i);
-            if (profile->getFormat() == format && profile->isDynamicRate()) {
-                if (profile->hasValidRates()) {
-                    // Need to create a new profile with same format
-                    sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
-                                                                     sampleRates);
-                    profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
-                    add(profileToAdd);
-                } else {
-                    profile->setSampleRates(sampleRates);
-                }
-                return;
-            }
-        }
-    }
-
-    void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
-    {
-        for (size_t i = 0; i < size(); i++) {
-            sp<AudioProfile> profile = itemAt(i);
-            if (profile->getFormat() == format && profile->isDynamicChannels()) {
-                if (profile->hasValidChannels()) {
-                    // Need to create a new profile with same format
-                    sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
-                                                                     profile->getSampleRates());
-                    profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
-                    add(profileToAdd);
-                } else {
-                    profile->setChannels(channelMasks);
-                }
-                return;
-            }
-        }
-    }
-
-    sp<AudioProfile> getProfileFor(audio_format_t format) const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->getFormat() == format) {
-                return itemAt(i);
-            }
-        }
-        return 0;
-    }
+    sp<AudioProfile> getProfileFor(audio_format_t format) const;
+    void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format);
+    void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format);
 
     static int compareFormats(const sp<AudioProfile> *profile1, const sp<AudioProfile> *profile2);
 };
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index 53e6ec9..1636d3a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -29,7 +29,7 @@
 
 class AudioPolicyClientInterface;
 
-class AudioSession : public RefBase, public AudioIODescriptorUpdateListener
+class AudioSession : public RefBase
 {
 public:
     AudioSession(audio_session_t session,
@@ -39,9 +39,7 @@
                  audio_channel_mask_t channelMask,
                  audio_input_flags_t flags,
                  uid_t uid,
-                 bool isSoundTrigger,
-                 AudioMix* policyMix,
-                 AudioPolicyClientInterface *clientInterface);
+                 bool isSoundTrigger);
 
     status_t dump(int fd, int spaces, int index) const;
 
@@ -50,6 +48,8 @@
     audio_format_t format() const { return mConfig.format; }
     uint32_t sampleRate() const { return mConfig.sample_rate; }
     audio_channel_mask_t channelMask() const { return mConfig.channel_mask; }
+    audio_config_base config() const { return mConfig; }
+    record_client_info_t recordClientInfo() const { return mRecordClientInfo; }
     audio_input_flags_t flags() const { return mFlags; }
     uid_t uid() const { return mRecordClientInfo.uid; }
     void setUid(uid_t uid) { mRecordClientInfo.uid = uid; }
@@ -63,10 +63,6 @@
     uint32_t changeOpenCount(int delta);
     uint32_t changeActiveCount(int delta);
 
-    void setInfoProvider(AudioIODescriptorInterface *provider);
-    // implementation of AudioIODescriptorUpdateListener
-    virtual void onIODescriptorUpdate() const;
-
 private:
     record_client_info_t mRecordClientInfo;
     const struct audio_config_base mConfig;
@@ -75,19 +71,14 @@
     bool mSilenced;
     uint32_t  mOpenCount;
     uint32_t  mActiveCount;
-    AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
-    AudioPolicyClientInterface* mClientInterface;
-    const AudioIODescriptorInterface* mInfoProvider;
 };
 
 class AudioSessionCollection :
-    public DefaultKeyedVector<audio_session_t, sp<AudioSession> >,
-    public AudioIODescriptorUpdateListener
+    public DefaultKeyedVector<audio_session_t, sp<AudioSession> >
 {
 public:
     status_t addSession(audio_session_t session,
-                             const sp<AudioSession>& audioSession,
-                             AudioIODescriptorInterface *provider);
+                             const sp<AudioSession>& audioSession);
 
     status_t removeSession(audio_session_t session);
 
@@ -99,9 +90,6 @@
     bool isSourceActive(audio_source_t source) const;
     audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
-    // implementation of AudioIODescriptorUpdateListener
-    virtual void onIODescriptorUpdate() const;
-
     status_t dump(int fd, int spaces) const;
 };
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
deleted file mode 100644
index 0d90f42..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <system/audio.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/RefBase.h>
-#include <RoutingStrategy.h>
-#include <AudioPatch.h>
-
-namespace android {
-
-class SwAudioOutputDescriptor;
-class HwAudioOutputDescriptor;
-class DeviceDescriptor;
-
-class AudioSourceDescriptor: public RefBase
-{
-public:
-    AudioSourceDescriptor(const sp<DeviceDescriptor> device, const audio_attributes_t *attributes,
-                          uid_t uid) :
-        mDevice(device), mAttributes(*attributes), mUid(uid) {}
-    virtual ~AudioSourceDescriptor() {}
-
-    audio_patch_handle_t getHandle() const { return mPatchDesc->mHandle; }
-
-    status_t    dump(int fd);
-
-    const sp<DeviceDescriptor> mDevice;
-    const audio_attributes_t mAttributes;
-    uid_t mUid;
-    sp<AudioPatch> mPatchDesc;
-    wp<SwAudioOutputDescriptor> mSwOutput;
-    wp<HwAudioOutputDescriptor> mHwOutput;
-};
-
-class AudioSourceCollection :
-        public DefaultKeyedVector< audio_patch_handle_t, sp<AudioSourceDescriptor> >
-{
-public:
-    status_t dump(int fd) const;
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
new file mode 100644
index 0000000..9efe57f
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+#include <map>
+#include <unistd.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include "AudioPatch.h"
+
+namespace android {
+
+class DeviceDescriptor;
+class HwAudioOutputDescriptor;
+class SwAudioOutputDescriptor;
+
+class ClientDescriptor: public RefBase
+{
+public:
+    ClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+                   audio_attributes_t attributes, audio_config_base_t config,
+                   audio_port_handle_t preferredDeviceId) :
+        mPortId(portId), mUid(uid), mSessionId(sessionId), mAttributes(attributes),
+        mConfig(config), mPreferredDeviceId(preferredDeviceId), mActive(false),
+        mDumpFd(-1) {}  // initialize mDumpFd defensively; it is only set by dump(int fd, ...)
+    ~ClientDescriptor() override = default;
+
+    status_t dump(int fd, int spaces, int index);
+    virtual status_t dump(String8& dst, int spaces, int index);
+
+    audio_port_handle_t portId() const { return mPortId; }
+    uid_t uid() const { return mUid; }
+    audio_session_t session() const { return mSessionId; }
+    audio_attributes_t attributes() const { return mAttributes; }
+    audio_config_base_t config() const { return mConfig; }
+    audio_port_handle_t preferredDeviceId() const { return mPreferredDeviceId; }
+    void setActive(bool active) { mActive = active; }
+    bool active() const { return mActive; }
+
+private:
+    const audio_port_handle_t mPortId;            // unique ID for this client
+    const uid_t mUid;                             // client UID
+    const audio_session_t mSessionId;             // audio session ID
+    const audio_attributes_t mAttributes;         // usage, content type, flags...
+    const audio_config_base_t mConfig;
+    const audio_port_handle_t mPreferredDeviceId; // requested preferred device port ID, if any
+    bool mActive;
+
+protected:
+    // FIXME: use until other descriptor classes have a dump to String8 method
+    int mDumpFd;
+};
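+// ClientDescriptor is specialized below into TrackClientDescriptor (playback
+// clients), RecordClientDescriptor (capture clients) and SourceClientDescriptor
+// (started audio sources, replacing the removed AudioSourceDescriptor).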
+
+class TrackClientDescriptor: public ClientDescriptor
+{
+public:
+    TrackClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+                   audio_attributes_t attributes, audio_config_base_t config,
+                   audio_port_handle_t preferredDeviceId,
+                   audio_stream_type_t stream, audio_output_flags_t flags) :
+        ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
+        mStream(stream), mFlags(flags) {}
+    ~TrackClientDescriptor() override = default;
+
+    using ClientDescriptor::dump;
+    status_t dump(String8& dst, int spaces, int index) override;
+
+    audio_output_flags_t flags() const { return mFlags; }
+    audio_stream_type_t stream() const { return mStream; }
+
+private:
+    const audio_stream_type_t mStream;
+    const audio_output_flags_t mFlags;
+};
+
+class RecordClientDescriptor: public ClientDescriptor
+{
+public:
+    RecordClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+                        audio_attributes_t attributes, audio_config_base_t config,
+                        audio_port_handle_t preferredDeviceId,
+                        audio_source_t source, audio_input_flags_t flags) :
+        ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
+        mSource(source), mFlags(flags) {}
+    ~RecordClientDescriptor() override = default;
+
+    using ClientDescriptor::dump;
+    status_t dump(String8& dst, int spaces, int index) override;
+
+    audio_source_t source() const { return mSource; }
+    audio_input_flags_t flags() const { return mFlags; }
+
+private:
+    const audio_source_t mSource;
+    const audio_input_flags_t mFlags;
+};
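+// Illustrative only (argument names are placeholders, not part of this change):
+// a record client as the policy manager might create it when a capture stream
+// is opened:
+//   sp<RecordClientDescriptor> client = new RecordClientDescriptor(
+//           portId, uid, session, attr, config, AUDIO_PORT_HANDLE_NONE,
+//           AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_NONE);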
+
+class SourceClientDescriptor: public TrackClientDescriptor
+{
+public:
+    SourceClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
+                           const sp<AudioPatch>& patchDesc, const sp<DeviceDescriptor>& srcDevice,
+                           audio_stream_type_t stream);
+    ~SourceClientDescriptor() override = default;
+
+    sp<AudioPatch> patchDesc() const { return mPatchDesc; }
+    sp<DeviceDescriptor> srcDevice() const { return mSrcDevice; }
+    wp<SwAudioOutputDescriptor> swOutput() const { return mSwOutput; }
+    void setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput);
+    wp<HwAudioOutputDescriptor> hwOutput() const { return mHwOutput; }
+    void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
+
+    using ClientDescriptor::dump;
+    status_t dump(String8& dst, int spaces, int index) override;
+
+private:
+    const sp<AudioPatch> mPatchDesc;
+    const sp<DeviceDescriptor> mSrcDevice;
+    wp<SwAudioOutputDescriptor> mSwOutput;
+    wp<HwAudioOutputDescriptor> mHwOutput;
+};
+
+class SourceClientCollection :
+    public DefaultKeyedVector< audio_port_handle_t, sp<SourceClientDescriptor> >
+{
+public:
+    status_t dump(int fd) const;
+};
+
+typedef std::vector< sp<TrackClientDescriptor> > TrackClientVector;
+typedef std::map< audio_port_handle_t, sp<TrackClientDescriptor> > TrackClientMap;
+typedef std::vector< sp<RecordClientDescriptor> > RecordClientVector;
+typedef std::map< audio_port_handle_t, sp<RecordClientDescriptor> > RecordClientMap;
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 2325e4f..c08e752 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -82,8 +82,8 @@
     DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
     sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
     sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
-
-    audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+    DeviceVector getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+    audio_devices_t getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const;
 
     status_t dump(int fd, const String8 &tag, int spaces = 0, bool verbose = true) const;
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index f0144db..2770e74 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -32,7 +32,7 @@
     : mIoHandle(0),
       mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
       mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
-      mClientInterface(clientInterface)
+      mClientInterface(clientInterface), mGlobalRefCount(0)
 {
     if (profile != NULL) {
         profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -164,7 +164,7 @@
 
 status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
                          const sp<AudioSession>& audioSession) {
-    return mSessions.addSession(session, audioSession, /*AudioIODescriptorInterface*/this);
+    return mSessions.addSession(session, audioSession);
 }
 
 status_t AudioInputDescriptor::removeAudioSession(audio_session_t session) {
@@ -179,7 +179,11 @@
 void AudioInputDescriptor::setPatchHandle(audio_patch_handle_t handle)
 {
     mPatchHandle = handle;
-    mSessions.onIODescriptorUpdate();
+    for (size_t i = 0; i < mSessions.size(); i++) {
+        if (mSessions[i]->activeCount() > 0) {
+            updateSessionRecordingConfiguration(RECORD_CONFIG_EVENT_START, mSessions[i]);
+        }
+    }
 }
 
 audio_config_base_t AudioInputDescriptor::getConfig() const
@@ -266,7 +270,7 @@
         LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
                             __FUNCTION__, mProfile->curOpenCount);
         // do not call stop() here as stop() is supposed to be called after
-        // AudioSession::changeActiveCount(-1) and we don't know how many sessions
+        // changeRefCount(session, -1) and we don't know how many sessions
         // are still active at this time
         if (isActive()) {
             mProfile->curActiveCount--;
@@ -276,6 +280,78 @@
     }
 }
 
+void AudioInputDescriptor::changeRefCount(audio_session_t session, int delta)
+{
+    sp<AudioSession> audioSession = mSessions.valueFor(session);
+    if (audioSession == 0) {
+        return;
+    }
+    // handle session-independent ref count
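+    // (mGlobalRefCount aggregates active clients across all sessions on this
+    // input; its 0 <-> non-zero transitions drive the dynamic policy mix
+    // MIXING/IDLE notifications below.)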
+    uint32_t oldGlobalRefCount = mGlobalRefCount;
+    if ((delta + (int)mGlobalRefCount) < 0) {
+        ALOGW("changeRefCount() invalid delta %d globalRefCount %d", delta, mGlobalRefCount);
+        delta = -((int)mGlobalRefCount);
+    }
+    mGlobalRefCount += delta;
+    if ((oldGlobalRefCount == 0) && (mGlobalRefCount > 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+                                                            MIX_STATE_MIXING);
+        }
+
+    } else if ((oldGlobalRefCount > 0) && (mGlobalRefCount == 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+                                                            MIX_STATE_IDLE);
+        }
+    }
+
+    uint32_t oldActiveCount = audioSession->activeCount();
+    if ((delta + (int)oldActiveCount) < 0) {
+        ALOGW("changeRefCount() invalid delta %d for sesion %d active count %d",
+              delta, session, oldActiveCount);
+        delta = -((int)oldActiveCount);
+    }
+
+    audioSession->changeActiveCount(delta);
+
+    int event = RECORD_CONFIG_EVENT_NONE;
+    if ((oldActiveCount == 0) && (audioSession->activeCount() > 0)) {
+        event = RECORD_CONFIG_EVENT_START;
+    } else if ((oldActiveCount > 0) && (audioSession->activeCount() == 0)) {
+        event = RECORD_CONFIG_EVENT_STOP;
+    }
+    if (event != RECORD_CONFIG_EVENT_NONE) {
+        updateSessionRecordingConfiguration(event, audioSession);
+    }
+
+}
+
+void AudioInputDescriptor::updateSessionRecordingConfiguration(
+    int event, const sp<AudioSession>& audioSession) {
+
+    const audio_config_base_t sessionConfig = audioSession->config();
+    const record_client_info_t recordClientInfo = audioSession->recordClientInfo();
+    const audio_config_base_t config = getConfig();
+    mClientInterface->onRecordingConfigurationUpdate(event,
+                                                     &recordClientInfo, &sessionConfig,
+                                                     &config, mPatchHandle);
+}
+
+RecordClientVector AudioInputDescriptor::getClientsForSession(
+    audio_session_t session)
+{
+    RecordClientVector clients;
+    for (const auto &client : mClients) {
+        if (client.second->session() == session) {
+            clients.push_back(client.second);
+        }
+    }
+    return clients;
+}
+
 status_t AudioInputDescriptor::dump(int fd)
 {
     const size_t SIZE = 256;
@@ -297,6 +373,13 @@
 
     mSessions.dump(fd, 1);
 
+    size_t index = 0;
+    result = " AudioRecord clients:\n";
+    for (const auto& client : mClients) {
+        client.second->dump(result, 2, index++);
+    }
+    result.append(" \n");
+    write(fd, result.string(), result.size());
     return NO_ERROR;
 }
 
@@ -359,6 +442,19 @@
     return devices;
 }
 
+sp<AudioInputDescriptor> AudioInputCollection::getInputForClient(audio_port_handle_t portId)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioInputDescriptor> inputDesc = valueAt(i);
+        for (const auto& client : inputDesc->clients()) {
+            if (client.second->portId() == portId) {
+                return inputDesc;
+            }
+        }
+    }
+    return 0;
+}
+
 status_t AudioInputCollection::dump(int fd) const
 {
     const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 3c69de5..39fce4d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -224,6 +224,13 @@
                  i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
         result.append(buffer);
     }
+
+    result.append(" AudioTrack clients:\n");
+    size_t index = 0;
+    for (const auto& client : mClients) {
+        client.second->dump(result, 2, index++);
+    }
+    result.append(" \n");
     write(fd, result.string(), result.size());
 
     return NO_ERROR;
@@ -551,9 +558,9 @@
 }
 
 // HwAudioOutputDescriptor implementation
-HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
-    : AudioOutputDescriptor(source->mDevice, clientInterface),
+    : AudioOutputDescriptor(source->srcDevice(), clientInterface),
       mSource(source)
 {
 }
@@ -569,7 +576,7 @@
     snprintf(buffer, SIZE, "Source:\n");
     result.append(buffer);
     write(fd, result.string(), result.size());
-    mSource->dump(fd);
+    mSource->dump(fd, 0, 0);
 
     return NO_ERROR;
 }
@@ -583,13 +590,13 @@
                                                  struct audio_port_config *dstConfig,
                                                  const struct audio_port_config *srcConfig) const
 {
-    mSource->mDevice->toAudioPortConfig(dstConfig, srcConfig);
+    mSource->srcDevice()->toAudioPortConfig(dstConfig, srcConfig);
 }
 
 void HwAudioOutputDescriptor::toAudioPort(
                                                     struct audio_port *port) const
 {
-    mSource->mDevice->toAudioPort(port);
+    mSource->srcDevice()->toAudioPort(port);
 }
 
 
@@ -731,6 +738,18 @@
     return devices;
 }
 
+sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputForClient(audio_port_handle_t portId)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
+        for (const auto& client : outputDesc->clients()) {
+            if (client.second->portId() == portId) {
+                return outputDesc;
+            }
+        }
+    }
+    return 0;
+}
 
 status_t SwAudioOutputCollection::dump(int fd) const
 {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 26af9b4..d04beec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -14,20 +14,53 @@
  * limitations under the License.
  */
 
+#include <algorithm>
+#include <set>
+#include <string>
+
 #define LOG_TAG "APM::AudioProfile"
 //#define LOG_NDEBUG 0
 
-#include "AudioProfile.h"
-#include "AudioPort.h"
-#include "HwModule.h"
-#include "AudioGain.h"
-#include <utils/SortedVector.h>
-#include "TypeConverter.h"
 #include <media/AudioResamplerPublic.h>
-#include <algorithm>
+#include <utils/Errors.h>
+
+#include "AudioGain.h"
+#include "AudioPort.h"
+#include "AudioProfile.h"
+#include "HwModule.h"
+#include "TypeConverter.h"
 
 namespace android {
 
+ChannelsVector ChannelsVector::asInMask() const
+{
+    ChannelsVector inMaskVector;
+    for (const auto& channel : *this) {
+        if (audio_channel_mask_out_to_in(channel) != AUDIO_CHANNEL_INVALID) {
+            inMaskVector.add(audio_channel_mask_out_to_in(channel));
+        }
+    }
+    return inMaskVector;
+}
+
+ChannelsVector ChannelsVector::asOutMask() const
+{
+    ChannelsVector outMaskVector;
+    for (const auto& channel : *this) {
+        if (audio_channel_mask_in_to_out(channel) != AUDIO_CHANNEL_INVALID) {
+            outMaskVector.add(audio_channel_mask_in_to_out(channel));
+        }
+    }
+    return outMaskVector;
+}
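+// Note: both conversions skip masks with no counterpart in the other direction;
+// e.g. audio_channel_mask_out_to_in(AUDIO_CHANNEL_OUT_STEREO) yields
+// AUDIO_CHANNEL_IN_STEREO, while masks converting to AUDIO_CHANNEL_INVALID are
+// dropped rather than copied through.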
+
+bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
+{
+    return (left.getFormat() == compareTo.getFormat()) &&
+            (left.getChannels() == compareTo.getChannels()) &&
+            (left.getSampleRates() == compareTo.getSampleRates());
+}
+
 static AudioProfile* createFullDynamicImpl()
 {
     AudioProfile* dynamicProfile = new AudioProfile(gDynamicFormat,
@@ -45,6 +78,48 @@
     return dynamicProfile;
 }
 
+AudioProfile::AudioProfile(audio_format_t format,
+                           audio_channel_mask_t channelMasks,
+                           uint32_t samplingRate) :
+        mName(String8("")),
+        mFormat(format)
+{
+    mChannelMasks.add(channelMasks);
+    mSamplingRates.add(samplingRate);
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+                           const ChannelsVector &channelMasks,
+                           const SampleRateVector &samplingRateCollection) :
+        mName(String8("")),
+        mFormat(format),
+        mChannelMasks(channelMasks),
+        mSamplingRates(samplingRateCollection) {}
+
+void AudioProfile::setChannels(const ChannelsVector &channelMasks)
+{
+    if (mIsDynamicChannels) {
+        mChannelMasks = channelMasks;
+    }
+}
+
+void AudioProfile::setSampleRates(const SampleRateVector &sampleRates)
+{
+    if (mIsDynamicRate) {
+        mSamplingRates = sampleRates;
+    }
+}
+
+void AudioProfile::clear()
+{
+    if (mIsDynamicChannels) {
+        mChannelMasks.clear();
+    }
+    if (mIsDynamicRate) {
+        mSamplingRates.clear();
+    }
+}
+
 status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
                                   audio_format_t format) const
 {
@@ -56,27 +131,6 @@
     return BAD_VALUE;
 }
 
-template <typename T>
-bool operator == (const SortedVector<T> &left, const SortedVector<T> &right)
-{
-    if (left.size() != right.size()) {
-        return false;
-    }
-    for(size_t index = 0; index < right.size(); index++) {
-        if (left[index] != right[index]) {
-            return false;
-        }
-    }
-    return true;
-}
-
-bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
-{
-    return (left.getFormat() == compareTo.getFormat()) &&
-            (left.getChannels() == compareTo.getChannels()) &&
-            (left.getSampleRates() == compareTo.getSampleRates());
-}
-
 status_t AudioProfile::checkCompatibleSamplingRate(uint32_t samplingRate,
                                                    uint32_t &updatedSamplingRate) const
 {
@@ -242,6 +296,50 @@
     write(fd, result.string(), result.size());
 }
 
+ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
+{
+    ssize_t index = Vector::add(profile);
+    // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
+    // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
+    // [](const audio_format_t *format1, const audio_format_t *format2) {
+    //     return compareFormats(*format1, *format2);
+    // }
+    sort(compareFormats);
+    return index;
+}
+
+ssize_t AudioProfileVector::addProfileFromHal(const sp<AudioProfile> &profileToAdd)
+{
+    // Check valid profile to add:
+    if (!profileToAdd->hasValidFormat()) {
+        return -1;
+    }
+    if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+        FormatVector formats;
+        formats.add(profileToAdd->getFormat());
+        setFormats(FormatVector(formats));
+        return 0;
+    }
+    if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
+        setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
+        return 0;
+    }
+    if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+        setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
+        return 0;
+    }
+    // Go through the list of profiles to avoid duplicates
+    for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
+        const sp<AudioProfile> &profile = itemAt(profileIndex);
+        if (profile->isValid() && profile == profileToAdd) {
+            // Nothing to do
+            return profileIndex;
+        }
+    }
+    profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
+    return add(profileToAdd);
+}
+
 status_t AudioProfileVector::checkExactProfile(uint32_t samplingRate,
                                                audio_channel_mask_t channelMask,
                                                audio_format_t format) const
@@ -298,6 +396,233 @@
     return BAD_VALUE;
 }
 
+void AudioProfileVector::clearProfiles()
+{
+    for (size_t i = size(); i != 0; ) {
+        sp<AudioProfile> profile = itemAt(--i);
+        if (profile->isDynamicFormat() && profile->hasValidFormat()) {
+            removeAt(i);
+            continue;
+        }
+        profile->clear();
+    }
+}
+
+// Returns the elements common to the two possibly unsorted vectors 'input1' and
+// 'input2' that also appear in 'order', with the result ordered according to 'order'.
+template<typename T, typename Order>
+std::vector<typename T::value_type> intersectFilterAndOrder(
+        const T& input1, const T& input2, const Order& order)
+{
+    std::set<typename T::value_type> set1{input1.begin(), input1.end()};
+    std::set<typename T::value_type> set2{input2.begin(), input2.end()};
+    std::set<typename T::value_type> common;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+            std::inserter(common, common.begin()));
+    std::vector<typename T::value_type> result;
+    for (const auto& e : order) {
+        if (common.find(e) != common.end()) result.push_back(e);
+    }
+    return result;
+}
+
+// Intersect two possibly unsorted vectors, return common elements according to 'comp' ordering.
+// 'comp' is a comparator function.
+template<typename T, typename Compare>
+std::vector<typename T::value_type> intersectAndOrder(
+        const T& input1, const T& input2, Compare comp)
+{
+    std::set<typename T::value_type, Compare> set1{input1.begin(), input1.end(), comp};
+    std::set<typename T::value_type, Compare> set2{input2.begin(), input2.end(), comp};
+    std::vector<typename T::value_type> result;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+            std::back_inserter(result), comp);
+    return result;
+}
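+// Illustrative only: intersectFilterAndOrder({A, B, C}, {B, A}, /*order=*/{A, B})
+// yields {A, B}; intersectAndOrder({44100, 48000, 96000}, {48000, 44100},
+// std::greater<uint32_t>()) yields {48000, 44100} (highest common rate first).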
+
+status_t AudioProfileVector::findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
+            const std::vector<audio_format_t>& preferredFormats,
+            const std::vector<audio_channel_mask_t>& preferredOutputChannels,
+            bool preferHigherSamplingRates,
+            audio_config_base *bestOutputConfig) const
+{
+    auto formats = intersectFilterAndOrder(getSupportedFormats(),
+            outputProfiles.getSupportedFormats(), preferredFormats);
+    // Pick the best compatible profile.
+    for (const auto& f : formats) {
+        sp<AudioProfile> inputProfile = getFirstValidProfileFor(f);
+        sp<AudioProfile> outputProfile = outputProfiles.getFirstValidProfileFor(f);
+        if (inputProfile == nullptr || outputProfile == nullptr) {
+            continue;
+        }
+        auto channels = intersectFilterAndOrder(inputProfile->getChannels().asOutMask(),
+                outputProfile->getChannels(), preferredOutputChannels);
+        if (channels.empty()) {
+            continue;
+        }
+        auto sampleRates = preferHigherSamplingRates ?
+                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+                        std::greater<typename SampleRateVector::value_type>()) :
+                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+                        std::less<typename SampleRateVector::value_type>());
+        if (sampleRates.empty()) {
+            continue;
+        }
+        ALOGD("%s() found channel mask %#x and sample rate %d for format %#x.",
+                __func__, *channels.begin(), *sampleRates.begin(), f);
+        bestOutputConfig->format = f;
+        bestOutputConfig->sample_rate = *sampleRates.begin();
+        bestOutputConfig->channel_mask = *channels.begin();
+        return NO_ERROR;
+    }
+    return BAD_VALUE;
+}
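+// Note: selection is greedy in format preference order; the first format with a
+// non-empty channel-mask and sample-rate intersection wins, even if a later
+// format would allow a higher sample rate.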
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfile() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->isValid()) {
+            return itemAt(i);
+        }
+    }
+    return 0;
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfileFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->isValid() && itemAt(i)->getFormat() == format) {
+            return itemAt(i);
+        }
+    }
+    return 0;
+}
+
+FormatVector AudioProfileVector::getSupportedFormats() const
+{
+    FormatVector supportedFormats;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->hasValidFormat()) {
+            supportedFormats.add(itemAt(i)->getFormat());
+        }
+    }
+    return supportedFormats;
+}
+
+bool AudioProfileVector::hasDynamicChannelsFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicChannels()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioProfileVector::hasDynamicProfile() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->isDynamic()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioProfileVector::hasDynamicRateFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicRate()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void AudioProfileVector::setFormats(const FormatVector &formats)
+{
+    // Only allow changing the format of a dynamic profile
+    sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
+    if (dynamicFormatProfile == 0) {
+        return;
+    }
+    for (size_t i = 0; i < formats.size(); i++) {
+        sp<AudioProfile> profile = new AudioProfile(formats[i],
+                dynamicFormatProfile->getChannels(),
+                dynamicFormatProfile->getSampleRates());
+        profile->setDynamicFormat(true);
+        profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
+        profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
+        add(profile);
+    }
+}
+
+void AudioProfileVector::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "%*s- Profiles:\n", spaces, "");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "%*sProfile %zu:", spaces + 4, "", i);
+        write(fd, buffer, strlen(buffer));
+        itemAt(i)->dump(fd, spaces + 8);
+    }
+}
+
+sp<AudioProfile> AudioProfileVector::getProfileFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->getFormat() == format) {
+            return itemAt(i);
+        }
+    }
+    return 0;
+}
+
+void AudioProfileVector::setSampleRatesFor(
+        const SampleRateVector &sampleRates, audio_format_t format)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicRate()) {
+            if (profile->hasValidRates()) {
+                // Need to create a new profile with same format
+                sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
+                        sampleRates);
+                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+                add(profileToAdd);
+            } else {
+                profile->setSampleRates(sampleRates);
+            }
+            return;
+        }
+    }
+}
+
+void AudioProfileVector::setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicChannels()) {
+            if (profile->hasValidChannels()) {
+                // Need to create a new profile with same format
+                sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
+                        profile->getSampleRates());
+                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+                add(profileToAdd);
+            } else {
+                profile->setChannels(channelMasks);
+            }
+            return;
+        }
+    }
+}
+
+// static
 int AudioProfileVector::compareFormats(const sp<AudioProfile> *profile1,
                                        const sp<AudioProfile> *profile2)
 {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index 91dee35..5ea4c92 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -35,14 +35,11 @@
                            audio_channel_mask_t channelMask,
                            audio_input_flags_t flags,
                            uid_t uid,
-                           bool isSoundTrigger,
-                           AudioMix* policyMix,
-                           AudioPolicyClientInterface *clientInterface) :
+                           bool isSoundTrigger) :
     mRecordClientInfo({ .uid = uid, .session = session, .source = inputSource}),
     mConfig({ .format = format, .sample_rate = sampleRate, .channel_mask = channelMask}),
     mFlags(flags), mIsSoundTrigger(isSoundTrigger),
-    mOpenCount(1), mActiveCount(0), mPolicyMix(policyMix), mClientInterface(clientInterface),
-    mInfoProvider(NULL)
+    mOpenCount(1), mActiveCount(0)
 {
 }
 
@@ -60,7 +57,6 @@
 
 uint32_t AudioSession::changeActiveCount(int delta)
 {
-    const uint32_t oldActiveCount = mActiveCount;
     if ((delta + (int)mActiveCount) < 0) {
         ALOGW("%s invalid delta %d, active count %d",
               __FUNCTION__, delta, mActiveCount);
@@ -68,34 +64,6 @@
     }
     mActiveCount += delta;
     ALOGV("%s active count %d", __FUNCTION__, mActiveCount);
-    int event = RECORD_CONFIG_EVENT_NONE;
-
-    if ((oldActiveCount == 0) && (mActiveCount > 0)) {
-        event = RECORD_CONFIG_EVENT_START;
-    } else if ((oldActiveCount > 0) && (mActiveCount == 0)) {
-        event = RECORD_CONFIG_EVENT_STOP;
-    }
-
-    if (event != RECORD_CONFIG_EVENT_NONE) {
-        // Dynamic policy callback:
-        // if input maps to a dynamic policy with an activity listener, notify of state change
-        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
-        {
-            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
-                    (event == RECORD_CONFIG_EVENT_START) ? MIX_STATE_MIXING : MIX_STATE_IDLE);
-        }
-
-        // Recording configuration callback:
-        const AudioIODescriptorInterface* provider = mInfoProvider;
-        const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
-                AUDIO_CONFIG_BASE_INITIALIZER;
-        const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
-                AUDIO_PATCH_HANDLE_NONE;
-        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-            mClientInterface->onRecordingConfigurationUpdate(event, &mRecordClientInfo,
-                    &mConfig, &deviceConfig, patchHandle);
-        }
-    }
 
     return mActiveCount;
 }
@@ -114,27 +82,6 @@
     return false;
 }
 
-void AudioSession::setInfoProvider(AudioIODescriptorInterface *provider)
-{
-    mInfoProvider = provider;
-}
-
-void AudioSession::onIODescriptorUpdate() const
-{
-    if (mActiveCount > 0) {
-        // resend the callback after requerying the informations from the info provider
-        const AudioIODescriptorInterface* provider = mInfoProvider;
-        const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
-                AUDIO_CONFIG_BASE_INITIALIZER;
-        const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
-                AUDIO_PATCH_HANDLE_NONE;
-        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-            mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
-                    &mRecordClientInfo, &mConfig, &deviceConfig, patchHandle);
-        }
-    }
-}
-
 status_t AudioSession::dump(int fd, int spaces, int index) const
 {
     const size_t SIZE = 256;
@@ -169,8 +116,7 @@
 }
 
 status_t AudioSessionCollection::addSession(audio_session_t session,
-                                         const sp<AudioSession>& audioSession,
-                                         AudioIODescriptorInterface *provider)
+                                         const sp<AudioSession>& audioSession)
 {
     ssize_t index = indexOfKey(session);
 
@@ -178,7 +124,6 @@
         ALOGW("addSession() session %d already in", session);
         return ALREADY_EXISTS;
     }
-    audioSession->setInfoProvider(provider);
     add(session, audioSession);
     ALOGV("addSession() session %d  client %d source %d",
             session, audioSession->uid(), audioSession->inputSource());
@@ -194,7 +139,6 @@
         return ALREADY_EXISTS;
     }
     ALOGV("removeSession() session %d", session);
-    valueAt(index)->setInfoProvider(NULL);
     removeItemsAt(index);
     return NO_ERROR;
 }
@@ -271,13 +215,6 @@
     return source;
 }
 
-void AudioSessionCollection::onIODescriptorUpdate() const
-{
-    for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->onIODescriptorUpdate();
-    }
-}
-
 status_t AudioSessionCollection::dump(int fd, int spaces) const
 {
     const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
deleted file mode 100644
index ba33e57..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioSourceDescriptor"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/String8.h>
-#include <media/AudioPolicyHelper.h>
-#include <HwModule.h>
-#include <AudioGain.h>
-#include <AudioSourceDescriptor.h>
-#include <DeviceDescriptor.h>
-#include <IOProfile.h>
-#include <AudioOutputDescriptor.h>
-
-namespace android {
-
-status_t AudioSourceDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "mStream: %d\n", audio_attributes_to_stream_type(&mAttributes));
-    result.append(buffer);
-    snprintf(buffer, SIZE, "mDevice:\n");
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-    mDevice->dump(fd, 2 , 0);
-    return NO_ERROR;
-}
-
-
-status_t AudioSourceCollection::dump(int fd) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-
-    snprintf(buffer, SIZE, "\nAudio sources dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < size(); i++) {
-        snprintf(buffer, SIZE, "- Source %d dump:\n", keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        valueAt(i)->dump(fd);
-    }
-
-    return NO_ERROR;
-}
-
-}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
new file mode 100644
index 0000000..5aca3cc
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM_ClientDescriptor"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include "AudioGain.h"
+#include "AudioOutputDescriptor.h"
+#include "AudioPatch.h"
+#include "ClientDescriptor.h"
+#include "DeviceDescriptor.h"
+#include "HwModule.h"
+#include "IOProfile.h"
+
+namespace android {
+
+status_t ClientDescriptor::dump(int fd, int spaces, int index)
+{
+    String8 out;
+
+    // FIXME: use until other descriptor classes have a dump to String8 method
+    mDumpFd = fd;
+
+    status_t status = dump(out, spaces, index);
+    if (status == NO_ERROR) {
+        write(fd, out.string(), out.size());
+    }
+
+    return status;
+}
+
+status_t ClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    out.appendFormat("%*sClient %d:\n", spaces, "", index+1);
+    out.appendFormat("%*s- Port ID: %d Session Id: %d UID: %d\n", spaces, "",
+             mPortId, mSessionId, mUid);
+    out.appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
+             mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
+    out.appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
+    out.appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+    return NO_ERROR;
+}
+
+status_t TrackClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    ClientDescriptor::dump(out, spaces, index);
+
+    out.appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
+
+    return NO_ERROR;
+}
+
+status_t RecordClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    ClientDescriptor::dump(out, spaces, index);
+
+    out.appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+
+    return NO_ERROR;
+}
+
+SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
+         audio_attributes_t attributes, const sp<AudioPatch>& patchDesc,
+         const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream) :
+    TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
+        AUDIO_CONFIG_BASE_INITIALIZER, AUDIO_PORT_HANDLE_NONE, stream, AUDIO_OUTPUT_FLAG_NONE),
+        mPatchDesc(patchDesc), mSrcDevice(srcDevice)
+{
+}
+
+void SourceClientDescriptor::setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput)
+{
+    mSwOutput = swOutput;
+}
+
+void SourceClientDescriptor::setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput)
+{
+    mHwOutput = hwOutput;
+}
+
+status_t SourceClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    TrackClientDescriptor::dump(out, spaces, index);
+
+    if (mDumpFd >= 0) {
+        out.appendFormat("%*s- Device:\n", spaces, "");
+        write(mDumpFd, out.string(), out.size());
+
+        mSrcDevice->dump(mDumpFd, 2, 0);
+        mDumpFd = -1;
+    }
+
+    return NO_ERROR;
+}
+
+status_t SourceClientCollection::dump(int fd) const
+{
+    String8 out;
+    out.append("\nAudio sources:\n");
+    write(fd, out.string(), out.size());
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->dump(fd, 2, i);
+    }
+
+    return NO_ERROR;
+}
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index d3cc8b9..1638645 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -121,17 +121,28 @@
     return ret;
 }
 
-audio_devices_t DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
+DeviceVector DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
 {
-    audio_devices_t devices = AUDIO_DEVICE_NONE;
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->getModuleHandle() == moduleHandle) {
-            devices |= itemAt(i)->type();
+    DeviceVector devices;
+    for (const auto& device : *this) {
+        if (device->getModuleHandle() == moduleHandle) {
+            devices.add(device);
         }
     }
     return devices;
 }
 
+audio_devices_t DeviceVector::getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const
+{
+    audio_devices_t deviceTypes = AUDIO_DEVICE_NONE;
+    for (const auto& device : *this) {
+        if (device->getModuleHandle() == moduleHandle) {
+            deviceTypes |= device->type();
+        }
+    }
+    return deviceTypes;
+}
+
 sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address) const
 {
     sp<DeviceDescriptor> device;
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
index 38ab560..440a4e7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "APM::SessionRoute"
+#define LOG_TAG "APM_SessionRoute"
 //#define LOG_NDEBUG 0
 
 #include "SessionRoute.h"
@@ -122,19 +122,17 @@
 audio_devices_t SessionRouteMap::getActiveDeviceForStream(audio_stream_type_t streamType,
                                                           const DeviceVector& availableDevices)
 {
-    audio_devices_t device = AUDIO_DEVICE_NONE;
-
     for (size_t index = 0; index < size(); index++) {
         sp<SessionRoute> route = valueAt(index);
         if (streamType == route->mStreamType && route->isActiveOrChanged()
                 && route->mDeviceDescriptor != 0) {
-            device = route->mDeviceDescriptor->type();
+            audio_devices_t device = route->mDeviceDescriptor->type();
             if (!availableDevices.getDevicesFromTypeMask(device).isEmpty()) {
-                break;
+                return device;
             }
         }
     }
-    return device;
+    return AUDIO_DEVICE_NONE;
 }
 
 } // namespace android
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 007eea0..30f275f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -313,7 +313,7 @@
             audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
             sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
             audio_devices_t availPrimaryInputDevices =
-                 availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
+                 availableInputDevices.getDeviceTypesFromHwModule(primaryOutput->getModuleHandle());
 
             // TODO: getPrimaryOutput return only devices from first module in
             // audio_policy_configuration.xml, hearing aid is not there, but it's
@@ -408,8 +408,7 @@
 
     case STRATEGY_SONIFICATION:
 
-        // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
-        // handleIncallSonification().
+        // If incall, just select the STRATEGY_PHONE device
         if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
             device = getDeviceForStrategyInt(
                     STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
@@ -669,9 +668,8 @@
         if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
                 (availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
             sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
-            availableDeviceTypes =
-                    availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle())
-                    & ~AUDIO_DEVICE_BIT_IN;
+            availableDeviceTypes = availableInputDevices.getDeviceTypesFromHwModule(
+                    primaryOutput->getModuleHandle()) & ~AUDIO_DEVICE_BIT_IN;
         }
 
         switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
@@ -726,6 +724,9 @@
             device = AUDIO_DEVICE_IN_BACK_MIC;
         } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
             device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+            // This is specifically for a device without a built-in mic
+            device = AUDIO_DEVICE_IN_USB_DEVICE;
         }
         break;
     case AUDIO_SOURCE_VOICE_DOWNLINK:
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 958ac30..2a8b397 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -31,6 +31,7 @@
 
 #include <inttypes.h>
 #include <math.h>
+#include <vector>
 
 #include <AudioPolicyManagerInterface.h>
 #include <AudioPolicyEngineInstance.h>
@@ -38,7 +39,6 @@
 #include <utils/Log.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicyHelper.h>
-#include <media/PatchBuilder.h>
 #include <private/android_filesystem_config.h>
 #include <soundtrigger/SoundTrigger.h>
 #include <system/audio.h>
@@ -82,6 +82,16 @@
     AUDIO_FORMAT_AAC_XHE,
 };
 
+// Compressed formats for MSD module, ordered from most preferred to least preferred.
+static const std::vector<audio_format_t> compressedFormatsOrder = {{
+        AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
+        AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
+// Channel masks for MSD module, 3D > 2D > 1D ordering (most preferred to least preferred).
+static const std::vector<audio_channel_mask_t> surroundChannelMasksOrder = {{
+        AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
+        AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
+        AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
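+// Both lists are fed to AudioProfileVector::findBestMatchingOutputConfig() in
+// getBestMsdAudioProfileFor() below; presumably the earliest entry supported by both the
+// MSD input profiles and the sink device's output profiles is the one that gets selected.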
+
 // ----------------------------------------------------------------------------
 // AudioPolicyInterface implementation
 // ----------------------------------------------------------------------------
@@ -202,31 +212,30 @@
             return BAD_VALUE;
         }
 
-        // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
-        // output is suspended before any tracks are moved to it
-        checkA2dpSuspend();
-        checkOutputForAllStrategies();
-        // outputs must be closed after checkOutputForAllStrategies() is executed
-        if (!outputs.isEmpty()) {
-            for (audio_io_handle_t output : outputs) {
-                sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
-                // close unused outputs after device disconnection or direct outputs that have been
-                // opened by checkOutputsForDevice() to query dynamic parameters
-                if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
-                        (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
-                         (desc->mDirectOpenCount == 0))) {
-                    closeOutput(output);
+        checkForDeviceAndOutputChanges([&]() {
+            // outputs must be closed after checkOutputForAllStrategies() is executed
+            if (!outputs.isEmpty()) {
+                for (audio_io_handle_t output : outputs) {
+                    sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+                    // close unused outputs after device disconnection or direct outputs that have been
+                    // opened by checkOutputsForDevice() to query dynamic parameters
+                    if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
+                            (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+                             (desc->mDirectOpenCount == 0))) {
+                        closeOutput(output);
+                    }
                 }
+                // check A2DP again after closing A2DP output to reset mA2dpSuspended if needed
+                return true;
             }
-            // check again after closing A2DP output to reset mA2dpSuspended if needed
-            checkA2dpSuspend();
-        }
+            return false;
+        });
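+        // The boolean returned by the lambda presumably tells checkForDeviceAndOutputChanges()
+        // whether checkA2dpSuspend() must run again once the outputs above have been closed.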
 
-        updateDevicesAndOutputs();
         if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
             audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
             updateCallRouting(newDevice);
         }
+        const audio_devices_t msdOutDevice = getMsdAudioOutDeviceTypes();
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
@@ -234,7 +243,8 @@
                 // do not force device change on duplicated output because if device is 0, it will
                 // also force a device 0 for the two outputs it is duplicated to which may override
                 // a valid device selection on those outputs.
-                bool force = !desc->isDuplicated()
+                bool force = (msdOutDevice == AUDIO_DEVICE_NONE || msdOutDevice != desc->device())
+                        && !desc->isDuplicated()
                         && (!device_distinguishes_on_address(device)
                                 // always force when disconnecting (a non-duplicated device)
                                 || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
@@ -507,13 +517,7 @@
         // symmetric to the one in startInput()
         for (const auto& activeDesc : mInputs.getActiveInputs()) {
             if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
-                AudioSessionCollection activeSessions =
-                        activeDesc->getAudioSessions(true /*activeOnly*/);
-                for (size_t j = 0; j < activeSessions.size(); j++) {
-                    audio_session_t activeSession = activeSessions.keyAt(j);
-                    stopInput(activeDesc->mIoHandle, activeSession);
-                    releaseInput(activeDesc->mIoHandle, activeSession);
-                }
+                closeSessions(activeDesc, true /*activeOnly*/);
             }
         }
     }
@@ -533,7 +537,7 @@
 }
 
 sp<DeviceDescriptor> AudioPolicyManager::findDevice(
-        const DeviceVector& devices, audio_devices_t device) {
+        const DeviceVector& devices, audio_devices_t device) const {
     DeviceVector deviceList = devices.getDevicesFromTypeMask(device);
     ALOG_ASSERT(!deviceList.isEmpty(),
             "%s() selected device type %#x is not in devices list", __func__, device);
@@ -551,14 +555,8 @@
         return;
     }
     /// Opens: can these line be executed after the switch of volume curves???
-    // if leaving call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(oldState)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-            handleIncallSonification((audio_stream_type_t)stream, false, true);
-        }
-
         // force reevaluating accessibility routing when call stops
         mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
     }
@@ -571,9 +569,7 @@
                   || (is_state_in_call(state) && (state != oldState)));
 
     // check for device and output changes triggered by new phone state
-    checkA2dpSuspend();
-    checkOutputForAllStrategies();
-    updateDevicesAndOutputs();
+    checkForDeviceAndOutputChanges();
 
     int delayMs = 0;
     if (isStateInCall(state)) {
@@ -637,14 +633,8 @@
         }
     }
 
-    // if entering in call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(state)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-            handleIncallSonification((audio_stream_type_t)stream, true, true);
-        }
-
         // force reevaluating accessibility routing when call starts
         mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
     }
@@ -679,9 +669,7 @@
             (usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
 
     // check for device and output changes triggered by new force usage
-    checkA2dpSuspend();
-    checkOutputForAllStrategies();
-    updateDevicesAndOutputs();
+    checkForDeviceAndOutputChanges();
 
     //FIXME: workaround for truncated touch sounds
     // to be removed when the problem is handled by system UI
@@ -797,6 +785,12 @@
                                               audio_port_handle_t *portId)
 {
     audio_attributes_t attributes;
+    DeviceVector outputDevices;
+    routing_strategy strategy;
+    audio_devices_t device;
+    audio_port_handle_t requestedDeviceId = *selectedDeviceId;
+    audio_devices_t msdDevice = getMsdAudioOutDeviceTypes();
+
     if (attr != NULL) {
         if (!isValidAttributes(attr)) {
             ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
@@ -813,63 +807,116 @@
         stream_type_to_audio_attributes(*stream, &attributes);
     }
 
-    // TODO: check for existing client for this port ID
-    if (*portId == AUDIO_PORT_HANDLE_NONE) {
-        *portId = AudioPort::getNextUniqueId();
-    }
-
-    sp<SwAudioOutputDescriptor> desc;
-    if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
-        ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
-        if (!audio_has_proportional_frames(config->format)) {
-            return BAD_VALUE;
-        }
-        *stream = streamTypefromAttributesInt(&attributes);
-        *output = desc->mIoHandle;
-        ALOGV("getOutputForAttr() returns output %d", *output);
-        return NO_ERROR;
-    }
-    if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
-        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
-        return BAD_VALUE;
-    }
-
     ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
             " session %d selectedDeviceId %d",
             attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
             session, *selectedDeviceId);
 
-    *stream = streamTypefromAttributesInt(&attributes);
+    // TODO: check for existing client for this port ID
+    if (*portId == AUDIO_PORT_HANDLE_NONE) {
+        *portId = AudioPort::getNextUniqueId();
+    }
 
-    // Explicit routing?
+    // First check for explicit routing (e.g. setPreferredDevice)
     sp<DeviceDescriptor> deviceDesc;
     if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
         deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
+    } else {
+        // If no explicit route, is there a matching dynamic policy that applies?
+        sp<SwAudioOutputDescriptor> desc;
+        if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
+            ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+            if (!audio_has_proportional_frames(config->format)) {
+                return BAD_VALUE;
+            }
+            *stream = streamTypefromAttributesInt(&attributes);
+            *output = desc->mIoHandle;
+            ALOGV("getOutputForAttr() returns output %d", *output);
+            goto exit;
+        }
+
+        // Virtual sources must always be dynamically or explicitly routed
+        if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+            ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+            return BAD_VALUE;
+        }
     }
+
+    *stream = streamTypefromAttributesInt(&attributes);
+
+    // TODO: Should this happen only if an explicit route is active?
+    // The previous code structure meant this always ran, which appears to add a null
+    // deviceDesc when no explicit route is in use. Is that the intended and necessary behavior?
     mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
 
-    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    strategy = (routing_strategy) getStrategyForAttr(&attributes);
+    device = getDeviceForStrategy(strategy, false /*fromCache*/);
 
     if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
         *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
     }
 
+    // Set incall music only if the device was explicitly set, and fall back to the device
+    // chosen by the engine otherwise.
+    // FIXME: provide a more generic approach that is not device-specific and move this back
+    // to getOutputForDevice.
+    if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
+        *stream == AUDIO_STREAM_MUSIC &&
+        audio_is_linear_pcm(config->format) &&
+        isInCall()) {
+        if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
+            *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
+        } else {
+            device = mEngine->getDeviceForStrategy(strategy);
+        }
+    }
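+    // Illustrative example: a linear-PCM MUSIC track explicitly routed to
+    // AUDIO_DEVICE_OUT_TELEPHONY_TX while in a call is tagged with
+    // AUDIO_OUTPUT_FLAG_INCALL_MUSIC, whereas the same track without an explicit device
+    // request is re-routed to whatever the engine picks for its strategy.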
+
     ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
           "flags %#x",
           device, config->sample_rate, config->format, config->channel_mask, *flags);
 
-    *output = getOutputForDevice(device, session, *stream, config, flags);
+    *output = AUDIO_IO_HANDLE_NONE;
+    if (msdDevice != AUDIO_DEVICE_NONE) {
+        *output = getOutputForDevice(msdDevice, session, *stream, config, flags);
+        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
+            ALOGV("%s() Using MSD device 0x%x instead of device 0x%x",
+                    __func__, msdDevice, device);
+            device = msdDevice;
+        } else {
+            *output = AUDIO_IO_HANDLE_NONE;
+        }
+    }
+    if (*output == AUDIO_IO_HANDLE_NONE) {
+        *output = getOutputForDevice(device, session, *stream, config, flags);
+    }
     if (*output == AUDIO_IO_HANDLE_NONE) {
         mOutputRoutes.removeRoute(session);
         return INVALID_OPERATION;
     }
 
-    DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
+    outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
     *selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
             : AUDIO_PORT_HANDLE_NONE;
 
-    ALOGV("  getOutputForAttr() returns output %d selectedDeviceId %d", *output, *selectedDeviceId);
+exit:
+    audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
+        .format = config->format,
+        .channel_mask = config->channel_mask };
+    sp<TrackClientDescriptor> clientDesc =
+        new TrackClientDescriptor(*portId, uid, session,
+                                  attributes, clientConfig, requestedDeviceId, *stream, *flags);
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
+    outputDesc->clients().emplace(*portId, clientDesc);
+
+    ALOGV("  getOutputForAttr() returns output %d selectedDeviceId %d for port ID %d",
+          *output, *selectedDeviceId, *portId);
 
     return NO_ERROR;
 }
@@ -910,11 +957,6 @@
         *flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
                                        AUDIO_OUTPUT_FLAG_DIRECT);
         ALOGV("Set VoIP and Direct output flags for PCM format");
-    } else if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
-        stream == AUDIO_STREAM_MUSIC &&
-        audio_is_linear_pcm(config->format) &&
-        isInCall()) {
-        *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
     }
 
 
@@ -1036,6 +1078,164 @@
     return output;
 }
 
+sp<DeviceDescriptor> AudioPolicyManager::getMsdAudioInDevice() const {
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    if (msdModule != 0) {
+        DeviceVector msdInputDevices = mAvailableInputDevices.getDevicesFromHwModule(
+                msdModule->getHandle());
+        if (!msdInputDevices.isEmpty()) return msdInputDevices.itemAt(0);
+    }
+    return 0;
+}
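+// getMsdAudioInDevice() returns the first available input device exposed by the MSD
+// hw module, or a null descriptor when the module (or such a device) is absent.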
+
+audio_devices_t AudioPolicyManager::getMsdAudioOutDeviceTypes() const {
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    if (msdModule != 0) {
+        return mAvailableOutputDevices.getDeviceTypesFromHwModule(msdModule->getHandle());
+    }
+    return AUDIO_DEVICE_NONE;
+}
+
+const AudioPatchCollection AudioPolicyManager::getMsdPatches() const {
+    AudioPatchCollection msdPatches;
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    // Guard against a missing MSD module before dereferencing it.
+    if (msdModule == nullptr) return msdPatches;
+    audio_module_handle_t msdModuleHandle = msdModule->getHandle();
+    if (msdModuleHandle == AUDIO_MODULE_HANDLE_NONE) return msdPatches;
+    for (size_t i = 0; i < mAudioPatches.size(); ++i) {
+        sp<AudioPatch> patch = mAudioPatches.valueAt(i);
+        for (size_t j = 0; j < patch->mPatch.num_sources; ++j) {
+            const struct audio_port_config *source = &patch->mPatch.sources[j];
+            if (source->type == AUDIO_PORT_TYPE_DEVICE &&
+                    source->ext.device.hw_module == msdModuleHandle) {
+                msdPatches.addAudioPatch(patch->mHandle, patch);
+            }
+        }
+    }
+    return msdPatches;
+}
+
+status_t AudioPolicyManager::getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+        bool hwAvSync, audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
+{
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    if (msdModule == nullptr) {
+        ALOGE("%s() unable to get MSD module", __func__);
+        return NO_INIT;
+    }
+    sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice);
+    if (deviceModule == nullptr) {
+        ALOGE("%s() unable to get module for %#x", __func__, outputDevice);
+        return NO_INIT;
+    }
+    const InputProfileCollection &inputProfiles = msdModule->getInputProfiles();
+    if (inputProfiles.isEmpty()) {
+        ALOGE("%s() no input profiles for MSD module", __func__);
+        return NO_INIT;
+    }
+    const OutputProfileCollection &outputProfiles = deviceModule->getOutputProfiles();
+    if (outputProfiles.isEmpty()) {
+        ALOGE("%s() no output profiles for device %#x", __func__, outputDevice);
+        return NO_INIT;
+    }
+    AudioProfileVector msdProfiles;
+    // Each IOProfile represents a MixPort from audio_policy_configuration.xml
+    for (const auto &inProfile : inputProfiles) {
+        if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0)) {
+            msdProfiles.appendVector(inProfile->getAudioProfiles());
+        }
+    }
+    AudioProfileVector deviceProfiles;
+    for (const auto &outProfile : outputProfiles) {
+        if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) {
+            deviceProfiles.appendVector(outProfile->getAudioProfiles());
+        }
+    }
+    struct audio_config_base bestSinkConfig;
+    status_t result = msdProfiles.findBestMatchingOutputConfig(deviceProfiles,
+            compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/,
+            &bestSinkConfig);
+    if (result != NO_ERROR) {
+        ALOGD("%s() no matching profiles found for device: %#x, hwAvSync: %d",
+                __func__, outputDevice, hwAvSync);
+        return result;
+    }
+    sinkConfig->sample_rate = bestSinkConfig.sample_rate;
+    sinkConfig->channel_mask = bestSinkConfig.channel_mask;
+    sinkConfig->format = bestSinkConfig.format;
+    // For encoded streams force direct flag to prevent downstream mixing.
+    sinkConfig->flags.output = static_cast<audio_output_flags_t>(
+            sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_DIRECT);
+    sourceConfig->sample_rate = bestSinkConfig.sample_rate;
+    // Specify exact channel mask to prevent guessing by bit count in PatchPanel.
+    sourceConfig->channel_mask = audio_channel_mask_out_to_in(bestSinkConfig.channel_mask);
+    sourceConfig->format = bestSinkConfig.format;
+    // Copy input stream directly without any processing (e.g. resampling).
+    sourceConfig->flags.input = static_cast<audio_input_flags_t>(
+            sourceConfig->flags.input | AUDIO_INPUT_FLAG_DIRECT);
+    if (hwAvSync) {
+        sinkConfig->flags.output = static_cast<audio_output_flags_t>(
+                sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+        sourceConfig->flags.input = static_cast<audio_input_flags_t>(
+                sourceConfig->flags.input | AUDIO_INPUT_FLAG_HW_AV_SYNC);
+    }
+    const unsigned int config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE |
+            AUDIO_PORT_CONFIG_CHANNEL_MASK | AUDIO_PORT_CONFIG_FORMAT | AUDIO_PORT_CONFIG_FLAGS;
+    sinkConfig->config_mask |= config_mask;
+    sourceConfig->config_mask |= config_mask;
+    return NO_ERROR;
+}
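+// Illustrative example (hypothetical device capabilities): if the MSD input profiles and an
+// HDMI sink both advertise AUDIO_FORMAT_E_AC3 with AUDIO_CHANNEL_OUT_5POINT1 at 48 kHz, the
+// negotiation above yields that format/rate on both ports, the matching input channel mask
+// on the source, and DIRECT flags on both sides to avoid any downstream mixing or resampling.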
+
+PatchBuilder AudioPolicyManager::buildMsdPatch(audio_devices_t outputDevice) const
+{
+    PatchBuilder patchBuilder;
+    patchBuilder.addSource(getMsdAudioInDevice()).
+            addSink(findDevice(mAvailableOutputDevices, outputDevice));
+    audio_port_config sourceConfig = patchBuilder.patch()->sources[0];
+    audio_port_config sinkConfig = patchBuilder.patch()->sinks[0];
+    // TODO: Figure out whether the MSD module has the HW_AV_SYNC flag set in the AP config file.
+    // For now, simply try the HW_AV_SYNC profile first and fall back to the non-AV-sync one.
+    status_t res = getBestMsdAudioProfileFor(outputDevice, true /*hwAvSync*/,
+            &sourceConfig, &sinkConfig) == NO_ERROR ? NO_ERROR :
+            getBestMsdAudioProfileFor(
+                    outputDevice, false /*hwAvSync*/, &sourceConfig, &sinkConfig);
+    if (res == NO_ERROR) {
+        // Found a matching profile for encoded audio. Re-create PatchBuilder with this config.
+        return (PatchBuilder()).addSource(sourceConfig).addSink(sinkConfig);
+    }
+    ALOGV("%s() no matching profile found. Fall through to default PCM patch"
+            " supporting PCM format conversion.", __func__);
+    return patchBuilder;
+}
+
+status_t AudioPolicyManager::setMsdPatch(audio_devices_t outputDevice) {
+    ALOGV("%s() for outputDevice %#x", __func__, outputDevice);
+    if (outputDevice == AUDIO_DEVICE_NONE) {
+        // Use the media strategy for an unspecified output device. This should only
+        // occur when called from checkForDeviceAndOutputChanges(), so device connection
+        // events may invalidate explicit routing requests.
+        outputDevice = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+    }
+    PatchBuilder patchBuilder = buildMsdPatch(outputDevice);
+    const struct audio_patch* patch = patchBuilder.patch();
+    const AudioPatchCollection msdPatches = getMsdPatches();
+    if (!msdPatches.isEmpty()) {
+        LOG_ALWAYS_FATAL_IF(msdPatches.size() > 1,
+                "The current MSD prototype only supports one output patch");
+        sp<AudioPatch> currentPatch = msdPatches.valueAt(0);
+        if (audio_patches_are_equal(&currentPatch->mPatch, patch)) {
+            return NO_ERROR;
+        }
+        releaseAudioPatch(currentPatch->mHandle, mUidCached);
+    }
+    status_t status = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/,
+            patch, 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
+    ALOGE_IF(status != NO_ERROR, "%s() error %d creating MSD audio patch", __func__, status);
+    ALOGI_IF(status == NO_ERROR, "%s() Patch created from MSD_IN to "
+           "device:%#x (format:%#x channels:%#x samplerate:%d)", __func__, outputDevice,
+           patch->sources[0].format, patch->sources[0].channel_mask, patch->sources[0].sample_rate);
+    return status;
+}
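+// Sketch of the overall MSD flow implemented above: when MSD output devices are present,
+// getOutputForAttr() opens the client's output on the MSD device and setMsdPatch() then
+// (re)creates the single MSD-to-sink patch, releasing any previously installed patch when
+// the negotiated configuration has changed.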
+
 audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                                        audio_output_flags_t flags,
                                                        audio_format_t format)
@@ -1116,19 +1316,21 @@
     return outputs[0];
 }
 
-status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
-                                             audio_stream_type_t stream,
-                                             audio_session_t session)
+status_t AudioPolicyManager::startOutput(audio_port_handle_t portId)
 {
-    ALOGV("startOutput() output %d, stream %d, session %d",
-          output, stream, session);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("startOutput() unknown output %d", output);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+    if (outputDesc == 0) {
+        ALOGW("startOutput() no output for client %d", portId);
         return BAD_VALUE;
     }
+    sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+    audio_stream_type_t stream = client->stream();
+    audio_session_t session = client->session();
 
-    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+    ALOGV("startOutput() output %d, stream %d, session %d",
+          outputDesc->mIoHandle, stream, session);
 
     status_t status = outputDesc->start();
     if (status != NO_ERROR) {
@@ -1152,7 +1354,7 @@
     } else if (mOutputRoutes.getAndClearRouteChanged(session)) {
         newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
         if (newDevice != outputDesc->device()) {
-            checkStrategyRoute(getStrategy(stream), output);
+            checkStrategyRoute(getStrategy(stream), outputDesc->mIoHandle);
         }
     } else {
         newDevice = AUDIO_DEVICE_NONE;
@@ -1279,11 +1481,6 @@
         const uint32_t muteWaitMs =
                 setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
 
-        // handle special case for sonification while in call
-        if (isInCall()) {
-            handleIncallSonification(stream, true, false);
-        }
-
         // apply volume rules for current stream and device if necessary
         checkAndSetVolume(stream,
                           mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
@@ -1317,27 +1514,23 @@
         setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
     }
 
-    if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
-            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-        setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
-    }
-
     return NO_ERROR;
 }
 
-
-status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
-                                            audio_stream_type_t stream,
-                                            audio_session_t session)
+status_t AudioPolicyManager::stopOutput(audio_port_handle_t portId)
 {
-    ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("stopOutput() unknown output %d", output);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+    if (outputDesc == 0) {
+        ALOGW("stopOutput() no output for client %d", portId);
         return BAD_VALUE;
     }
+    sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+    audio_stream_type_t stream = client->stream();
+    audio_session_t session = client->session();
 
-    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+    ALOGV("stopOutput() output %d, stream %d, session %d", outputDesc->mIoHandle, stream, session);
 
     if (outputDesc->mRefCount[stream] == 1) {
         // Automatically disable the remote submix input when output is stopped on a
@@ -1378,11 +1571,6 @@
     // always handle stream stop, check which stream type is stopping
     handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
 
-    // handle special case for sonification while in call
-    if (isInCall()) {
-        handleIncallSonification(stream, false, false);
-    }
-
     if (outputDesc->mRefCount[stream] > 0) {
         // decrement usage count of this stream on the output
         outputDesc->changeRefCount(stream, -1);
@@ -1439,32 +1627,35 @@
     }
 }
 
-void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
-                                       audio_stream_type_t stream __unused,
-                                       audio_session_t session __unused)
+void AudioPolicyManager::releaseOutput(audio_port_handle_t portId)
 {
-    ALOGV("releaseOutput() %d", output);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("releaseOutput() releasing unknown output %d", output);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+    if (outputDesc == 0) {
+        ALOGW("releaseOutput() no output for client %d", portId);
         return;
     }
+    sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+    audio_session_t session = client->session();
+
+    ALOGV("releaseOutput() %d", outputDesc->mIoHandle);
 
     // Routing
     mOutputRoutes.removeRoute(session);
 
-    sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(index);
-    if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-        if (desc->mDirectOpenCount <= 0) {
+    if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+        if (outputDesc->mDirectOpenCount <= 0) {
             ALOGW("releaseOutput() invalid open count %d for output %d",
-                                                              desc->mDirectOpenCount, output);
+                  outputDesc->mDirectOpenCount, outputDesc->mIoHandle);
             return;
         }
-        if (--desc->mDirectOpenCount == 0) {
-            closeOutput(output);
+        if (--outputDesc->mDirectOpenCount == 0) {
+            closeOutput(outputDesc->mIoHandle);
             mpClientInterface->onAudioPortListUpdate();
         }
     }
+    outputDesc->clients().erase(portId);
 }
 
 
@@ -1489,6 +1680,9 @@
     audio_source_t inputSource = attr->source;
     AudioMix *policyMix = NULL;
     DeviceVector inputDevices;
+    sp<AudioInputDescriptor> inputDesc;
+    sp<RecordClientDescriptor> clientDesc;
+    audio_port_handle_t requestedDeviceId = *selectedDeviceId;
 
     if (inputSource == AUDIO_SOURCE_DEFAULT) {
         inputSource = AUDIO_SOURCE_MIC;
@@ -1544,7 +1738,7 @@
                 : AUDIO_PORT_HANDLE_NONE;
         ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session);
 
-        return NO_ERROR;
+        goto exit;
     }
 
     *input = AUDIO_IO_HANDLE_NONE;
@@ -1610,8 +1804,14 @@
     *selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
             : AUDIO_PORT_HANDLE_NONE;
 
-    ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d",
-            *input, *inputType, *selectedDeviceId);
+exit:
+    clientDesc = new RecordClientDescriptor(*portId, uid, session,
+                                  *attr, *config, requestedDeviceId, inputSource, flags);
+    inputDesc = mInputs.valueFor(*input);
+    inputDesc->clients().emplace(*portId, clientDesc);
+
+    ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d for port ID %d",
+            *input, *inputType, *selectedDeviceId, *portId);
 
     return NO_ERROR;
 
@@ -1693,74 +1893,7 @@
                                                      config->channel_mask,
                                                      flags,
                                                      uid,
-                                                     isSoundTrigger,
-                                                     policyMix, mpClientInterface);
-
-// FIXME: disable concurrent capture until UI is ready
-#if 0
-    // reuse an open input if possible
-    sp<AudioInputDescriptor> reusedInputDesc;
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
-        // reuse input if:
-        // - it shares the same profile
-        //      AND
-        // - it is not a reroute submix input
-        //      AND
-        // - it is: not used for sound trigger
-        //                OR
-        //          used for sound trigger and all clients use the same session ID
-        //
-        if ((profile == desc->mProfile) &&
-            (isSoundTrigger == desc->isSoundTrigger()) &&
-            !is_virtual_input_device(device)) {
-
-            sp<AudioSession> as = desc->getAudioSession(session);
-            if (as != 0) {
-                // do not allow unmatching properties on same session
-                if (as->matches(audioSession)) {
-                    as->changeOpenCount(1);
-                } else {
-                    ALOGW("getInputForDevice() record with different attributes"
-                          " exists for session %d", session);
-                    continue;
-                }
-            } else if (isSoundTrigger) {
-                continue;
-            }
-
-            // Reuse the already opened input stream on this profile if:
-            // - the new capture source is background OR
-            // - the path requested configurations match OR
-            // - the new source priority is less than the highest source priority on this input
-            // If the input stream cannot be reused, close it before opening a new stream
-            // on the same profile for the new client so that the requested path configuration
-            // can be selected.
-            if (!isConcurrentSource(inputSource) &&
-                    ((desc->mSamplingRate != samplingRate ||
-                    desc->mChannelMask != config->channel_mask ||
-                    !audio_formats_match(desc->mFormat, config->format)) &&
-                    (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
-                     source_priority(inputSource)))) {
-                reusedInputDesc = desc;
-                continue;
-            } else {
-                desc->addAudioSession(session, audioSession);
-                ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
-                return mInputs.keyAt(i);
-            }
-        }
-    }
-
-    if (reusedInputDesc != 0) {
-        AudioSessionCollection sessions = reusedInputDesc->getAudioSessions(false /*activeOnly*/);
-        for (size_t j = 0; j < sessions.size(); j++) {
-            audio_session_t currentSession = sessions.keyAt(j);
-            stopInput(reusedInputDesc->mIoHandle, currentSession);
-            releaseInput(reusedInputDesc->mIoHandle, currentSession);
-        }
-    }
-#endif
+                                                     isSoundTrigger);
 
     if (!profile->canOpenNewIo()) {
         return AUDIO_IO_HANDLE_NONE;
@@ -1881,44 +2014,32 @@
 }
 
 
-status_t AudioPolicyManager::startInput(audio_io_handle_t input,
-                                        audio_session_t session,
+status_t AudioPolicyManager::startInput(audio_port_handle_t portId,
                                         bool silenced,
                                         concurrency_type__mask_t *concurrency)
 {
+    *concurrency = API_INPUT_CONCURRENCY_NONE;
+
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+    if (inputDesc == 0) {
+        ALOGW("startInput() no input for client %d", portId);
+        return BAD_VALUE;
+    }
+    sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+    audio_session_t session = client->session();
+    audio_io_handle_t input = inputDesc->mIoHandle;
 
     ALOGV("AudioPolicyManager::startInput(input:%d, session:%d, silenced:%d, concurrency:%d)",
             input, session, silenced, *concurrency);
 
-    *concurrency = API_INPUT_CONCURRENCY_NONE;
-
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("startInput() unknown input %d", input);
-        return BAD_VALUE;
-    }
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-
     sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
     if (audioSession == 0) {
         ALOGW("startInput() unknown session %d on input %d", session, input);
         return BAD_VALUE;
     }
 
-// FIXME: disable concurrent capture until UI is ready
-#if 0
-    if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
-        ALOGW("startInput(%d) failed: other input already started", input);
-        return INVALID_OPERATION;
-    }
-
-    if (isInCall()) {
-        *concurrency |= API_INPUT_CONCURRENCY_CALL;
-    }
-    if (mInputs.activeInputsCountOnDevices() != 0) {
-        *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
-    }
-#else
     if (!is_virtual_input_device(inputDesc->mDevice)) {
         if (mCallTxPatch != 0 &&
             inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
@@ -1943,11 +2064,8 @@
                         true /*activeOnly*/);
                 sp<AudioSession> activeSession = activeSessions.valueAt(0);
                 if (activeSession->isSilenced()) {
-                    audio_io_handle_t activeInput = activeDesc->mIoHandle;
-                    audio_session_t activeSessionId = activeSession->session();
-                    stopInput(activeInput, activeSessionId);
-                    releaseInput(activeInput, activeSessionId);
-                    ALOGV("startInput(%d) stopping silenced input %d", input, activeInput);
+                    closeSession(activeDesc, activeSession);
+                    ALOGV("startInput() session %d stopping silenced session %d", session, activeSession->session());
                     activeInputs = mInputs.getActiveInputs();
                 }
             }
@@ -2001,27 +2119,24 @@
             if (activeSource == AUDIO_SOURCE_HOTWORD) {
                 AudioSessionCollection activeSessions =
                         activeDesc->getAudioSessions(true /*activeOnly*/);
-                audio_session_t activeSession = activeSessions.keyAt(0);
-                audio_io_handle_t activeHandle = activeDesc->mIoHandle;
+                sp<AudioSession> activeSession = activeSessions[0];
                 SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
                 *concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
-                sessions.add(activeSession);
+                sessions.add(activeSession->session());
                 inputDesc->setPreemptedSessions(sessions);
-                stopInput(activeHandle, activeSession);
-                releaseInput(activeHandle, activeSession);
+                closeSession(activeDesc, activeSession);
                 ALOGV("startInput(%d) for HOTWORD preempting HOTWORD input %d",
                       input, activeDesc->mIoHandle);
             }
         }
     }
-#endif
 
     // Make sure we start with the correct silence state
     audioSession->setSilenced(silenced);
 
     // increment activity count before calling getNewInputDevice() below as only active sessions
     // are considered for device selection
-    audioSession->changeActiveCount(1);
+    inputDesc->changeRefCount(session, 1);
 
     // Routing?
     mInputRoutes.incRouteActivity(session);
@@ -2035,7 +2150,7 @@
         status_t status = inputDesc->start();
         if (status != NO_ERROR) {
             mInputRoutes.decRouteActivity(session);
-            audioSession->changeActiveCount(-1);
+            inputDesc->changeRefCount(session, -1);
             return status;
         }
 
@@ -2077,29 +2192,29 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
-                                       audio_session_t session)
+status_t AudioPolicyManager::stopInput(audio_port_handle_t portId)
 {
-    ALOGV("stopInput() input %d", input);
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("stopInput() unknown input %d", input);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+    if (inputDesc == 0) {
+        ALOGW("stopInput() no input for client %d", portId);
         return BAD_VALUE;
     }
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+    sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+    audio_session_t session = client->session();
+    audio_io_handle_t input = inputDesc->mIoHandle;
+
+    ALOGV("stopInput() input %d", input);
 
     sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
-    if (index < 0) {
-        ALOGW("stopInput() unknown session %d on input %d", session, input);
-        return BAD_VALUE;
-    }
 
     if (audioSession->activeCount() == 0) {
         ALOGW("stopInput() input %d already stopped", input);
         return INVALID_OPERATION;
     }
 
-    audioSession->changeActiveCount(-1);
+    inputDesc->changeRefCount(session, -1);
 
     // Routing?
     mInputRoutes.decRouteActivity(session);
@@ -2148,22 +2263,24 @@
     return NO_ERROR;
 }
 
-void AudioPolicyManager::releaseInput(audio_io_handle_t input,
-                                      audio_session_t session)
+void AudioPolicyManager::releaseInput(audio_port_handle_t portId)
 {
-    ALOGV("releaseInput() %d", input);
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("releaseInput() releasing unknown input %d", input);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+    if (inputDesc == 0) {
+        ALOGW("releaseInput() no input for client %d", portId);
         return;
     }
+    sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+    audio_session_t session = client->session();
+    audio_io_handle_t input = inputDesc->mIoHandle;
+
+    ALOGV("releaseInput() %d", input);
 
     // Routing
     mInputRoutes.removeRoute(session);
 
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-    ALOG_ASSERT(inputDesc != 0);
-
     sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
     if (audioSession == 0) {
         ALOGW("releaseInput() unknown session %d on input %d", session, input);
@@ -2186,10 +2303,31 @@
     }
 
     closeInput(input);
+    inputDesc->clients().erase(portId);
     mpClientInterface->onAudioPortListUpdate();
     ALOGV("releaseInput() exit");
 }
 
+void AudioPolicyManager::closeSessions(const sp<AudioInputDescriptor>& input, bool activeOnly)
+{
+    AudioSessionCollection sessions = input->getAudioSessions(activeOnly /*activeOnly*/);
+    for (size_t i = 0; i < sessions.size(); i++) {
+        closeSession(input, sessions[i]);
+    }
+}
+
+void AudioPolicyManager::closeSession(const sp<AudioInputDescriptor>& input,
+                                      const sp<AudioSession>& session)
+{
+    RecordClientVector clients = input->getClientsForSession(session->session());
+
+    for (const auto& client : clients) {
+        stopInput(client->portId());
+        releaseInput(client->portId());
+    }
+}
+
+
 void AudioPolicyManager::closeAllInputs() {
     bool patchRemoved = false;
 
@@ -2273,11 +2411,10 @@
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
         audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
         for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
-            if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+            if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
                 continue;
             }
-            if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
-                    (isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
+            if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
                 continue;
             }
             routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
@@ -2295,13 +2432,15 @@
                 applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
                         stream, curStreamDevice);
             }
-
+            // rescale index before applying to curStream as ranges may be different for
+            // stream and curStream
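+            // For example (illustrative only): an index at the top of the MUSIC range should
+            // land at the top of curStream's range rather than being applied verbatim.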
+            int idx = rescaleVolumeIndex(index, stream, (audio_stream_type_t)curStream);
             if (applyVolume) {
                 //FIXME: workaround for truncated touch sounds
                 // delayed volume change for system stream to be removed when the problem is
                 // handled by system UI
                 status_t volStatus =
-                        checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
+                        checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
                             (stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
                 if (volStatus != NO_ERROR) {
                     status = volStatus;
@@ -2650,6 +2789,7 @@
     mEffects.dump(fd);
     mAudioPatches.dump(fd);
     mPolicyMixes.dump(fd);
+    mAudioSources.dump(fd);
 
     return NO_ERROR;
 }
@@ -2834,8 +2974,7 @@
     }
     ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
 
-    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+    if (!audio_patch_is_valid(patch)) {
         return BAD_VALUE;
     }
     // only one source per audio patch supported for now
@@ -3293,8 +3432,8 @@
 void AudioPolicyManager::clearAudioSources(uid_t uid)
 {
     for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--)  {
-        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
-        if (sourceDesc->mUid == uid) {
+        sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        if (sourceDesc->uid() == uid) {
             stopAudioSource(mAudioSources.keyAt(i));
         }
     }
@@ -3312,20 +3451,23 @@
 }
 
 status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
-                                  const audio_attributes_t *attributes,
-                                  audio_patch_handle_t *handle,
-                                  uid_t uid)
+                                              const audio_attributes_t *attributes,
+                                              audio_port_handle_t *portId,
+                                              uid_t uid)
 {
-    ALOGV("%s source %p attributes %p handle %p", __FUNCTION__, source, attributes, handle);
-    if (source == NULL || attributes == NULL || handle == NULL) {
+    ALOGV("%s", __FUNCTION__);
+    *portId = AUDIO_PORT_HANDLE_NONE;
+
+    if (source == NULL || attributes == NULL || portId == NULL) {
+        ALOGW("%s invalid argument: source %p attributes %p handle %p",
+              __FUNCTION__, source, attributes, portId);
         return BAD_VALUE;
     }
 
-    *handle = AUDIO_PATCH_HANDLE_NONE;
-
     if (source->role != AUDIO_PORT_ROLE_SOURCE ||
             source->type != AUDIO_PORT_TYPE_DEVICE) {
-        ALOGV("%s INVALID_OPERATION source->role %d source->type %d", __FUNCTION__, source->role, source->type);
+        ALOGW("%s INVALID_OPERATION source->role %d source->type %d",
+              __FUNCTION__, source->role, source->type);
         return INVALID_OPERATION;
     }
 
@@ -3333,34 +3475,37 @@
             mAvailableInputDevices.getDevice(source->ext.device.type,
                                               String8(source->ext.device.address));
     if (srcDeviceDesc == 0) {
-        ALOGV("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
+        ALOGW("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
         return BAD_VALUE;
     }
-    sp<AudioSourceDescriptor> sourceDesc =
-            new AudioSourceDescriptor(srcDeviceDesc, attributes, uid);
+
+    *portId = AudioPort::getNextUniqueId();
 
     struct audio_patch dummyPatch = {};
     sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
-    sourceDesc->mPatchDesc = patchDesc;
+
+    sp<SourceClientDescriptor> sourceDesc =
+        new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDeviceDesc,
+                                   streamTypefromAttributesInt(attributes));
 
     status_t status = connectAudioSource(sourceDesc);
     if (status == NO_ERROR) {
-        mAudioSources.add(sourceDesc->getHandle(), sourceDesc);
-        *handle = sourceDesc->getHandle();
+        mAudioSources.add(*portId, sourceDesc);
     }
     return status;
 }
 
-status_t AudioPolicyManager::connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+status_t AudioPolicyManager::connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
-    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->portId());
 
     // make sure we only have one patch per source.
     disconnectAudioSource(sourceDesc);
 
-    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
-    audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
-    sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->mDevice;
+    audio_attributes_t attributes = sourceDesc->attributes();
+    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
+    audio_stream_type_t stream = sourceDesc->stream();
+    sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->srcDevice();
 
     audio_devices_t sinkDevice = getDeviceForStrategy(strategy, true);
     sp<DeviceDescriptor> sinkDeviceDesc =
@@ -3405,7 +3550,7 @@
                                                               0);
         ALOGV("%s patch panel returned %d patchHandle %d", __FUNCTION__,
                                                               status, afPatchHandle);
-        sourceDesc->mPatchDesc->mPatch = *patchBuilder.patch();
+        sourceDesc->patchDesc()->mPatch = *patchBuilder.patch();
         if (status != NO_ERROR) {
             ALOGW("%s patch panel could not connect device patch, error %d",
                   __FUNCTION__, status);
@@ -3415,32 +3560,32 @@
         status = startSource(outputDesc, stream, sinkDevice, NULL, &delayMs);
 
         if (status != NO_ERROR) {
-            mpClientInterface->releaseAudioPatch(sourceDesc->mPatchDesc->mAfPatchHandle, 0);
+            mpClientInterface->releaseAudioPatch(sourceDesc->patchDesc()->mAfPatchHandle, 0);
             return status;
         }
-        sourceDesc->mSwOutput = outputDesc;
+        sourceDesc->setSwOutput(outputDesc);
         if (delayMs != 0) {
             usleep(delayMs * 1000);
         }
     }
 
-    sourceDesc->mPatchDesc->mAfPatchHandle = afPatchHandle;
-    addAudioPatch(sourceDesc->mPatchDesc->mHandle, sourceDesc->mPatchDesc);
+    sourceDesc->patchDesc()->mAfPatchHandle = afPatchHandle;
+    addAudioPatch(sourceDesc->patchDesc()->mHandle, sourceDesc->patchDesc());
 
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::stopAudioSource(audio_patch_handle_t handle __unused)
+status_t AudioPolicyManager::stopAudioSource(audio_port_handle_t portId)
 {
-    sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueFor(handle);
-    ALOGV("%s handle %d", __FUNCTION__, handle);
+    sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueFor(portId);
+    ALOGV("%s port ID %d", __FUNCTION__, portId);
     if (sourceDesc == 0) {
-        ALOGW("%s unknown source for handle %d", __FUNCTION__, handle);
+        ALOGW("%s unknown source for port ID %d", __FUNCTION__, portId);
         return BAD_VALUE;
     }
     status_t status = disconnectAudioSource(sourceDesc);
 
-    mAudioSources.removeItem(handle);
+    mAudioSources.removeItem(portId);
     return status;
 }
 
@@ -3774,20 +3919,20 @@
     }
 }
 
-status_t AudioPolicyManager::disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+status_t AudioPolicyManager::disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
-    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+    ALOGV("%s port Id %d", __FUNCTION__, sourceDesc->portId());
 
-    sp<AudioPatch> patchDesc = mAudioPatches.valueFor(sourceDesc->mPatchDesc->mHandle);
+    sp<AudioPatch> patchDesc = mAudioPatches.valueFor(sourceDesc->patchDesc()->mHandle);
     if (patchDesc == 0) {
         ALOGW("%s source has no patch with handle %d", __FUNCTION__,
-              sourceDesc->mPatchDesc->mHandle);
+              sourceDesc->patchDesc()->mHandle);
         return BAD_VALUE;
     }
-    removeAudioPatch(sourceDesc->mPatchDesc->mHandle);
+    removeAudioPatch(sourceDesc->patchDesc()->mHandle);
 
-    audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
-    sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->mSwOutput.promote();
+    audio_stream_type_t stream = sourceDesc->stream();
+    sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->swOutput().promote();
     if (swOutputDesc != 0) {
         status_t status = stopSource(swOutputDesc, stream, false);
         if (status == NO_ERROR) {
@@ -3795,7 +3940,7 @@
         }
         mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
     } else {
-        sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->mHwOutput.promote();
+        sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->hwOutput().promote();
         if (hwOutputDesc != 0) {
           //   release patch between src device and output device
           //   close Hwoutput and remove from mHwOutputs
@@ -3806,15 +3951,16 @@
     return NO_ERROR;
 }
 
-sp<AudioSourceDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
+sp<SourceClientDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
         audio_io_handle_t output, routing_strategy strategy)
 {
-    sp<AudioSourceDescriptor> source;
+    sp<SourceClientDescriptor> source;
     for (size_t i = 0; i < mAudioSources.size(); i++)  {
-        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        audio_attributes_t attributes = sourceDesc->attributes();
         routing_strategy sourceStrategy =
-                (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
-        sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->mSwOutput.promote();
+                (routing_strategy) getStrategyForAttr(&attributes);
+        sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->swOutput().promote();
         if (sourceStrategy == strategy && outputDesc != 0 && outputDesc->mIoHandle == output) {
             source = sourceDesc;
             break;
@@ -4610,6 +4756,7 @@
 
     nextAudioPortGeneration();
 
+    audio_devices_t device = inputDesc->mDevice;
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
@@ -4620,6 +4767,12 @@
 
     inputDesc->close();
     mInputs.removeItem(input);
+
+    audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+    if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+            mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
+        SoundTrigger::setCaptureState(false);
+    }
 }
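
closeInput() now records the input's device before tearing it down and, once no
input is left active on the primary input devices, tells SoundTrigger that sound
capture has stopped. A reduced sketch of that check follows, using plain bitmasks
instead of the AOSP descriptors; shouldDisableCapture, Input and kDeviceBitIn are
illustrative names only.

#include <cstdint>
#include <vector>

using DeviceMask = uint32_t;
constexpr DeviceMask kDeviceBitIn = 0x80000000u;   // stand-in for AUDIO_DEVICE_BIT_IN

struct Input { DeviceMask device; bool active; };

bool shouldDisableCapture(DeviceMask closedDevice,
                          DeviceMask primaryInputDevices,
                          const std::vector<Input>& remainingInputs) {
    if ((closedDevice & primaryInputDevices & ~kDeviceBitIn) == 0) {
        return false;                   // closed input was not on a primary input device
    }
    for (const Input& in : remainingInputs) {
        if (in.active && (in.device & primaryInputDevices & ~kDeviceBitIn) != 0) {
            return false;               // something still captures on a primary device
        }
    }
    return true;
}

int main() {
    const DeviceMask primary = kDeviceBitIn | 0x4;               // e.g. a built-in mic
    return shouldDisableCapture(primary, primary, {}) ? 0 : 1;   // nothing left -> disable
}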
 
 SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(
@@ -4641,18 +4794,17 @@
     return outputs;
 }
 
-bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
-                                      SortedVector<audio_io_handle_t>& outputs2)
+void AudioPolicyManager::checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked)
 {
-    if (outputs1.size() != outputs2.size()) {
-        return false;
+    // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
+    // output is suspended before any tracks are moved to it
+    checkA2dpSuspend();
+    checkOutputForAllStrategies();
+    if (onOutputsChecked != nullptr && onOutputsChecked()) checkA2dpSuspend();
+    updateDevicesAndOutputs();
+    if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
+        setMsdPatch();
     }
-    for (size_t i = 0; i < outputs1.size(); i++) {
-        if (outputs1[i] != outputs2[i]) {
-            return false;
-        }
-    }
-    return true;
 }
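
The removed vectorsEqual() helper duplicated what operator== already provides for
ordered containers, which is why the call site below now compares srcOutputs and
dstOutputs directly. The equivalence, sketched with std::vector rather than
SortedVector:

#include <cassert>
#include <vector>

int main() {
    std::vector<int> srcOutputs{1, 3, 7};
    std::vector<int> dstOutputs{1, 3, 7};
    // Same size and same elements in order, exactly what vectorsEqual() checked.
    assert(srcOutputs == dstOutputs);
    dstOutputs.push_back(9);
    assert(srcOutputs != dstOutputs);
    return 0;
}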
 
 void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
@@ -4680,7 +4832,7 @@
         }
     }
 
-    if (!vectorsEqual(srcOutputs,dstOutputs)) {
+    if (srcOutputs != dstOutputs) {
         // get maximum latency of all source outputs to determine the minimum mute time guaranteeing
         // audio from invalidated tracks will be rendered when unmuting
         uint32_t maxLatency = 0;
@@ -4699,7 +4851,7 @@
                 setStrategyMute(strategy, true, desc);
                 setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR, newDevice);
             }
-            sp<AudioSourceDescriptor> source =
+            sp<SourceClientDescriptor> source =
                     getSourceForStrategyOnOutput(srcOut, strategy);
             if (source != 0){
                 connectAudioSource(source);
@@ -5423,8 +5575,8 @@
         return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
     }
 
-    // in-call: always cap earpiece volume by voice volume + some low headroom
-    if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+    // in-call: always cap volume by voice volume + some low headroom
+    if ((stream != AUDIO_STREAM_VOICE_CALL) &&
             (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
         switch (stream) {
         case AUDIO_STREAM_SYSTEM:
@@ -5436,9 +5588,9 @@
         case AUDIO_STREAM_DTMF:
         case AUDIO_STREAM_ACCESSIBILITY: {
             int voiceVolumeIndex =
-                mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+                mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
             const float maxVoiceVolDb =
-                computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+                computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
                 + IN_CALL_EARPIECE_HEADROOM_DB;
             if (volumeDB > maxVoiceVolDb) {
                 ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
@@ -5509,6 +5661,21 @@
     return volumeDB;
 }
 
+int AudioPolicyManager::rescaleVolumeIndex(int srcIndex,
+                                           audio_stream_type_t srcStream,
+                                           audio_stream_type_t dstStream)
+{
+    if (srcStream == dstStream) {
+        return srcIndex;
+    }
+    float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
+    float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
+    float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
+    float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+
+    return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
+}
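
rescaleVolumeIndex() is a plain linear remapping of an index from the source
stream's [min, max] range onto the destination stream's range. The same
arithmetic in a standalone form, with the ranges passed explicitly (rescaleIndex
is an illustrative name, not an AOSP function):

#include <cstdio>

int rescaleIndex(int srcIndex, int minSrc, int maxSrc, int minDst, int maxDst) {
    return static_cast<int>(
            minDst + ((srcIndex - minSrc) * float(maxDst - minDst)) / (maxSrc - minSrc));
}

int main() {
    // Index 5 on a 0..10 scale maps to 7 on a 0..15 scale (7.5 truncated).
    std::printf("%d\n", rescaleIndex(5, 0, 10, 0, 15));
}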
+
 status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
                                                    int index,
                                                    const sp<AudioOutputDescriptor>& outputDesc,
@@ -5635,55 +5802,6 @@
     }
 }
 
-void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
-                                                      bool starting, bool stateChange)
-{
-    if(!hasPrimaryOutput()) {
-        return;
-    }
-
-    // if the stream pertains to sonification strategy and we are in call we must
-    // mute the stream if it is low visibility. If it is high visibility, we must play a tone
-    // in the device used for phone strategy and play the tone if the selected device does not
-    // interfere with the device used for phone strategy
-    // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
-    // many times as there are active tracks on the output
-    const routing_strategy stream_strategy = getStrategy(stream);
-    if ((stream_strategy == STRATEGY_SONIFICATION) ||
-            ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
-        sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput;
-        ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
-                stream, starting, outputDesc->mDevice, stateChange);
-        if (outputDesc->mRefCount[stream]) {
-            int muteCount = 1;
-            if (stateChange) {
-                muteCount = outputDesc->mRefCount[stream];
-            }
-            if (audio_is_low_visibility(stream)) {
-                ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
-                for (int i = 0; i < muteCount; i++) {
-                    setStreamMute(stream, starting, mPrimaryOutput);
-                }
-            } else {
-                ALOGV("handleIncallSonification() high visibility");
-                if (outputDesc->device() &
-                        getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
-                    ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
-                    for (int i = 0; i < muteCount; i++) {
-                        setStreamMute(stream, starting, mPrimaryOutput);
-                    }
-                }
-                if (starting) {
-                    mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
-                                                 AUDIO_STREAM_VOICE_CALL);
-                } else {
-                    mpClientInterface->stopTone();
-                }
-            }
-        }
-    }
-}
-
 audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
 {
     // flags to stream type mapping
@@ -5697,39 +5815,7 @@
         return AUDIO_STREAM_TTS;
     }
 
-    // usage to stream type mapping
-    switch (attr->usage) {
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANT:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-        return AUDIO_STREAM_MUSIC;
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        return AUDIO_STREAM_ACCESSIBILITY;
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return AUDIO_STREAM_SYSTEM;
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return AUDIO_STREAM_VOICE_CALL;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return AUDIO_STREAM_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-        return AUDIO_STREAM_ALARM;
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return AUDIO_STREAM_RING;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return AUDIO_STREAM_NOTIFICATION;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return AUDIO_STREAM_MUSIC;
-    }
+    return audio_usage_to_stream_type(attr->usage);
 }
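
The per-usage switch removed above moves into the shared audio_usage_to_stream_type()
helper. A reduced sketch of that kind of usage-to-stream mapping, using placeholder
enums rather than the system/audio.h definitions:

enum class Usage { Media, Game, Alarm, Ringtone, Notification, Unknown };
enum class Stream { Music, Alarm, Ring, Notification };

Stream streamForUsage(Usage usage) {
    switch (usage) {
    case Usage::Alarm:        return Stream::Alarm;
    case Usage::Ringtone:     return Stream::Ring;
    case Usage::Notification: return Stream::Notification;
    case Usage::Media:
    case Usage::Game:
    case Usage::Unknown:
    default:                  return Stream::Music;   // default mapping, as in the switch above
    }
}

int main() { return streamForUsage(Usage::Alarm) == Stream::Alarm ? 0 : 1; }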
 
 bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa)
@@ -5814,10 +5900,10 @@
 void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
 {
     for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--)  {
-        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
-        if (sourceDesc->mDevice->equals(deviceDesc)) {
-            ALOGV("%s releasing audio source %d", __FUNCTION__, sourceDesc->getHandle());
-            stopAudioSource(sourceDesc->getHandle());
+        sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        if (sourceDesc->srcDevice()->equals(deviceDesc)) {
+            ALOGV("%s releasing audio source %d", __FUNCTION__, sourceDesc->portId());
+            stopAudioSource(sourceDesc->portId());
         }
     }
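
cleanUpForDevice() walks mAudioSources from the back because stopAudioSource()
removes the current entry from that same collection; iterating from the end keeps
the not-yet-visited indices valid. The same pattern with a std::vector
(removeMatching is an illustrative helper):

#include <cstddef>
#include <string>
#include <vector>

void removeMatching(std::vector<std::string>& items, const std::string& victim) {
    for (std::ptrdiff_t i = static_cast<std::ptrdiff_t>(items.size()) - 1; i >= 0; --i) {
        if (items[i] == victim) {
            items.erase(items.begin() + i);   // indices below i are unaffected
        }
    }
}

int main() {
    std::vector<std::string> sources{"usb", "builtin", "usb"};
    removeMatching(sources, "usb");           // leaves just "builtin"
    return sources.size() == 1 ? 0 : 1;
}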
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 893b963..9436767 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -17,6 +17,7 @@
 #pragma once
 
 #include <atomic>
+#include <functional>
 #include <memory>
 #include <unordered_set>
 
@@ -30,6 +31,7 @@
 #include <utils/SortedVector.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicy.h>
+#include <media/PatchBuilder.h>
 #include "AudioPolicyInterface.h"
 
 #include <AudioPolicyManagerInterface.h>
@@ -38,6 +40,7 @@
 #include <AudioPolicyConfig.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
+#include <AudioProfile.h>
 #include <DeviceDescriptor.h>
 #include <IOProfile.h>
 #include <HwModule.h>
@@ -118,15 +121,9 @@
                                           audio_output_flags_t *flags,
                                           audio_port_handle_t *selectedDeviceId,
                                           audio_port_handle_t *portId);
-        virtual status_t startOutput(audio_io_handle_t output,
-                                     audio_stream_type_t stream,
-                                     audio_session_t session);
-        virtual status_t stopOutput(audio_io_handle_t output,
-                                    audio_stream_type_t stream,
-                                    audio_session_t session);
-        virtual void releaseOutput(audio_io_handle_t output,
-                                   audio_stream_type_t stream,
-                                   audio_session_t session);
+        virtual status_t startOutput(audio_port_handle_t portId);
+        virtual status_t stopOutput(audio_port_handle_t portId);
+        virtual void releaseOutput(audio_port_handle_t portId);
         virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                          audio_io_handle_t *input,
                                          audio_session_t session,
@@ -138,16 +135,13 @@
                                          audio_port_handle_t *portId);
 
         // indicates to the audio policy manager that the input starts being used.
-        virtual status_t startInput(audio_io_handle_t input,
-                                    audio_session_t session,
+        virtual status_t startInput(audio_port_handle_t portId,
                                     bool silenced,
                                     concurrency_type__mask_t *concurrency);
 
         // indicates to the audio policy manager that the input stops being used.
-        virtual status_t stopInput(audio_io_handle_t input,
-                                   audio_session_t session);
-        virtual void releaseInput(audio_io_handle_t input,
-                                  audio_session_t session);
+        virtual status_t stopInput(audio_port_handle_t portId);
+        virtual void releaseInput(audio_port_handle_t portId);
         virtual void closeAllInputs();
         virtual void initStreamVolume(audio_stream_type_t stream,
                                                     int indexMin,
@@ -229,9 +223,9 @@
 
         virtual status_t startAudioSource(const struct audio_port_config *source,
                                           const audio_attributes_t *attributes,
-                                          audio_patch_handle_t *handle,
+                                          audio_port_handle_t *portId,
                                           uid_t uid);
-        virtual status_t stopAudioSource(audio_patch_handle_t handle);
+        virtual status_t stopAudioSource(audio_port_handle_t portId);
 
         virtual status_t setMasterMono(bool mono);
         virtual status_t getMasterMono(bool *mono);
@@ -353,6 +347,10 @@
                                     int index,
                                     audio_devices_t device);
 
+        // rescale volume index from srcStream into the index range used by dstStream
+        int rescaleVolumeIndex(int srcIndex,
+                               audio_stream_type_t srcStream,
+                               audio_stream_type_t dstStream);
         // check that volume change is permitted, compute and send new volume to audio hardware
         virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
                                            const sp<AudioOutputDescriptor>& outputDesc,
@@ -377,10 +375,6 @@
                            int delayMs = 0,
                            audio_devices_t device = (audio_devices_t)0);
 
-        // handle special cases for sonification strategy while in call: mute streams or replace by
-        // a special tone in the device used for communication
-        void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
-
         audio_mode_t getPhoneState();
 
         // true if device is in a telephony or VoIP call
@@ -410,6 +404,12 @@
         // close an input.
         void closeInput(audio_io_handle_t input);
 
+        // runs all the checks required to accommodate changes in devices and outputs.
+        // If the 'onOutputsChecked' callback is provided, it is executed after the outputs
+        // check via 'checkOutputForAllStrategies'. If the callback returns 'true',
+        // the A2DP suspend status is rechecked.
+        void checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked = nullptr);
+
         // checks and if necessary changes outputs used for all strategies.
         // must be called every time a condition that affects the output choice for a given strategy
         // changes: connected device, phone state, force use...
@@ -451,8 +451,6 @@
 
         SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
                                                             const SwAudioOutputCollection& openOutputs);
-        bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
-                                           SortedVector<audio_io_handle_t>& outputs2);
 
         // mute/unmute strategies using an incompatible device combination
         // if muting, wait for the audio in pcm buffer to be drained before proceeding
@@ -501,13 +499,14 @@
             if (!hasPrimaryOutput()) {
                 return AUDIO_DEVICE_NONE;
             }
-            return mAvailableInputDevices.getDevicesFromHwModule(mPrimaryOutput->getModuleHandle());
+            return mAvailableInputDevices.getDeviceTypesFromHwModule(
+                    mPrimaryOutput->getModuleHandle());
         }
 
         uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
         sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
         sp<DeviceDescriptor> findDevice(
-                const DeviceVector& devices, audio_devices_t device);
+                const DeviceVector& devices, audio_devices_t device) const;
 
         // if argument "device" is different from AUDIO_DEVICE_NONE,  startSource() will force
         // the re-evaluation of the output device.
@@ -526,10 +525,10 @@
 
         status_t hasPrimaryOutput() const { return mPrimaryOutput != 0; }
 
-        status_t connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
-        status_t disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
+        status_t connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
+        status_t disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
 
-        sp<AudioSourceDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
+        sp<SourceClientDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
                                                                routing_strategy strategy);
 
         void cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc);
@@ -544,6 +543,10 @@
         static bool streamsMatchForvolume(audio_stream_type_t stream1,
                                           audio_stream_type_t stream2);
 
+        void closeSessions(const sp<AudioInputDescriptor>& input, bool activeOnly);
+        void closeSession(const sp<AudioInputDescriptor>& input,
+                          const sp<AudioSession>& session);
+
         const uid_t mUidCached;                         // AID_AUDIOSERVER
         AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
         sp<SwAudioOutputDescriptor> mPrimaryOutput;     // primary output descriptor
@@ -584,7 +587,7 @@
         sp<AudioPatch> mCallRxPatch;
 
         HwAudioOutputCollection mHwOutputs;
-        AudioSourceCollection mAudioSources;
+        SourceClientCollection mAudioSources;
 
         // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
         // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
@@ -617,6 +620,17 @@
 
         status_t getSupportedFormats(audio_io_handle_t ioHandle, FormatVector& formats);
 
+        // Support for Multi-Stream Decoder (MSD) module
+        sp<DeviceDescriptor> getMsdAudioInDevice() const;
+        audio_devices_t getMsdAudioOutDeviceTypes() const;
+        const AudioPatchCollection getMsdPatches() const;
+        status_t getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+                                           bool hwAvSync,
+                                           audio_port_config *sourceConfig,
+                                           audio_port_config *sinkConfig) const;
+        PatchBuilder buildMsdPatch(audio_devices_t outputDevice) const;
+        status_t setMsdPatch(audio_devices_t outputDevice = AUDIO_DEVICE_NONE);
+
         // If any, resolve any "dynamic" fields of an Audio Profiles collection
         void updateAudioProfiles(audio_devices_t device, audio_io_handle_t ioHandle,
                 AudioProfileVector &profiles);
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index b064f8c..21fffec 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -155,17 +155,6 @@
     return result;
 }
 
-status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
-              audio_stream_type_t stream)
-{
-    return mAudioPolicyService->startTone(tone, stream);
-}
-
-status_t AudioPolicyService::AudioPolicyClient::stopTone()
-{
-    return mAudioPolicyService->stopTone();
-}
-
 status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
 {
     return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index fdae23b..b3d564a 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -23,6 +23,7 @@
 #include <memory>
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
+#include <media/AudioPolicyHelper.h>
 #include <media/EffectsConfig.h>
 #include <mediautils/ServiceUtilities.h>
 #include <system/audio.h>
@@ -317,6 +318,201 @@
     return status;
 }
 
+status_t AudioPolicyEffects::addSourceDefaultEffect(const effect_uuid_t *type,
+                                                    const String16& opPackageName,
+                                                    const effect_uuid_t *uuid,
+                                                    int32_t priority,
+                                                    audio_source_t source,
+                                                    audio_unique_id_t* id)
+{
+    if (uuid == NULL || type == NULL) {
+        ALOGE("addSourceDefaultEffect(): Null uuid or type uuid pointer");
+        return BAD_VALUE;
+    }
+
+    // HOTWORD and FM_TUNER are two special case sources > MAX.
+    if (source < AUDIO_SOURCE_DEFAULT ||
+            (source > AUDIO_SOURCE_MAX &&
+             source != AUDIO_SOURCE_HOTWORD &&
+             source != AUDIO_SOURCE_FM_TUNER)) {
+        ALOGE("addSourceDefaultEffect(): Unsupported source type %d", source);
+        return BAD_VALUE;
+    }
+
+    // Check that |uuid| or |type| corresponds to an effect on the system.
+    effect_descriptor_t descriptor = {};
+    status_t res = AudioEffect::getEffectDescriptor(
+            uuid, type, EFFECT_FLAG_TYPE_PRE_PROC, &descriptor);
+    if (res != OK) {
+        ALOGE("addSourceDefaultEffect(): Failed to find effect descriptor matching uuid/type.");
+        return res;
+    }
+
+    // Only pre-processing effects can be added dynamically as source defaults.
+    if ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) {
+        ALOGE("addSourceDefaultEffect(): Desired effect cannot be attached "
+              "as a source default effect.");
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    // Find the EffectDescVector for the given source type, or create a new one if necessary.
+    ssize_t index = mInputSources.indexOfKey(source);
+    EffectDescVector *desc = NULL;
+    if (index < 0) {
+        // No effects for this source type yet.
+        desc = new EffectDescVector();
+        mInputSources.add(source, desc);
+    } else {
+        desc = mInputSources.valueAt(index);
+    }
+
+    // Create a new effect and add it to the vector.
+    res = AudioEffect::newEffectUniqueId(id);
+    if (res != OK) {
+        ALOGE("addSourceDefaultEffect(): failed to get new unique id.");
+        return res;
+    }
+    EffectDesc *effect = new EffectDesc(
+            descriptor.name, *type, opPackageName, *uuid, priority, *id);
+    desc->mEffects.add(effect);
+    // TODO(b/71813697): Support setting params as well.
+
+    // TODO(b/71814300): Retroactively attach to any existing sources of the given type.
+    // This requires tracking the source type of each session id in addition to what is
+    // already being tracked.
+
+    return NO_ERROR;
+}
+
+status_t AudioPolicyEffects::addStreamDefaultEffect(const effect_uuid_t *type,
+                                                    const String16& opPackageName,
+                                                    const effect_uuid_t *uuid,
+                                                    int32_t priority,
+                                                    audio_usage_t usage,
+                                                    audio_unique_id_t* id)
+{
+    if (uuid == NULL || type == NULL) {
+        ALOGE("addStreamDefaultEffect(): Null uuid or type uuid pointer");
+        return BAD_VALUE;
+    }
+
+    audio_stream_type_t stream = audio_usage_to_stream_type(usage);
+
+    if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
+        ALOGE("addStreamDefaultEffect(): Unsupported stream type %d", stream);
+        return BAD_VALUE;
+    }
+
+    // Check that |uuid| or |type| corresponds to an effect on the system.
+    effect_descriptor_t descriptor = {};
+    status_t res = AudioEffect::getEffectDescriptor(
+            uuid, type, EFFECT_FLAG_TYPE_INSERT, &descriptor);
+    if (res != OK) {
+        ALOGE("addStreamDefaultEffect(): Failed to find effect descriptor matching uuid/type.");
+        return res;
+    }
+
+    // Only insert effects can be added dynamically as stream defaults.
+    if ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_INSERT) {
+        ALOGE("addStreamDefaultEffect(): Desired effect cannot be attached "
+              "as a stream default effect.");
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    // Find the EffectDescVector for the given stream type, or create a new one if necessary.
+    ssize_t index = mOutputStreams.indexOfKey(stream);
+    EffectDescVector *desc = NULL;
+    if (index < 0) {
+        // No effects for this stream type yet.
+        desc = new EffectDescVector();
+        mOutputStreams.add(stream, desc);
+    } else {
+        desc = mOutputStreams.valueAt(index);
+    }
+
+    // Create a new effect and add it to the vector.
+    res = AudioEffect::newEffectUniqueId(id);
+    if (res != OK) {
+        ALOGE("addStreamDefaultEffect(): failed to get new unique id.");
+        return res;
+    }
+    EffectDesc *effect = new EffectDesc(
+            descriptor.name, *type, opPackageName, *uuid, priority, *id);
+    desc->mEffects.add(effect);
+    // TODO(b/71813697): Support setting params as well.
+
+    // TODO(b/71814300): Retroactively attach to any existing streams of the given type.
+    // This requires tracking the stream type of each session id in addition to what is
+    // already being tracked.
+
+    return NO_ERROR;
+}
+
+status_t AudioPolicyEffects::removeSourceDefaultEffect(audio_unique_id_t id)
+{
+    if (id == AUDIO_UNIQUE_ID_ALLOCATE) {
+        // ALLOCATE is not a unique identifier, but rather a reserved value indicating
+        // a real id has not been assigned. For default effects, this value is only used
+        // by system-owned defaults from the loaded config, which cannot be removed.
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    // Check each source type.
+    size_t numSources = mInputSources.size();
+    for (size_t i = 0; i < numSources; ++i) {
+        // Check each effect for each source.
+        EffectDescVector* descVector = mInputSources[i];
+        for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+            if ((*desc)->mId == id) {
+                // Found it!
+                // TODO(b/71814300): Remove from any sources the effect was attached to.
+                descVector->mEffects.erase(desc);
+                // Handles are unique; there can only be one match, so return early.
+                return NO_ERROR;
+            }
+        }
+    }
+
+    // Effect wasn't found, so it's been trivially removed successfully.
+    return NO_ERROR;
+}
+
+status_t AudioPolicyEffects::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+    if (id == AUDIO_UNIQUE_ID_ALLOCATE) {
+        // ALLOCATE is not a unique identifier, but rather a reserved value indicating
+        // a real id has not been assigned. For default effects, this value is only used
+        // by system-owned defaults from the loaded config, which cannot be removed.
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    // Check each stream type.
+    size_t numStreams = mOutputStreams.size();
+    for (size_t i = 0; i < numStreams; ++i) {
+        // Check each effect for each stream.
+        EffectDescVector* descVector = mOutputStreams[i];
+        for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+            if ((*desc)->mId == id) {
+                // Found it!
+                // TODO(b/71814300): Remove from any streams the effect was attached to.
+                descVector->mEffects.erase(desc);
+                // Handles are unique; there can only be one match, so return early.
+                return NO_ERROR;
+            }
+        }
+    }
+
+    // Effect wasn't found, so it's been trivially removed successfully.
+    return NO_ERROR;
+}
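
Both add*DefaultEffect() paths above hand back an audio_unique_id_t, and the
remove paths scan every per-source or per-stream list for that id, treating a
missing id as already removed. A simplified sketch of that registry pattern;
DefaultEffectRegistry and its types are hypothetical, not the EffectDescVector
classes used here.

#include <cstdint>
#include <map>
#include <string>
#include <vector>

using UniqueId = int32_t;

struct DefaultEffect { UniqueId id; std::string name; };

class DefaultEffectRegistry {
public:
    void add(int groupKey, DefaultEffect effect) {
        mGroups[groupKey].push_back(std::move(effect));
    }

    // Mirrors removeSourceDefaultEffect(): an unknown id counts as already removed.
    bool removeById(UniqueId id) {
        for (auto& group : mGroups) {
            std::vector<DefaultEffect>& effects = group.second;
            for (auto it = effects.begin(); it != effects.end(); ++it) {
                if (it->id == id) {
                    effects.erase(it);
                    return true;          // ids are unique, stop at the first match
                }
            }
        }
        return true;
    }

private:
    std::map<int, std::vector<DefaultEffect>> mGroups;
};

int main() {
    DefaultEffectRegistry registry;
    registry.add(/*source=*/1, {/*id=*/7, "agc"});
    return registry.removeById(7) ? 0 : 1;
}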
 
 void AudioPolicyEffects::EffectVector::setProcessorEnabled(bool enabled)
 {
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 623180e..6ad01f7 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -64,7 +64,6 @@
     status_t releaseInputEffects(audio_io_handle_t input,
                                  audio_session_t audioSession);
 
-
     // Return a list of effect descriptors for default output effects
     // associated with audioSession
     status_t queryDefaultOutputSessionEffects(audio_session_t audioSession,
@@ -82,18 +81,60 @@
                              audio_stream_type_t stream,
                              audio_session_t audioSession);
 
+    // Add the effect to the list of default effects for sources of type |source|.
+    status_t addSourceDefaultEffect(const effect_uuid_t *type,
+                                    const String16& opPackageName,
+                                    const effect_uuid_t *uuid,
+                                    int32_t priority,
+                                    audio_source_t source,
+                                    audio_unique_id_t* id);
+
+    // Add the effect to the list of default effects for streams of a given usage.
+    status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                    const String16& opPackageName,
+                                    const effect_uuid_t *uuid,
+                                    int32_t priority,
+                                    audio_usage_t usage,
+                                    audio_unique_id_t* id);
+
+    // Remove the default source effect from wherever it's attached.
+    status_t removeSourceDefaultEffect(audio_unique_id_t id);
+
+    // Remove the default stream effect from wherever it's attached.
+    status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
 private:
 
     // class to store the description of an effect and its parameters
     // as defined in audio_effects.conf
     class EffectDesc {
     public:
-        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+        EffectDesc(const char *name,
+                   const effect_uuid_t& typeUuid,
+                   const String16& opPackageName,
+                   const effect_uuid_t& uuid,
+                   uint32_t priority,
+                   audio_unique_id_t id) :
                         mName(strdup(name)),
-                        mUuid(uuid) { }
+                        mTypeUuid(typeUuid),
+                        mOpPackageName(opPackageName),
+                        mUuid(uuid),
+                        mPriority(priority),
+                        mId(id) { }
+        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+                        EffectDesc(name,
+                                   *EFFECT_UUID_NULL,
+                                   String16(""),
+                                   uuid,
+                                   0,
+                                   AUDIO_UNIQUE_ID_ALLOCATE) { }
         EffectDesc(const EffectDesc& orig) :
                         mName(strdup(orig.mName)),
-                        mUuid(orig.mUuid) {
+                        mTypeUuid(orig.mTypeUuid),
+                        mOpPackageName(orig.mOpPackageName),
+                        mUuid(orig.mUuid),
+                        mPriority(orig.mPriority),
+                        mId(orig.mId) {
                             // deep copy mParams
                             for (size_t k = 0; k < orig.mParams.size(); k++) {
                                 effect_param_t *origParam = orig.mParams[k];
@@ -116,7 +157,11 @@
             }
         }
         char *mName;
+        effect_uuid_t mTypeUuid;
+        String16 mOpPackageName;
         effect_uuid_t mUuid;
+        int32_t mPriority;
+        audio_unique_id_t mId;
         Vector <effect_param_t *> mParams;
     };
 
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index d2bc40d..02ab07f 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -17,12 +17,12 @@
 #define LOG_TAG "AudioPolicyIntefaceImpl"
 //#define LOG_NDEBUG 0
 
-#include <utils/Log.h>
-#include <media/MediaAnalyticsItem.h>
-
 #include "AudioPolicyService.h"
-#include <mediautils/ServiceUtilities.h>
 #include "TypeConverter.h"
+#include <media/AudioPolicyHelper.h>
+#include <media/MediaAnalyticsItem.h>
+#include <mediautils/ServiceUtilities.h>
+#include <utils/Log.h>
 
 namespace android {
 
@@ -200,7 +200,7 @@
         !modifyPhoneStateAllowed(pid, uid)) {
         // If the app tries to play music through the telephony device and doesn't have permission
         // then fall back to the default output device.
-        mAudioPolicyManager->releaseOutput(*output, *stream, session);
+        mAudioPolicyManager->releaseOutput(*portId);
         flags = originalFlags;
         *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
         *portId = AUDIO_PORT_HANDLE_NONE;
@@ -208,93 +208,116 @@
                                                  config,
                                                  &flags, selectedDeviceId, portId);
     }
+
+    if (result == NO_ERROR) {
+        sp<AudioPlaybackClient> client =
+            new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+        mAudioPlaybackClients.add(*portId, client);
+    }
     return result;
 }
 
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
-                                         audio_stream_type_t stream,
-                                         audio_session_t session)
+status_t AudioPolicyService::startOutput(audio_port_handle_t portId)
 {
-    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
-        return BAD_VALUE;
-    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     ALOGV("startOutput()");
+    sp<AudioPlaybackClient> client;
     sp<AudioPolicyEffects>audioPolicyEffects;
     {
         Mutex::Autolock _l(mLock);
+        const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+        if (index < 0) {
+            ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+            return INVALID_OPERATION;
+        }
+        client = mAudioPlaybackClients.valueAt(index);
         audioPolicyEffects = mAudioPolicyEffects;
     }
     if (audioPolicyEffects != 0) {
         // create audio processors according to stream
-        status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
+        status_t status = audioPolicyEffects->addOutputSessionEffects(
+            client->io, client->stream, client->session);
         if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to add effects on session %d", session);
+            ALOGW("Failed to add effects on session %d", client->session);
         }
     }
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return mAudioPolicyManager->startOutput(output, stream, session);
+    status_t status = mAudioPolicyManager->startOutput(portId);
+    if (status == NO_ERROR) {
+        client->active = true;
+    }
+    return status;
 }
 
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
-                                        audio_stream_type_t stream,
-                                        audio_session_t session)
+status_t AudioPolicyService::stopOutput(audio_port_handle_t portId)
 {
-    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
-        return BAD_VALUE;
-    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     ALOGV("stopOutput()");
-    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    mOutputCommandThread->stopOutputCommand(portId);
     return NO_ERROR;
 }
 
-status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
-                                      audio_stream_type_t stream,
-                                      audio_session_t session)
+status_t  AudioPolicyService::doStopOutput(audio_port_handle_t portId)
 {
-    ALOGV("doStopOutput from tid %d", gettid());
+    ALOGV("doStopOutput");
+    sp<AudioPlaybackClient> client;
     sp<AudioPolicyEffects>audioPolicyEffects;
     {
         Mutex::Autolock _l(mLock);
+
+        const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+        if (index < 0) {
+            ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+            return INVALID_OPERATION;
+        }
+        client = mAudioPlaybackClients.valueAt(index);
         audioPolicyEffects = mAudioPolicyEffects;
     }
     if (audioPolicyEffects != 0) {
         // release audio processors from the stream
-        status_t status = audioPolicyEffects->releaseOutputSessionEffects(output, stream, session);
+        status_t status = audioPolicyEffects->releaseOutputSessionEffects(
+            client->io, client->stream, client->session);
         if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to release effects on session %d", session);
+            ALOGW("Failed to release effects on session %d", client->session);
         }
     }
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return mAudioPolicyManager->stopOutput(output, stream, session);
+    status_t status = mAudioPolicyManager->stopOutput(portId);
+    if (status == NO_ERROR) {
+        client->active = false;
+    }
+    return status;
 }
 
-void AudioPolicyService::releaseOutput(audio_io_handle_t output,
-                                       audio_stream_type_t stream,
-                                       audio_session_t session)
+void AudioPolicyService::releaseOutput(audio_port_handle_t portId)
 {
     if (mAudioPolicyManager == NULL) {
         return;
     }
     ALOGV("releaseOutput()");
-    mOutputCommandThread->releaseOutputCommand(output, stream, session);
+    mOutputCommandThread->releaseOutputCommand(portId);
 }
 
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
-                                         audio_stream_type_t stream,
-                                         audio_session_t session)
+void AudioPolicyService::doReleaseOutput(audio_port_handle_t portId)
 {
     ALOGV("doReleaseOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
+    const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+    if (index < 0) {
+        ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+        return;
+    }
+    sp<AudioPlaybackClient> client = mAudioPlaybackClients.valueAt(index);
+    mAudioPlaybackClients.removeItem(portId);
+
     // called from internal thread: no need to clear caller identity
-    mAudioPolicyManager->releaseOutput(output, stream, session);
+    mAudioPolicyManager->releaseOutput(portId);
 }
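
stopOutput() and releaseOutput() only enqueue the port ID on mOutputCommandThread;
the actual doStopOutput()/doReleaseOutput() work runs later on that thread. A
standalone sketch of the hand-off using plain std:: primitives (StopOutputQueue is
illustrative, not the AOSP AudioCommandThread):

#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

class StopOutputQueue {
public:
    StopOutputQueue() : mWorker([this] { run(); }) {}
    ~StopOutputQueue() {
        { std::lock_guard<std::mutex> l(mLock); mDone = true; }
        mCond.notify_one();
        mWorker.join();                     // drains pending commands before exiting
    }

    // Equivalent of stopOutputCommand(portId): returns to the caller immediately.
    void post(int32_t portId) {
        { std::lock_guard<std::mutex> l(mLock); mQueue.push(portId); }
        mCond.notify_one();
    }

private:
    void run() {
        std::unique_lock<std::mutex> l(mLock);
        for (;;) {
            mCond.wait(l, [this] { return mDone || !mQueue.empty(); });
            if (mQueue.empty()) return;     // done and nothing left to process
            int32_t portId = mQueue.front();
            mQueue.pop();
            l.unlock();
            std::printf("doStopOutput(%d)\n", portId);   // stand-in for the real work
            l.lock();
        }
    }

    std::mutex mLock;
    std::condition_variable mCond;
    std::queue<int32_t> mQueue;
    bool mDone = false;
    std::thread mWorker;
};

int main() {
    StopOutputQueue queue;
    queue.post(101);                        // handled later by the worker thread
}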
 
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
@@ -398,17 +421,13 @@
         if (status != NO_ERROR) {
             if (status == PERMISSION_DENIED) {
                 AutoCallerClear acc;
-                mAudioPolicyManager->releaseInput(*input, session);
+                mAudioPolicyManager->releaseInput(*portId);
             }
             return status;
         }
 
-        sp<AudioRecordClient> client =
-                new AudioRecordClient(*attr, *input, uid, pid, opPackageName, session);
-        client->active = false;
-        client->isConcurrent = false;
-        client->isVirtualDevice = false; //TODO : update from APM->getInputForAttr()
-        client->deviceId = *selectedDeviceId;
+        sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
+                                                             *selectedDeviceId, opPackageName);
         mAudioRecordClients.add(*portId, client);
     }
 
@@ -496,8 +515,7 @@
     status_t status;
     {
         AutoCallerClear acc;
-        status = mAudioPolicyManager->startInput(
-                    client->input, client->session, *silenced, &concurrency);
+        status = mAudioPolicyManager->startInput(portId, *silenced, &concurrency);
 
     }
 
@@ -610,7 +628,7 @@
     // finish the recording app op
     finishRecording(client->opPackageName, client->uid);
     AutoCallerClear acc;
-    return mAudioPolicyManager->stopInput(client->input, client->session);
+    return mAudioPolicyManager->stopInput(portId);
 }
 
 void AudioPolicyService::releaseInput(audio_port_handle_t portId)
@@ -635,15 +653,15 @@
     }
     if (audioPolicyEffects != 0) {
         // release audio processors from the input
-        status_t status = audioPolicyEffects->releaseInputEffects(client->input, client->session);
+        status_t status = audioPolicyEffects->releaseInputEffects(client->io, client->session);
         if(status != NO_ERROR) {
-            ALOGW("Failed to release effects on input %d", client->input);
+            ALOGW("Failed to release effects on input %d", client->io);
         }
     }
     {
         Mutex::Autolock _l(mLock);
         AutoCallerClear acc;
-        mAudioPolicyManager->releaseInput(client->input, client->session);
+        mAudioPolicyManager->releaseInput(portId);
     }
 }
 
@@ -811,27 +829,100 @@
     return mAudioPolicyManager->isSourceActive(source);
 }
 
-status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
-                                                       effect_descriptor_t *descriptors,
-                                                       uint32_t *count)
+status_t AudioPolicyService::getAudioPolicyEffects(sp<AudioPolicyEffects>& audioPolicyEffects)
 {
     if (mAudioPolicyManager == NULL) {
-        *count = 0;
         return NO_INIT;
     }
-    sp<AudioPolicyEffects>audioPolicyEffects;
     {
         Mutex::Autolock _l(mLock);
         audioPolicyEffects = mAudioPolicyEffects;
     }
     if (audioPolicyEffects == 0) {
-        *count = 0;
         return NO_INIT;
     }
+
+    return OK;
+}
+
+status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
+                                                       effect_descriptor_t *descriptors,
+                                                       uint32_t *count)
+{
+    sp<AudioPolicyEffects>audioPolicyEffects;
+    status_t status = getAudioPolicyEffects(audioPolicyEffects);
+    if (status != OK) {
+        *count = 0;
+        return status;
+    }
     return audioPolicyEffects->queryDefaultInputEffects(
             (audio_session_t)audioSession, descriptors, count);
 }
 
+status_t AudioPolicyService::addSourceDefaultEffect(const effect_uuid_t *type,
+                                                    const String16& opPackageName,
+                                                    const effect_uuid_t *uuid,
+                                                    int32_t priority,
+                                                    audio_source_t source,
+                                                    audio_unique_id_t* id)
+{
+    sp<AudioPolicyEffects>audioPolicyEffects;
+    status_t status = getAudioPolicyEffects(audioPolicyEffects);
+    if (status != OK) {
+        return status;
+    }
+    if (!modifyDefaultAudioEffectsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    return audioPolicyEffects->addSourceDefaultEffect(
+            type, opPackageName, uuid, priority, source, id);
+}
+
+status_t AudioPolicyService::addStreamDefaultEffect(const effect_uuid_t *type,
+                                                    const String16& opPackageName,
+                                                    const effect_uuid_t *uuid,
+                                                    int32_t priority,
+                                                    audio_usage_t usage,
+                                                    audio_unique_id_t* id)
+{
+    sp<AudioPolicyEffects>audioPolicyEffects;
+    status_t status = getAudioPolicyEffects(audioPolicyEffects);
+    if (status != OK) {
+        return status;
+    }
+    if (!modifyDefaultAudioEffectsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    return audioPolicyEffects->addStreamDefaultEffect(
+            type, opPackageName, uuid, priority, usage, id);
+}
+
+status_t AudioPolicyService::removeSourceDefaultEffect(audio_unique_id_t id)
+{
+    sp<AudioPolicyEffects>audioPolicyEffects;
+    status_t status = getAudioPolicyEffects(audioPolicyEffects);
+    if (status != OK) {
+        return status;
+    }
+    if (!modifyDefaultAudioEffectsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    return audioPolicyEffects->removeSourceDefaultEffect(id);
+}
+
+status_t AudioPolicyService::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+    sp<AudioPolicyEffects>audioPolicyEffects;
+    status_t status = getAudioPolicyEffects(audioPolicyEffects);
+    if (status != OK) {
+        return status;
+    }
+    if (!modifyDefaultAudioEffectsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    return audioPolicyEffects->removeStreamDefaultEffect(id);
+}
+
 bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
 {
     if (mAudioPolicyManager == NULL) {
@@ -963,26 +1054,26 @@
 }
 
 status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
-                                  const audio_attributes_t *attributes,
-                                  audio_patch_handle_t *handle)
+                                              const audio_attributes_t *attributes,
+                                              audio_port_handle_t *portId)
 {
     Mutex::Autolock _l(mLock);
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     AutoCallerClear acc;
-    return mAudioPolicyManager->startAudioSource(source, attributes, handle,
+    return mAudioPolicyManager->startAudioSource(source, attributes, portId,
                                                  IPCThreadState::self()->getCallingUid());
 }
 
-status_t AudioPolicyService::stopAudioSource(audio_patch_handle_t handle)
+status_t AudioPolicyService::stopAudioSource(audio_port_handle_t portId)
 {
     Mutex::Autolock _l(mLock);
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     AutoCallerClear acc;
-    return mAudioPolicyManager->stopAudioSource(handle);
+    return mAudioPolicyManager->stopAudioSource(portId);
 }
 
 status_t AudioPolicyService::setMasterMono(bool mono)
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 1379223..8bca221 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -67,8 +67,6 @@
     {
         Mutex::Autolock _l(mLock);
 
-        // start tone playback thread
-        mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
         // start audio commands thread
         mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
         // start output activity command thread
@@ -90,7 +88,6 @@
 
 AudioPolicyService::~AudioPolicyService()
 {
-    mTonePlaybackThread->exit();
     mAudioCommandThread->exit();
     mOutputCommandThread->exit();
 
@@ -115,13 +112,17 @@
     Mutex::Autolock _l(mNotificationClientsLock);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
-    if (mNotificationClients.indexOfKey(uid) < 0) {
+    pid_t pid = IPCThreadState::self()->getCallingPid();
+    int64_t token = ((int64_t)uid<<32) | pid;
+
+    if (mNotificationClients.indexOfKey(token) < 0) {
         sp<NotificationClient> notificationClient = new NotificationClient(this,
                                                                            client,
-                                                                           uid);
-        ALOGV("registerClient() client %p, uid %d", client.get(), uid);
+                                                                           uid,
+                                                                           pid);
+        ALOGV("registerClient() client %p, uid %d pid %d", client.get(), uid, pid);
 
-        mNotificationClients.add(uid, notificationClient);
+        mNotificationClients.add(token, notificationClient);
 
         sp<IBinder> binder = IInterface::asBinder(client);
         binder->linkToDeath(notificationClient);
@@ -133,22 +134,33 @@
     Mutex::Autolock _l(mNotificationClientsLock);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
-    if (mNotificationClients.indexOfKey(uid) < 0) {
+    pid_t pid = IPCThreadState::self()->getCallingPid();
+    int64_t token = ((int64_t)uid<<32) | pid;
+
+    if (mNotificationClients.indexOfKey(token) < 0) {
         return;
     }
-    mNotificationClients.valueFor(uid)->setAudioPortCallbacksEnabled(enabled);
+    mNotificationClients.valueFor(token)->setAudioPortCallbacksEnabled(enabled);
 }
 
 // removeNotificationClient() is called when the client process dies.
-void AudioPolicyService::removeNotificationClient(uid_t uid)
+void AudioPolicyService::removeNotificationClient(uid_t uid, pid_t pid)
 {
     {
         Mutex::Autolock _l(mNotificationClientsLock);
-        mNotificationClients.removeItem(uid);
+        int64_t token = ((int64_t)uid<<32) | pid;
+        mNotificationClients.removeItem(token);
     }
     {
         Mutex::Autolock _l(mLock);
-        if (mAudioPolicyManager) {
+        bool hasSameUid = false;
+        for (size_t i = 0; i < mNotificationClients.size(); i++) {
+            if (mNotificationClients.valueAt(i)->uid() == uid) {
+                hasSameUid = true;
+                break;
+            }
+        }
+        if (mAudioPolicyManager && !hasSameUid) {
             // called from binder death notification: no need to clear caller identity
             mAudioPolicyManager->releaseResourcesForUid(uid);
         }
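
Notification clients are now keyed by a 64-bit token that packs the caller's uid
into the upper 32 bits and its pid into the lower 32, so the same uid registering
from different processes gets distinct entries. A small self-contained check of
that packing (makeToken is an illustrative name):

#include <cassert>
#include <cstdint>

int64_t makeToken(uint32_t uid, uint32_t pid) {
    return (static_cast<int64_t>(uid) << 32) | pid;
}

int main() {
    const int64_t token = makeToken(10042, 31337);
    assert(static_cast<uint32_t>(token >> 32) == 10042);   // uid recovered
    assert(static_cast<uint32_t>(token) == 31337);         // pid recovered
    return 0;
}
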
@@ -236,8 +248,9 @@
 
 AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
                                                      const sp<IAudioPolicyServiceClient>& client,
-                                                     uid_t uid)
-    : mService(service), mUid(uid), mAudioPolicyServiceClient(client),
+                                                     uid_t uid,
+                                                     pid_t pid)
+    : mService(service), mUid(uid), mPid(pid), mAudioPolicyServiceClient(client),
       mAudioPortCallbacksEnabled(false)
 {
 }
@@ -251,7 +264,7 @@
     sp<NotificationClient> keep(this);
     sp<AudioPolicyService> service = mService.promote();
     if (service != 0) {
-        service->removeNotificationClient(mUid);
+        service->removeNotificationClient(mUid, mPid);
     }
 }
 
@@ -322,8 +335,6 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
     result.append(buffer);
-    snprintf(buffer, SIZE, "Tones Thread: %p\n", mTonePlaybackThread.get());
-    result.append(buffer);
 
     write(fd, result.string(), result.size());
     return NO_ERROR;
@@ -359,9 +370,6 @@
         if (mAudioCommandThread != 0) {
             mAudioCommandThread->dump(fd);
         }
-        if (mTonePlaybackThread != 0) {
-            mTonePlaybackThread->dump(fd);
-        }
 
         if (mAudioPolicyManager) {
             mAudioPolicyManager->dump(fd);
@@ -632,7 +640,6 @@
                                                            const wp<AudioPolicyService>& service)
     : Thread(false), mName(name), mService(service)
 {
-    mpToneGenerator = NULL;
 }
 
 
@@ -642,7 +649,6 @@
         release_wake_lock(mName.string());
     }
     mAudioCommands.clear();
-    delete mpToneGenerator;
 }
 
 void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -667,26 +673,6 @@
                 mLastCommand = command;
 
                 switch (command->mCommand) {
-                case START_TONE: {
-                    mLock.unlock();
-                    ToneData *data = (ToneData *)command->mParam.get();
-                    ALOGV("AudioCommandThread() processing start tone %d on stream %d",
-                            data->mType, data->mStream);
-                    delete mpToneGenerator;
-                    mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
-                    mpToneGenerator->startTone(data->mType);
-                    mLock.lock();
-                    }break;
-                case STOP_TONE: {
-                    mLock.unlock();
-                    ALOGV("AudioCommandThread() processing stop tone");
-                    if (mpToneGenerator != NULL) {
-                        mpToneGenerator->stopTone();
-                        delete mpToneGenerator;
-                        mpToneGenerator = NULL;
-                    }
-                    mLock.lock();
-                    }break;
                 case SET_VOLUME: {
                     VolumeData *data = (VolumeData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set volume stream %d, \
@@ -709,26 +695,26 @@
                     }break;
                 case STOP_OUTPUT: {
                     StopOutputData *data = (StopOutputData *)command->mParam.get();
-                    ALOGV("AudioCommandThread() processing stop output %d",
-                            data->mIO);
+                    ALOGV("AudioCommandThread() processing stop output portId %d",
+                            data->mPortId);
                     svc = mService.promote();
                     if (svc == 0) {
                         break;
                     }
                     mLock.unlock();
-                    svc->doStopOutput(data->mIO, data->mStream, data->mSession);
+                    svc->doStopOutput(data->mPortId);
                     mLock.lock();
                     }break;
                 case RELEASE_OUTPUT: {
                     ReleaseOutputData *data = (ReleaseOutputData *)command->mParam.get();
-                    ALOGV("AudioCommandThread() processing release output %d",
-                            data->mIO);
+                    ALOGV("AudioCommandThread() processing release output portId %d",
+                            data->mPortId);
                     svc = mService.promote();
                     if (svc == 0) {
                         break;
                     }
                     mLock.unlock();
-                    svc->doReleaseOutput(data->mIO, data->mStream, data->mSession);
+                    svc->doReleaseOutput(data->mPortId);
                     mLock.lock();
                     }break;
                 case CREATE_AUDIO_PATCH: {
@@ -893,27 +879,6 @@
     return NO_ERROR;
 }
 
-void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type,
-        audio_stream_type_t stream)
-{
-    sp<AudioCommand> command = new AudioCommand();
-    command->mCommand = START_TONE;
-    sp<ToneData> data = new ToneData();
-    data->mType = type;
-    data->mStream = stream;
-    command->mParam = data;
-    ALOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream);
-    sendCommand(command);
-}
-
-void AudioPolicyService::AudioCommandThread::stopToneCommand()
-{
-    sp<AudioCommand> command = new AudioCommand();
-    command->mCommand = STOP_TONE;
-    ALOGV("AudioCommandThread() adding tone stop");
-    sendCommand(command);
-}
-
 status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
                                                                float volume,
                                                                audio_io_handle_t output,
@@ -960,33 +925,25 @@
     return sendCommand(command, delayMs);
 }
 
-void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
-                                                               audio_stream_type_t stream,
-                                                               audio_session_t session)
+void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_port_handle_t portId)
 {
     sp<AudioCommand> command = new AudioCommand();
     command->mCommand = STOP_OUTPUT;
     sp<StopOutputData> data = new StopOutputData();
-    data->mIO = output;
-    data->mStream = stream;
-    data->mSession = session;
+    data->mPortId = portId;
     command->mParam = data;
-    ALOGV("AudioCommandThread() adding stop output %d", output);
+    ALOGV("AudioCommandThread() adding stop output portId %d", portId);
     sendCommand(command);
 }
 
-void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output,
-                                                                  audio_stream_type_t stream,
-                                                                  audio_session_t session)
+void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_port_handle_t portId)
 {
     sp<AudioCommand> command = new AudioCommand();
     command->mCommand = RELEASE_OUTPUT;
     sp<ReleaseOutputData> data = new ReleaseOutputData();
-    data->mIO = output;
-    data->mStream = stream;
-    data->mSession = session;
+    data->mPortId = portId;
     command->mParam = data;
-    ALOGV("AudioCommandThread() adding release output %d", output);
+    ALOGV("AudioCommandThread() adding release output portId %d", portId);
     sendCommand(command);
 }
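
With this refactor a queued stop or release request carries only a portId; the command thread that drains the queue looks up the rest of the client state from that handle. A self-contained sketch of the same queue-by-portId shape, with made-up types rather than the real AudioCommandThread API:

    #include <cstdint>
    #include <deque>
    #include <functional>
    #include <iostream>

    using audio_port_handle_t = int32_t;  // stand-in for the system/audio.h typedef

    enum class Cmd { StopOutput, ReleaseOutput };

    struct OutputCommand {
        Cmd cmd;
        audio_port_handle_t portId;  // the only payload needed after the refactor
    };

    class CommandQueue {
    public:
        void stopOutput(audio_port_handle_t portId)    { mPending.push_back({Cmd::StopOutput, portId}); }
        void releaseOutput(audio_port_handle_t portId) { mPending.push_back({Cmd::ReleaseOutput, portId}); }
        // Drain the queue; the handler resolves portId back to stream/session state.
        void drain(const std::function<void(const OutputCommand&)>& handler) {
            while (!mPending.empty()) {
                handler(mPending.front());
                mPending.pop_front();
            }
        }
    private:
        std::deque<OutputCommand> mPending;
    };

    int main() {
        CommandQueue q;
        q.stopOutput(42);
        q.releaseOutput(42);
        q.drain([](const OutputCommand& c) {
            std::cout << (c.cmd == Cmd::StopOutput ? "stop " : "release ") << c.portId << "\n";
        });
        return 0;
    }
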
 
@@ -1250,8 +1207,6 @@
 
         } break;
 
-        case START_TONE:
-        case STOP_TONE:
         default:
             break;
         }
@@ -1324,27 +1279,6 @@
                                                    output, delayMs);
 }
 
-int AudioPolicyService::startTone(audio_policy_tone_t tone,
-                                  audio_stream_type_t stream)
-{
-    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
-        ALOGE("startTone: illegal tone requested (%d)", tone);
-    }
-    if (stream != AUDIO_STREAM_VOICE_CALL) {
-        ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
-            tone);
-    }
-    mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
-                                          AUDIO_STREAM_VOICE_CALL);
-    return 0;
-}
-
-int AudioPolicyService::stopTone()
-{
-    mTonePlaybackThread->stopToneCommand();
-    return 0;
-}
-
 int AudioPolicyService::setVoiceVolume(float volume, int delayMs)
 {
     return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
@@ -1400,9 +1334,6 @@
 int aps_set_stream_volume(void *service, audio_stream_type_t stream,
                                      float volume, audio_io_handle_t output,
                                      int delay_ms);
-int aps_start_tone(void *service, audio_policy_tone_t tone,
-                              audio_stream_type_t stream);
-int aps_stop_tone(void *service);
 int aps_set_voice_volume(void *service, float volume, int delay_ms);
 };
 
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 09375f1..6c55647 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -81,15 +81,9 @@
                                       audio_output_flags_t flags,
                                       audio_port_handle_t *selectedDeviceId,
                                       audio_port_handle_t *portId);
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session);
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session);
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session);
+    virtual status_t startOutput(audio_port_handle_t portId);
+    virtual status_t stopOutput(audio_port_handle_t portId);
+    virtual void releaseOutput(audio_port_handle_t portId);
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
@@ -132,6 +126,21 @@
     virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count);
+    virtual status_t addSourceDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_source_t source,
+                                            audio_unique_id_t* id);
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_usage_t usage,
+                                            audio_unique_id_t* id);
+    virtual status_t removeSourceDefaultEffect(audio_unique_id_t id);
+    virtual status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,
@@ -157,8 +166,6 @@
                                      float volume,
                                      audio_io_handle_t output,
                                      int delayMs = 0);
-    virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
-    virtual status_t stopTone();
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
     virtual bool isOffloadSupported(const audio_offload_info_t &config);
 
@@ -192,8 +199,8 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle);
-    virtual status_t stopAudioSource(audio_patch_handle_t handle);
+                                      audio_port_handle_t *portId);
+    virtual status_t stopAudioSource(audio_port_handle_t portId);
 
     virtual status_t setMasterMono(bool mono);
     virtual status_t getMasterMono(bool *mono);
@@ -207,12 +214,8 @@
                                         bool reported);
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
 
-            status_t doStopOutput(audio_io_handle_t output,
-                                  audio_stream_type_t stream,
-                                  audio_session_t session);
-            void doReleaseOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session);
+            status_t doStopOutput(audio_port_handle_t portId);
+            void doReleaseOutput(audio_port_handle_t portId);
 
             status_t clientCreateAudioPatch(const struct audio_patch *patch,
                                       audio_patch_handle_t *handle,
@@ -222,7 +225,7 @@
             virtual status_t clientSetAudioPortConfig(const struct audio_port_config *config,
                                                       int delayMs);
 
-            void removeNotificationClient(uid_t uid);
+            void removeNotificationClient(uid_t uid, pid_t pid);
             void onAudioPortListUpdate();
             void doOnAudioPortListUpdate();
             void onAudioPatchListUpdate();
@@ -263,6 +266,8 @@
 
     std::string getDeviceTypeStrForPortId(audio_port_handle_t portId);
 
+    status_t getAudioPolicyEffects(sp<AudioPolicyEffects>& audioPolicyEffects);
+
     // If recording we need to make sure the UID is allowed to do that. If the UID is idle
     // then it cannot record and gets buffers with zeros - silence. As soon as the UID
     // transitions to an active state we will start reporting buffers with data. This approach
@@ -304,10 +309,7 @@
         std::unordered_map<uid_t, bool> mCachedUids;
     };
 
-    // Thread used for tone playback and to send audio config commands to audio flinger
-    // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
-    // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
-    // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+    // Thread used to send audio config commands to audio flinger
     // For audio config commands, it is necessary because audio flinger requires that the calling
     // process (user) has permission to modify audio settings.
     class AudioCommandThread : public Thread {
@@ -316,8 +318,6 @@
 
-        // commands for tone AudioCommand
+        // command codes handled by AudioCommandThread
         enum {
-            START_TONE,
-            STOP_TONE,
             SET_VOLUME,
             SET_PARAMETERS,
             SET_VOICE_VOLUME,
@@ -342,20 +342,13 @@
         virtual     bool        threadLoop();
 
                     void        exit();
-                    void        startToneCommand(ToneGenerator::tone_type type,
-                                                 audio_stream_type_t stream);
-                    void        stopToneCommand();
                     status_t    volumeCommand(audio_stream_type_t stream, float volume,
                                             audio_io_handle_t output, int delayMs = 0);
                     status_t    parametersCommand(audio_io_handle_t ioHandle,
                                             const char *keyValuePairs, int delayMs = 0);
                     status_t    voiceVolumeCommand(float volume, int delayMs = 0);
-                    void        stopOutputCommand(audio_io_handle_t output,
-                                                  audio_stream_type_t stream,
-                                                  audio_session_t session);
-                    void        releaseOutputCommand(audio_io_handle_t output,
-                                                     audio_stream_type_t stream,
-                                                     audio_session_t session);
+                    void        stopOutputCommand(audio_port_handle_t portId);
+                    void        releaseOutputCommand(audio_port_handle_t portId);
                     status_t    sendCommand(sp<AudioCommand>& command, int delayMs = 0);
                     void        insertCommand_l(sp<AudioCommand>& command, int delayMs = 0);
                     status_t    createAudioPatchCommand(const struct audio_patch *patch,
@@ -387,7 +380,7 @@
 
             void dump(char* buffer, size_t size);
 
-            int mCommand;   // START_TONE, STOP_TONE ...
+            int mCommand;   // SET_VOLUME, SET_PARAMETERS...
             nsecs_t mTime;  // time stamp
             Mutex mLock;    // mutex associated to mCond
             Condition mCond; // condition for status return
@@ -403,12 +396,6 @@
             AudioCommandData() {}
         };
 
-        class ToneData : public AudioCommandData {
-        public:
-            ToneGenerator::tone_type mType; // tone type (START_TONE only)
-            audio_stream_type_t mStream;    // stream type (START_TONE only)
-        };
-
         class VolumeData : public AudioCommandData {
         public:
             audio_stream_type_t mStream;
@@ -429,16 +416,12 @@
 
         class StopOutputData : public AudioCommandData {
         public:
-            audio_io_handle_t mIO;
-            audio_stream_type_t mStream;
-            audio_session_t mSession;
+            audio_port_handle_t mPortId;
         };
 
         class ReleaseOutputData : public AudioCommandData {
         public:
-            audio_io_handle_t mIO;
-            audio_stream_type_t mStream;
-            audio_session_t mSession;
+            audio_port_handle_t mPortId;
         };
 
         class CreateAudioPatchData : public AudioCommandData {
@@ -475,7 +458,6 @@
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
-        ToneGenerator *mpToneGenerator;     // the tone generator
         sp<AudioCommand> mLastCommand;      // last processed command (used by dump)
-        String8 mName;                      // string used by wake lock fo delayed commands
+        String8 mName;                      // string used by wake lock for delayed commands
         wp<AudioPolicyService> mService;
@@ -550,11 +532,6 @@
-        // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
+        // function enabling the audio policy manager to receive proprietary information directly from the audio hardware interface.
         virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
 
-        // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
-        // over a telephony device during a phone call.
-        virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
-        virtual status_t stopTone();
-
         // set down link audio volume.
         virtual status_t setVoiceVolume(float volume, int delayMs = 0);
 
@@ -594,7 +571,7 @@
     public:
                             NotificationClient(const sp<AudioPolicyService>& service,
                                                 const sp<IAudioPolicyServiceClient>& client,
-                                                uid_t uid);
+                                                uid_t uid, pid_t pid);
         virtual             ~NotificationClient();
 
                             void      onAudioPortListUpdate();
@@ -607,6 +584,10 @@
                                         audio_patch_handle_t patchHandle);
                             void      setAudioPortCallbacksEnabled(bool enabled);
 
+                            uid_t uid() {
+                                return mUid;
+                            }
+
                 // IBinder::DeathRecipient
                 virtual     void        binderDied(const wp<IBinder>& who);
 
@@ -616,34 +597,61 @@
 
         const wp<AudioPolicyService>        mService;
         const uid_t                         mUid;
+        const pid_t                         mPid;
         const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
               bool                          mAudioPortCallbacksEnabled;
     };
 
+    class AudioClient : public virtual RefBase {
+    public:
+                AudioClient(const audio_attributes_t attributes,
+                            const audio_io_handle_t io, uid_t uid, pid_t pid,
+                            const audio_session_t session, const audio_port_handle_t deviceId) :
+                                attributes(attributes), io(io), uid(uid), pid(pid),
+                                session(session), deviceId(deviceId), active(false) {}
+                ~AudioClient() override = default;
+
+
+        const audio_attributes_t attributes; // source, flags ...
+        const audio_io_handle_t io;          // audio HAL stream IO handle
+        const uid_t uid;                     // client UID
+        const pid_t pid;                     // client PID
+        const audio_session_t session;       // audio session ID
+        const audio_port_handle_t deviceId;  // selected input device port ID
+              bool active;                   // Playback/Capture is active or inactive
+    };
+
     // --- AudioRecordClient ---
     // Information about each registered AudioRecord client
     // (between calls to getInputForAttr() and releaseInput())
-    class AudioRecordClient : public RefBase {
+    class AudioRecordClient : public AudioClient {
     public:
                 AudioRecordClient(const audio_attributes_t attributes,
-                                  const audio_io_handle_t input, uid_t uid, pid_t pid,
-                                  const String16& opPackageName, const audio_session_t session) :
-                                      attributes(attributes),
-                                      input(input), uid(uid), pid(pid),
-                                      opPackageName(opPackageName), session(session),
-                                      active(false), isConcurrent(false), isVirtualDevice(false) {}
-        virtual ~AudioRecordClient() {}
+                          const audio_io_handle_t io, uid_t uid, pid_t pid,
+                          const audio_session_t session, const audio_port_handle_t deviceId,
+                          const String16& opPackageName) :
+                    AudioClient(attributes, io, uid, pid, session, deviceId),
+                    opPackageName(opPackageName), isConcurrent(false), isVirtualDevice(false) {}
+                ~AudioRecordClient() override = default;
 
-        const audio_attributes_t attributes; // source, flags ...
-        const audio_io_handle_t input;       // audio HAL input IO handle
-        const uid_t uid;                     // client UID
-        const pid_t pid;                     // client PID
         const String16 opPackageName;        // client package name
-        const audio_session_t session;       // audio session ID
-        bool active;                   // Capture is active or inactive
         bool isConcurrent;             // is allowed to concurrent capture
         bool isVirtualDevice;          // uses virtual device: updated by APM::getInputForAttr()
-        audio_port_handle_t deviceId;  // selected input device port ID
+    };
+
+    // --- AudioPlaybackClient ---
+    // Information about each registered AudioTrack client
+    // (between calls to getOutputForAttr() and releaseOutput())
+    class AudioPlaybackClient : public AudioClient {
+    public:
+                AudioPlaybackClient(const audio_attributes_t attributes,
+                      const audio_io_handle_t io, uid_t uid, pid_t pid,
+                            const audio_session_t session, audio_port_handle_t deviceId,
+                            audio_stream_type_t stream) :
+                    AudioClient(attributes, io, uid, pid, session, deviceId), stream(stream) {}
+                ~AudioPlaybackClient() override = default;
+
+        const audio_stream_type_t stream;
     };
 
     // A class automatically clearing and restoring binder caller identity inside
@@ -673,14 +681,13 @@
     // mLock protects AudioPolicyManager methods that can call into audio flinger
     // and possibly back in to audio policy service and acquire mEffectsLock.
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
-    sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
     AudioPolicyInterface *mAudioPolicyManager;
     AudioPolicyClient *mAudioPolicyClient;
 
-    DefaultKeyedVector< uid_t, sp<NotificationClient> >    mNotificationClients;
+    DefaultKeyedVector< int64_t, sp<NotificationClient> >    mNotificationClients;
     Mutex mNotificationClientsLock;  // protects mNotificationClients
     // Manage all effects configured in audio_effects.conf
     sp<AudioPolicyEffects> mAudioPolicyEffects;
@@ -688,6 +695,7 @@
 
     sp<UidPolicy> mUidPolicy;
     DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> >   mAudioRecordClients;
+    DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> >   mAudioPlaybackClients;
 };
 
 } // namespace android
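
The header above factors the shared fields of capture and playback clients into an AudioClient base class and tracks both kinds in portId-keyed maps. A trimmed standalone sketch of that layout, with standard containers standing in for DefaultKeyedVector and placeholder typedefs for the audio handles:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>

    // Stand-ins for the AOSP handle types; illustrative only.
    using audio_port_handle_t = int32_t;
    using audio_session_t = int32_t;

    // Common state shared by capture and playback clients, as in the AudioClient
    // base class introduced above (fields trimmed for brevity).
    struct ClientBase {
        uint32_t uid = 0;
        uint32_t pid = 0;
        audio_session_t session = 0;
        audio_port_handle_t deviceId = 0;
        bool active = false;
        virtual ~ClientBase() = default;
    };

    struct RecordClient : ClientBase {
        std::string opPackageName;  // capture-specific: needed for app-op checks
    };

    struct PlaybackClient : ClientBase {
        int stream = 0;             // playback-specific: legacy stream type
    };

    int main() {
        // Both kinds of client live in portId-keyed maps, mirroring
        // mAudioRecordClients / mAudioPlaybackClients in the header above.
        std::map<audio_port_handle_t, std::shared_ptr<PlaybackClient>> playbackClients;
        auto c = std::make_shared<PlaybackClient>();
        c->uid = 10006; c->pid = 1234; c->session = 7; c->deviceId = 3;
        playbackClients[/*portId*/ 42] = c;
        playbackClients[42]->active = true;  // startOutput(portId) would flip this
        return 0;
    }
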
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index cfa9ab1..b739b88 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -29,3 +29,26 @@
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 include $(BUILD_NATIVE_TEST)
+
+# system/audio.h utilities test
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+  libbase \
+  liblog \
+  libmedia_helper \
+  libutils
+
+LOCAL_SRC_FILES := \
+  systemaudio_tests.cpp \
+
+LOCAL_MODULE := systemaudio_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index eb8222c..2ff7675 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -60,9 +60,6 @@
                        int /*delayMs*/) override { }
     String8 getParameters(audio_io_handle_t /*ioHandle*/,
                           const String8& /*keys*/) override { return String8(); }
-    status_t startTone(audio_policy_tone_t /*tone*/,
-                       audio_stream_type_t /*stream*/) override { return NO_INIT; }
-    status_t stopTone() override { return NO_INIT; }
     status_t setVoiceVolume(float /*volume*/, int /*delayMs*/) override { return NO_INIT; }
     status_t moveEffects(audio_session_t /*session*/,
                          audio_io_handle_t /*srcOutput*/,
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
new file mode 100644
index 0000000..abaae52
--- /dev/null
+++ b/services/audiopolicy/tests/systemaudio_tests.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define LOG_TAG "SysAudio_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+#include <system/audio.h>
+
+using namespace android;
+
+TEST(SystemAudioTest, PatchInvalid) {
+    audio_patch patch{};
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
+    patch.num_sinks = 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = 1;
+    patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = 0;
+    patch.num_sinks = 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+}
+
+TEST(SystemAudioTest, PatchValid) {
+    const audio_port_config src = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    // It's OK not to have sinks.
+    ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).patch()));
+    const audio_port_config sink = {
+        .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSink(sink).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSource(src).
+                    addSink(sink).addSink(sink).patch()));
+}
+
+TEST(SystemAudioTest, PatchHwAvSync) {
+    audio_port_config device_src_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+    device_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+    device_src_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+
+    audio_port_config device_sink_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+    device_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+    device_sink_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+
+    audio_port_config mix_sink_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_MIX };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+    mix_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+    mix_sink_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+
+    audio_port_config mix_src_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_MIX };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+    mix_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+    mix_src_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+}
+
+TEST(SystemAudioTest, PatchEqual) {
+    const audio_patch patch1{}, patch2{};
+    // Invalid patches are not equal.
+    ASSERT_FALSE(audio_patches_are_equal(&patch1, &patch2));
+    const audio_port_config src = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    const audio_port_config sink = {
+        .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+    audio_port_config sink_hw_av_sync = sink;
+    sink_hw_av_sync.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    sink_hw_av_sync.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+    ASSERT_TRUE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+}
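
The PatchInvalid and PatchValid cases above boil down to one rule: a patch needs at least one source, and neither side may exceed the per-patch port limit. The snippet below restates that rule in isolation; the real types and audio_patch_is_valid() live in system/audio.h, so everything here is a stand-in:

    // Illustrative constants and types only; not the system/audio.h definitions.
    constexpr unsigned AUDIO_PATCH_PORTS_MAX = 16;

    struct port_config { int id; };
    struct patch {
        unsigned num_sources;
        unsigned num_sinks;
        port_config sources[AUDIO_PATCH_PORTS_MAX];
        port_config sinks[AUDIO_PATCH_PORTS_MAX];
    };

    // The shape of the validity rule the PatchInvalid test exercises:
    // at least one source, and neither side over the per-patch port limit.
    static bool patch_is_valid(const patch& p) {
        return p.num_sources != 0 &&
               p.num_sources <= AUDIO_PATCH_PORTS_MAX &&
               p.num_sinks <= AUDIO_PATCH_PORTS_MAX;
    }

    int main() {
        patch p{};
        p.num_sources = 1;   // one device source
        p.num_sinks = 0;     // "It's OK not to have sinks."
        return patch_is_valid(p) ? 0 : 1;
    }
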
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
new file mode 100644
index 0000000..eccbe54
--- /dev/null
+++ b/services/camera/libcameraservice/Android.bp
@@ -0,0 +1,106 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//
+// libcameraservice
+//
+
+cc_library_shared {
+    name: "libcameraservice",
+
+    // Camera service source
+
+    srcs: [
+        "CameraService.cpp",
+        "CameraFlashlight.cpp",
+        "common/Camera2ClientBase.cpp",
+        "common/CameraDeviceBase.cpp",
+        "common/CameraProviderManager.cpp",
+        "common/FrameProcessorBase.cpp",
+        "api1/CameraClient.cpp",
+        "api1/Camera2Client.cpp",
+        "api1/client2/Parameters.cpp",
+        "api1/client2/FrameProcessor.cpp",
+        "api1/client2/StreamingProcessor.cpp",
+        "api1/client2/JpegProcessor.cpp",
+        "api1/client2/CallbackProcessor.cpp",
+        "api1/client2/JpegCompressor.cpp",
+        "api1/client2/CaptureSequencer.cpp",
+        "api1/client2/ZslProcessor.cpp",
+        "api2/CameraDeviceClient.cpp",
+        "device1/CameraHardwareInterface.cpp",
+        "device3/Camera3Device.cpp",
+        "device3/Camera3Stream.cpp",
+        "device3/Camera3IOStreamBase.cpp",
+        "device3/Camera3InputStream.cpp",
+        "device3/Camera3OutputStream.cpp",
+        "device3/Camera3DummyStream.cpp",
+        "device3/Camera3SharedOutputStream.cpp",
+        "device3/StatusTracker.cpp",
+        "device3/Camera3BufferManager.cpp",
+        "device3/Camera3StreamSplitter.cpp",
+        "device3/DistortionMapper.cpp",
+        "gui/RingBufferConsumer.cpp",
+        "utils/CameraTraces.cpp",
+        "utils/AutoConditionLock.cpp",
+        "utils/TagMonitor.cpp",
+        "utils/LatencyHistogram.cpp",
+    ],
+
+    shared_libs: [
+        "libui",
+        "liblog",
+        "libutilscallstack",
+        "libutils",
+        "libbinder",
+        "libcutils",
+        "libmedia",
+        "libmediautils",
+        "libcamera_client",
+        "libcamera_metadata",
+        "libfmq",
+        "libgui",
+        "libhardware",
+        "libhidlbase",
+        "libhidltransport",
+        "libjpeg",
+        "libmemunreachable",
+        "android.hardware.camera.common@1.0",
+        "android.hardware.camera.provider@2.4",
+        "android.hardware.camera.device@1.0",
+        "android.hardware.camera.device@3.2",
+        "android.hardware.camera.device@3.3",
+        "android.hardware.camera.device@3.4",
+    ],
+
+    export_shared_lib_headers: [
+        "libbinder",
+        "libcamera_client",
+        "libfmq",
+    ],
+
+    include_dirs: [
+        "system/media/private/camera/include",
+        "frameworks/native/include/media/openmax",
+    ],
+
+    export_include_dirs: ["."],
+
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+
+}
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 96261ab..4cfecfd 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -14,91 +14,9 @@
 
 LOCAL_PATH:= $(call my-dir)
 
-#
-# libcameraservice
-#
-
 include $(CLEAR_VARS)
 
-# Camera service source
-
-LOCAL_SRC_FILES :=  \
-    CameraService.cpp \
-    CameraFlashlight.cpp \
-    common/Camera2ClientBase.cpp \
-    common/CameraDeviceBase.cpp \
-    common/CameraProviderManager.cpp \
-    common/FrameProcessorBase.cpp \
-    api1/CameraClient.cpp \
-    api1/Camera2Client.cpp \
-    api1/client2/Parameters.cpp \
-    api1/client2/FrameProcessor.cpp \
-    api1/client2/StreamingProcessor.cpp \
-    api1/client2/JpegProcessor.cpp \
-    api1/client2/CallbackProcessor.cpp \
-    api1/client2/JpegCompressor.cpp \
-    api1/client2/CaptureSequencer.cpp \
-    api1/client2/ZslProcessor.cpp \
-    api2/CameraDeviceClient.cpp \
-    device1/CameraHardwareInterface.cpp \
-    device3/Camera3Device.cpp \
-    device3/Camera3Stream.cpp \
-    device3/Camera3IOStreamBase.cpp \
-    device3/Camera3InputStream.cpp \
-    device3/Camera3OutputStream.cpp \
-    device3/Camera3DummyStream.cpp \
-    device3/Camera3SharedOutputStream.cpp \
-    device3/StatusTracker.cpp \
-    device3/Camera3BufferManager.cpp \
-    device3/Camera3StreamSplitter.cpp \
-    device3/DistortionMapper.cpp \
-    gui/RingBufferConsumer.cpp \
-    utils/CameraTraces.cpp \
-    utils/AutoConditionLock.cpp \
-    utils/TagMonitor.cpp \
-    utils/LatencyHistogram.cpp
-
-LOCAL_SHARED_LIBRARIES:= \
-    libui \
-    liblog \
-    libutilscallstack \
-    libutils \
-    libbinder \
-    libcutils \
-    libmedia \
-    libmediautils \
-    libcamera_client \
-    libcamera_metadata \
-    libfmq \
-    libgui \
-    libhardware \
-    libhidlbase \
-    libhidltransport \
-    libjpeg \
-    libmemunreachable \
-    android.hardware.camera.common@1.0 \
-    android.hardware.camera.provider@2.4 \
-    android.hardware.camera.device@1.0 \
-    android.hardware.camera.device@3.2 \
-    android.hardware.camera.device@3.3 \
-    android.hardware.camera.device@3.4
-
-LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder libcamera_client libfmq
-
-LOCAL_C_INCLUDES += \
-    system/media/private/camera/include \
-    frameworks/native/include/media/openmax
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := \
-    frameworks/av/services/camera/libcameraservice
-
-LOCAL_CFLAGS += -Wall -Wextra -Werror
-
-LOCAL_MODULE:= libcameraservice
-
-include $(BUILD_SHARED_LIBRARY)
-
-# Build tests too
+# Build tests
 
 include $(LOCAL_PATH)/tests/Android.mk
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c41de82..f9240db 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -582,7 +582,7 @@
 Status CameraService::makeClient(const sp<CameraService>& cameraService,
         const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
         int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
-        bool legacyMode, int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+        int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
         /*out*/sp<BasicClient>* client) {
 
     if (halVersion < 0 || halVersion == deviceVersion) {
@@ -594,7 +594,7 @@
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
                 *client = new CameraClient(cameraService, tmp, packageName,
                         api1CameraId, facing, clientPid, clientUid,
-                        getpid(), legacyMode);
+                        getpid());
             } else { // Camera2 API route
                 ALOGW("Camera using old HAL version: %d", deviceVersion);
                 return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
@@ -612,7 +612,7 @@
                 *client = new Camera2Client(cameraService, tmp, packageName,
                         cameraId, api1CameraId,
                         facing, clientPid, clientUid,
-                        servicePid, legacyMode);
+                        servicePid);
             } else { // Camera2 API route
                 sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
                         static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
@@ -636,7 +636,7 @@
             sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
             *client = new CameraClient(cameraService, tmp, packageName,
                     api1CameraId, facing, clientPid, clientUid,
-                    servicePid, legacyMode);
+                    servicePid);
         } else {
             // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
             ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
@@ -735,8 +735,7 @@
             sp<ICameraClient>{nullptr}, id, cameraId,
             static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED),
             internalPackageName, uid, USE_CALLING_PID,
-            API_1, /*legacyMode*/ false, /*shimUpdateOnly*/ true,
-            /*out*/ tmp)
+            API_1, /*shimUpdateOnly*/ true, /*out*/ tmp)
             ).isOk()) {
         ALOGE("%s: Error initializing shim metadata: %s", __FUNCTION__, ret.toString8().string());
     }
@@ -1200,8 +1199,7 @@
     sp<Client> client = nullptr;
     ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId,
             CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, clientPid, API_1,
-            /*legacyMode*/ false, /*shimUpdateOnly*/ false,
-            /*out*/client);
+            /*shimUpdateOnly*/ false, /*out*/client);
 
     if(!ret.isOk()) {
         logRejected(id, getCallingPid(), String8(clientPackageName),
@@ -1227,8 +1225,7 @@
     Status ret = Status::ok();
     sp<Client> client = nullptr;
     ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId, halVersion,
-            clientPackageName, clientUid, USE_CALLING_PID, API_1,
-            /*legacyMode*/ true, /*shimUpdateOnly*/ false,
+            clientPackageName, clientUid, USE_CALLING_PID, API_1, /*shimUpdateOnly*/ false,
             /*out*/client);
 
     if(!ret.isOk()) {
@@ -1256,9 +1253,7 @@
     ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
             /*api1CameraId*/-1,
             CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
-            clientUid, USE_CALLING_PID, API_2,
-            /*legacyMode*/ false, /*shimUpdateOnly*/ false,
-            /*out*/client);
+            clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, /*out*/client);
 
     if(!ret.isOk()) {
         logRejected(id, getCallingPid(), String8(clientPackageName),
@@ -1273,7 +1268,7 @@
 template<class CALLBACK, class CLIENT>
 Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
         int api1CameraId, int halVersion, const String16& clientPackageName, int clientUid,
-        int clientPid, apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+        int clientPid, apiLevel effectiveApiLevel, bool shimUpdateOnly,
         /*out*/sp<CLIENT>& device) {
     binder::Status ret = binder::Status::ok();
 
@@ -1358,7 +1353,7 @@
         sp<BasicClient> tmp = nullptr;
         if(!(ret = makeClient(this, cameraCb, clientPackageName,
                 cameraId, api1CameraId, facing,
-                clientPid, clientUid, getpid(), legacyMode,
+                clientPid, clientUid, getpid(),
                 halVersion, deviceVersion, effectiveApiLevel,
                 /*out*/&tmp)).isOk()) {
             return ret;
@@ -2434,7 +2429,8 @@
     return isUidActiveLocked(uid, callingPackage);
 }
 
-static const int kPollUidActiveTimeoutMillis = 50;
+static const int64_t kPollUidActiveTimeoutTotalMillis = 300;
+static const int64_t kPollUidActiveTimeoutMillis = 50;
 
 bool CameraService::UidPolicy::isUidActiveLocked(uid_t uid, String16 callingPackage) {
     // Non-app UIDs are considered always active
@@ -2462,7 +2458,8 @@
             // activity being resumed. The proper fix is very risky, so we temporary add
             // some polling which should happen pretty rarely anyway as the race is hard
             // to hit.
-            active = am.isUidActive(uid, callingPackage);
+            active = mActiveUids.find(uid) != mActiveUids.end();
+            if (!active) active = am.isUidActive(uid, callingPackage);
             if (active) {
                 break;
             }
@@ -2470,11 +2467,15 @@
                 startTimeMillis = uptimeMillis();
             }
             int64_t ellapsedTimeMillis = uptimeMillis() - startTimeMillis;
-            int64_t remainingTimeMillis = kPollUidActiveTimeoutMillis - ellapsedTimeMillis;
+            int64_t remainingTimeMillis = kPollUidActiveTimeoutTotalMillis - ellapsedTimeMillis;
             if (remainingTimeMillis <= 0) {
                 break;
             }
+            remainingTimeMillis = std::min(kPollUidActiveTimeoutMillis, remainingTimeMillis);
+
+            mUidLock.unlock();
             usleep(remainingTimeMillis * 1000);
+            mUidLock.lock();
         } while (true);
 
         if (active) {
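
The polling change above caps the total wait at 300 ms, sleeps in slices of at most 50 ms, and releases mUidLock around each sleep so uid-state updates can arrive in the meantime. Below is a self-contained sketch of that bounded-polling pattern built on standard threading primitives; the names are made up and this is not the camera service code:

    #include <algorithm>
    #include <chrono>
    #include <functional>
    #include <mutex>
    #include <thread>

    static bool pollUntilActive(std::mutex& lock, const std::function<bool()>& isActiveLocked) {
        using namespace std::chrono;
        constexpr milliseconds kTotalTimeout{300};  // overall budget (cf. kPollUidActiveTimeoutTotalMillis)
        constexpr milliseconds kSliceTimeout{50};   // per-iteration sleep cap (cf. kPollUidActiveTimeoutMillis)

        const auto start = steady_clock::now();
        std::unique_lock<std::mutex> guard(lock);
        while (true) {
            if (isActiveLocked()) return true;       // state says active, stop polling
            const auto elapsed = duration_cast<milliseconds>(steady_clock::now() - start);
            const auto remaining = kTotalTimeout - elapsed;
            if (remaining <= milliseconds::zero()) return false;
            const auto slice = std::min(kSliceTimeout, remaining);
            guard.unlock();                          // do not hold the lock while sleeping
            std::this_thread::sleep_for(slice);
            guard.lock();
        }
    }

    int main() {
        std::mutex m;
        bool active = false;
        std::thread t([&] {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
            std::lock_guard<std::mutex> g(m);
            active = true;                           // simulates a uid-active update landing
        });
        const bool ok = pollUntilActive(m, [&] { return active; });
        t.join();
        return ok ? 0 : 1;
    }
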
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 8d4bcdb..e4a18d3 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -585,8 +585,7 @@
     template<class CALLBACK, class CLIENT>
     binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
             int api1CameraId, int halVersion, const String16& clientPackageName,
-            int clientUid, int clientPid,
-            apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+            int clientUid, int clientPid, apiLevel effectiveApiLevel, bool shimUpdateOnly,
             /*out*/sp<CLIENT>& device);
 
     // Lock guarding camera service state
@@ -844,7 +843,7 @@
     static binder::Status makeClient(const sp<CameraService>& cameraService,
             const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
             int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
-            bool legacyMode, int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+            int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
             /*out*/sp<BasicClient>* client);
 
     status_t checkCameraAccess(const String16& opPackageName);
diff --git a/services/camera/libcameraservice/TEST_MAPPING b/services/camera/libcameraservice/TEST_MAPPING
new file mode 100644
index 0000000..6fdac68
--- /dev/null
+++ b/services/camera/libcameraservice/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+  "presubmit": [
+    {
+       "name": "cameraservice_test"
+    }
+  ]
+}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 65faac9..bf18c48 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -54,8 +54,7 @@
         int cameraFacing,
         int clientPid,
         uid_t clientUid,
-        int servicePid,
-        bool legacyMode):
+        int servicePid):
         Camera2ClientBase(cameraService, cameraClient, clientPackageName,
                 cameraDeviceId, api1CameraId, cameraFacing,
                 clientPid, clientUid, servicePid),
@@ -65,8 +64,6 @@
 
     SharedParameters::Lock l(mParameters);
     l.mParameters.state = Parameters::DISCONNECTED;
-
-    mLegacyMode = legacyMode;
 }
 
 status_t Camera2Client::initialize(sp<CameraProviderManager> manager, const String8& monitorTags) {
@@ -102,7 +99,7 @@
     {
         SharedParameters::Lock l(mParameters);
 
-        res = l.mParameters.initialize(&(mDevice->info()), mDeviceVersion);
+        res = l.mParameters.initialize(mDevice.get(), mDeviceVersion);
         if (res != OK) {
             ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
                     __FUNCTION__, mCameraId, strerror(-res), res);
@@ -250,6 +247,7 @@
     switch (p.sceneMode) {
         case ANDROID_CONTROL_SCENE_MODE_DISABLED:
             result.append("AUTO\n"); break;
+        CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY)
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION)
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT)
         CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_LANDSCAPE)
@@ -455,8 +453,6 @@
 
     mDevice->disconnect();
 
-    mDevice.clear();
-
     CameraService::Client::disconnect();
 
     return res;
@@ -779,33 +775,35 @@
     int lastJpegStreamId = mJpegProcessor->getStreamId();
     // If jpeg stream will slow down preview, make sure we remove it before starting preview
     if (params.slowJpegMode) {
-        // Pause preview if we are streaming
-        int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
-        if (activeRequestId != 0) {
-            res = mStreamingProcessor->togglePauseStream(/*pause*/true);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
+        if (lastJpegStreamId != NO_STREAM) {
+            // Pause preview if we are streaming
+            int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
+            if (activeRequestId != 0) {
+                res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+                            __FUNCTION__, mCameraId, strerror(-res), res);
+                }
+                res = mDevice->waitUntilDrained();
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                            __FUNCTION__, mCameraId, strerror(-res), res);
+                }
             }
-            res = mDevice->waitUntilDrained();
+
+            res = mJpegProcessor->deleteStream();
+
             if (res != OK) {
-                ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
+                ALOGE("%s: Camera %d: delete Jpeg stream failed: %s (%d)",
+                        __FUNCTION__, mCameraId,  strerror(-res), res);
             }
-        }
 
-        res = mJpegProcessor->deleteStream();
-
-        if (res != OK) {
-            ALOGE("%s: Camera %d: delete Jpeg stream failed: %s (%d)",
-                    __FUNCTION__, mCameraId,  strerror(-res), res);
-        }
-
-        if (activeRequestId != 0) {
-            res = mStreamingProcessor->togglePauseStream(/*pause*/false);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
+            if (activeRequestId != 0) {
+                res = mStreamingProcessor->togglePauseStream(/*pause*/false);
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
+                            __FUNCTION__, mCameraId, strerror(-res), res);
+                }
             }
         }
     } else {
@@ -1440,7 +1438,7 @@
     return OK;
 }
 
-status_t Camera2Client::takePicture(int msgType) {
+status_t Camera2Client::takePicture(int /*msgType*/) {
     ATRACE_CALL();
     Mutex::Autolock icl(mBinderSerializationLock);
     status_t res;
@@ -1539,7 +1537,7 @@
     // Need HAL to have correct settings before (possibly) triggering precapture
     syncWithDevice();
 
-    res = mCaptureSequencer->startCapture(msgType);
+    res = mCaptureSequencer->startCapture();
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to start capture: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
@@ -1659,27 +1657,6 @@
         return OK;
     }
 
-    // the camera2 api legacy mode can unconditionally disable the shutter sound
-    if (mLegacyMode) {
-        ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
-        l.mParameters.playShutterSound = false;
-        return OK;
-    }
-
-    // Disabling shutter sound may not be allowed. In that case only
-    // allow the mediaserver process to disable the sound.
-    char value[PROPERTY_VALUE_MAX];
-    property_get("ro.camera.sound.forced", value, "0");
-    if (strncmp(value, "0", 2) != 0) {
-        // Disabling shutter sound is not allowed. Deny if the current
-        // process is not mediaserver.
-        if (getCallingPid() != getpid()) {
-            ALOGE("Failed to disable shutter sound. Permission denied (pid %d)",
-                    getCallingPid());
-            return PERMISSION_DENIED;
-        }
-    }
-
     l.mParameters.playShutterSound = false;
     return OK;
 }
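
The reworked slowJpegMode block earlier in this file only pauses the preview and deletes the JPEG stream when a JPEG stream actually exists, then resumes the preview. A compact sketch of that guarded pause/teardown/resume sequence; Device and JpegProcessor below are stand-ins, not the real classes:

    #include <iostream>

    constexpr int NO_STREAM = -1;

    struct Device {
        void pausePreview()  { std::cout << "pause preview\n"; }
        void resumePreview() { std::cout << "resume preview\n"; }
    };

    struct JpegProcessor {
        int streamId = NO_STREAM;
        void deleteStream() { std::cout << "delete jpeg stream " << streamId << "\n"; streamId = NO_STREAM; }
    };

    static void prepareSlowJpegPreview(Device& dev, JpegProcessor& jpeg, bool previewActive) {
        if (jpeg.streamId == NO_STREAM) return;   // nothing to tear down, skip the pause entirely
        if (previewActive) dev.pausePreview();    // stop streaming before reconfiguring
        jpeg.deleteStream();
        if (previewActive) dev.resumePreview();   // restore preview afterwards
    }

    int main() {
        Device dev;
        JpegProcessor jpeg;
        jpeg.streamId = 3;
        prepareSlowJpegPreview(dev, jpeg, /*previewActive=*/true);
        return 0;
    }
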
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 44929c3..a9ea271 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -96,8 +96,7 @@
             int cameraFacing,
             int clientPid,
             uid_t clientUid,
-            int servicePid,
-            bool legacyMode);
+            int servicePid);
 
     virtual ~Camera2Client();
 
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index f1203f9..ce44efe 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -40,7 +40,7 @@
         const String16& clientPackageName,
         int cameraId, int cameraFacing,
         int clientPid, int clientUid,
-        int servicePid, bool legacyMode):
+        int servicePid):
         Client(cameraService, cameraClient, clientPackageName,
                 String8::format("%d", cameraId), cameraId, cameraFacing, clientPid,
                 clientUid, servicePid)
@@ -57,7 +57,6 @@
     // Callback is disabled by default
     mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
     mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
-    mLegacyMode = legacyMode;
     mPlayShutterSound = true;
     LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
 }
@@ -715,26 +714,6 @@
         return OK;
     }
 
-    // the camera2 api legacy mode can unconditionally disable the shutter sound
-    if (mLegacyMode) {
-        ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
-        mPlayShutterSound = false;
-        return OK;
-    }
-
-    // Disabling shutter sound may not be allowed. In that case only
-    // allow the mediaserver process to disable the sound.
-    char value[PROPERTY_VALUE_MAX];
-    property_get("ro.camera.sound.forced", value, "0");
-    if (strcmp(value, "0") != 0) {
-        // Disabling shutter sound is not allowed. Deny if the current
-        // process is not mediaserver.
-        if (getCallingPid() != getpid()) {
-            ALOGE("Failed to disable shutter sound. Permission denied (pid %d)", getCallingPid());
-            return PERMISSION_DENIED;
-        }
-    }
-
     mPlayShutterSound = false;
     return OK;
 }
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 1910536..9530b6c 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -68,8 +68,7 @@
             int cameraFacing,
             int clientPid,
             int clientUid,
-            int servicePid,
-            bool legacyMode = false);
+            int servicePid);
     ~CameraClient();
 
     virtual status_t initialize(sp<CameraProviderManager> manager,
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 1ee216f..f42cdd3 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -50,8 +50,7 @@
         mStateTransitionCount(0),
         mTriggerId(0),
         mTimeoutCount(0),
-        mCaptureId(Camera2Client::kCaptureRequestIdStart),
-        mMsgType(0) {
+        mCaptureId(Camera2Client::kCaptureRequestIdStart) {
     ALOGV("%s", __FUNCTION__);
 }
 
@@ -64,7 +63,7 @@
     mZslProcessor = processor;
 }
 
-status_t CaptureSequencer::startCapture(int msgType) {
+status_t CaptureSequencer::startCapture() {
     ALOGV("%s", __FUNCTION__);
     ATRACE_CALL();
     Mutex::Autolock l(mInputMutex);
@@ -73,7 +72,6 @@
         return INVALID_OPERATION;
     }
     if (!mStartCapture) {
-        mMsgType = msgType;
         mStartCapture = true;
         mStartCaptureSignal.signal();
     }
@@ -386,7 +384,7 @@
 
     SharedParameters::Lock l(client->getParameters());
     /* warning: this also locks a SharedCameraCallbacks */
-    shutterNotifyLocked(l.mParameters, client, mMsgType);
+    shutterNotifyLocked(l.mParameters, client);
     mShutterNotified = true;
     mTimeoutCount = kMaxTimeoutsForCaptureEnd;
     return STANDARD_CAPTURE_WAIT;
@@ -610,7 +608,7 @@
         if (!mShutterNotified) {
             SharedParameters::Lock l(client->getParameters());
             /* warning: this also locks a SharedCameraCallbacks */
-            shutterNotifyLocked(l.mParameters, client, mMsgType);
+            shutterNotifyLocked(l.mParameters, client);
             mShutterNotified = true;
         }
     } else if (mTimeoutCount <= 0) {
@@ -715,12 +713,11 @@
 }
 
 /*static*/ void CaptureSequencer::shutterNotifyLocked(const Parameters &params,
-            const sp<Camera2Client>& client, int msgType) {
+            const sp<Camera2Client>& client) {
     ATRACE_CALL();
 
     if (params.state == Parameters::STILL_CAPTURE
-        && params.playShutterSound
-        && (msgType & CAMERA_MSG_SHUTTER)) {
+        && params.playShutterSound) {
         client->getCameraService()->playSound(CameraService::SOUND_SHUTTER);
     }
 
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index f2e3750..c23b12d 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -51,7 +51,7 @@
     void setZslProcessor(const wp<ZslProcessor>& processor);
 
     // Begin still image capture
-    status_t startCapture(int msgType);
+    status_t startCapture();
 
     // Wait until current image capture completes; returns immediately if no
     // capture is active. Returns TIMED_OUT if capture does not complete during
@@ -145,7 +145,6 @@
     bool mAeInPrecapture;
 
     int32_t mCaptureId;
-    int mMsgType;
 
     // Main internal methods
 
@@ -172,7 +171,7 @@
 
     // Emit Shutter/Raw callback to java, and maybe play a shutter sound
     static void shutterNotifyLocked(const Parameters &params,
-            const sp<Camera2Client>& client, int msgType);
+            const sp<Camera2Client>& client);
 };
 
 }; // namespace camera2
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index d66dec4..28d186a 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -41,23 +41,29 @@
         int cameraFacing) :
         cameraId(cameraId),
         cameraFacing(cameraFacing),
-        info(NULL) {
+        info(NULL),
+        mDefaultSceneMode(ANDROID_CONTROL_SCENE_MODE_DISABLED) {
 }
 
 Parameters::~Parameters() {
 }
 
-status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) {
+status_t Parameters::initialize(CameraDeviceBase *device, int deviceVersion) {
     status_t res;
+    if (device == nullptr) {
+        ALOGE("%s: device is null!", __FUNCTION__);
+        return BAD_VALUE;
+    }
 
-    if (info->entryCount() == 0) {
+    const CameraMetadata& info = device->info();
+    if (info.entryCount() == 0) {
         ALOGE("%s: No static information provided!", __FUNCTION__);
         return BAD_VALUE;
     }
-    Parameters::info = info;
+    Parameters::info = &info;
     mDeviceVersion = deviceVersion;
 
-    res = buildFastInfo();
+    res = buildFastInfo(device);
     if (res != OK) return res;
 
     res = buildQuirks();
@@ -557,6 +563,10 @@
                     noSceneModes = true;
                     break;
                 case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
+                    // Face priority can be used as alternate default if supported.
+                    // Per the API contract it shouldn't override the user-set flash,
+                    // white balance, and focus modes.
+                    mDefaultSceneMode = availableSceneModes.data.u8[i];
                     // Not in old API
                     addComma = false;
                     break;
@@ -761,17 +771,7 @@
     focusingAreas.clear();
     focusingAreas.add(Parameters::Area(0,0,0,0,0));
 
-    if (fastInfo.isExternalCamera) {
-        params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, -1.0);
-    } else {
-        camera_metadata_ro_entry_t availableFocalLengths =
-            staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
-        if (!availableFocalLengths.count) return NO_INIT;
-
-        float minFocalLength = availableFocalLengths.data.f[0];
-        params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength);
-    }
-
+    params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, fastInfo.defaultFocalLength);
 
     float horizFov, vertFov;
     res = calculatePictureFovs(&horizFov, &vertFov);
@@ -993,7 +993,7 @@
     return paramsFlattened;
 }
 
-status_t Parameters::buildFastInfo() {
+status_t Parameters::buildFastInfo(CameraDeviceBase *device) {
 
     camera_metadata_ro_entry_t activeArraySize =
         staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4);
@@ -1109,20 +1109,12 @@
             focusDistanceCalibration.data.u8[0] !=
             ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED);
 
-
-    camera_metadata_ro_entry_t hwLevel = staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
-    if (!hwLevel.count) return NO_INIT;
-    fastInfo.isExternalCamera =
-            hwLevel.data.u8[0] == ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL;
-
-    camera_metadata_ro_entry_t availableFocalLengths =
-        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, /*required*/false);
-    if (!availableFocalLengths.count && !fastInfo.isExternalCamera) return NO_INIT;
+    res = getDefaultFocalLength(device);
+    if (res != OK) return res;
 
     SortedVector<int32_t> availableFormats = getAvailableOutputFormats();
     if (!availableFormats.size()) return NO_INIT;
 
-
     if (sceneModeOverrides.count > 0) {
         // sceneModeOverrides is defined to have 3 entries for each scene mode,
         // which are AE, AWB, and AF override modes the HAL wants for that scene
@@ -1200,19 +1192,6 @@
     fastInfo.bestFaceDetectMode = bestFaceDetectMode;
     fastInfo.maxFaces = maxFaces;
 
-    // Find smallest (widest-angle) focal length to use as basis of still
-    // picture FOV reporting.
-    if (fastInfo.isExternalCamera) {
-        fastInfo.minFocalLength = -1.0;
-    } else {
-        fastInfo.minFocalLength = availableFocalLengths.data.f[0];
-        for (size_t i = 1; i < availableFocalLengths.count; i++) {
-            if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) {
-                fastInfo.minFocalLength = availableFocalLengths.data.f[i];
-            }
-        }
-    }
-
     // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888
     fastInfo.useFlexibleYuv = false;
     for (size_t i = 0; i < availableFormats.size(); i++) {
@@ -1760,7 +1739,7 @@
 
     // SCENE_MODE
     validatedParams.sceneMode = sceneModeStringToEnum(
-        newParams.get(CameraParameters::KEY_SCENE_MODE) );
+        newParams.get(CameraParameters::KEY_SCENE_MODE), mDefaultSceneMode);
     if (validatedParams.sceneMode != sceneMode &&
             validatedParams.sceneMode !=
             ANDROID_CONTROL_SCENE_MODE_DISABLED) {
@@ -1778,7 +1757,7 @@
         }
     }
     bool sceneModeSet =
-            validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED;
+            validatedParams.sceneMode != mDefaultSceneMode;
 
     // FLASH_MODE
     if (sceneModeSet) {
@@ -2157,7 +2136,7 @@
     uint8_t reqSceneMode =
             sceneModeActive ? sceneMode :
             enableFaceDetect ? (uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY :
-            (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED;
+            mDefaultSceneMode;
     res = request->update(ANDROID_CONTROL_SCENE_MODE,
             &reqSceneMode, 1);
     if (res != OK) return res;
@@ -2446,6 +2425,50 @@
     return true;
 }
 
+status_t Parameters::getDefaultFocalLength(CameraDeviceBase *device) {
+    if (device == nullptr) {
+        ALOGE("%s: Camera device is nullptr", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    camera_metadata_ro_entry_t hwLevel = staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
+    if (!hwLevel.count) return NO_INIT;
+    fastInfo.isExternalCamera =
+            hwLevel.data.u8[0] == ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL;
+
+    camera_metadata_ro_entry_t availableFocalLengths =
+        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, /*required*/false);
+    if (!availableFocalLengths.count && !fastInfo.isExternalCamera) return NO_INIT;
+
+    // Determine the default focal length, preferring the PREVIEW template's value.
+    if (fastInfo.isExternalCamera) {
+        fastInfo.defaultFocalLength = -1.0;
+    } else {
+        // Find smallest (widest-angle) focal length to use as basis of still
+        // picture FOV reporting.
+        fastInfo.defaultFocalLength = availableFocalLengths.data.f[0];
+        for (size_t i = 1; i < availableFocalLengths.count; i++) {
+            if (fastInfo.defaultFocalLength > availableFocalLengths.data.f[i]) {
+                fastInfo.defaultFocalLength = availableFocalLengths.data.f[i];
+            }
+        }
+
+        // Use focal length in preview template if it exists
+        CameraMetadata previewTemplate;
+        status_t res = device->createDefaultRequest(CAMERA3_TEMPLATE_PREVIEW, &previewTemplate);
+        if (res != OK) {
+            ALOGE("%s: Failed to create default PREVIEW request: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+        camera_metadata_entry entry = previewTemplate.find(ANDROID_LENS_FOCAL_LENGTH);
+        if (entry.count != 0) {
+            fastInfo.defaultFocalLength = entry.data.f[0];
+        }
+    }
+    return OK;
+}
+
 const char* Parameters::getStateName(State state) {
 #define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
     switch(state) {
@@ -2589,12 +2612,12 @@
         -1;
 }
 
-int Parameters::sceneModeStringToEnum(const char *sceneMode) {
+int Parameters::sceneModeStringToEnum(const char *sceneMode, uint8_t defaultSceneMode) {
     return
         !sceneMode ?
-            ANDROID_CONTROL_SCENE_MODE_DISABLED :
+            defaultSceneMode :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ?
-            ANDROID_CONTROL_SCENE_MODE_DISABLED :
+            defaultSceneMode :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ?
             ANDROID_CONTROL_SCENE_MODE_ACTION :
         !strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ?
@@ -3241,12 +3264,12 @@
     if (horizFov != NULL) {
         *horizFov = 180 / M_PI * 2 *
                 atanf(horizCropFactor * sensorSize.data.f[0] /
-                        (2 * fastInfo.minFocalLength));
+                        (2 * fastInfo.defaultFocalLength));
     }
     if (vertFov != NULL) {
         *vertFov = 180 / M_PI * 2 *
                 atanf(vertCropFactor * sensorSize.data.f[1] /
-                        (2 * fastInfo.minFocalLength));
+                        (2 * fastInfo.defaultFocalLength));
     }
     return OK;
 }
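
The Parameters.cpp hunks above move the focal-length selection into getDefaultFocalLength(), which prefers the focal length carried in the PREVIEW request template and otherwise falls back to the smallest (widest-angle) advertised focal length, or -1.0 for external cameras. A minimal standalone sketch of that selection order, using hypothetical stand-in types rather than the framework's:

#include <algorithm>
#include <optional>
#include <vector>

// Hypothetical stand-ins for the framework types; illustrative only.
struct StaticInfo {
    bool isExternalCamera = false;
    std::vector<float> availableFocalLengths;  // assumed non-empty for non-external cameras
};

// Selection order mirrored from the diff above:
//   1) external cameras report -1 (no meaningful focal length),
//   2) otherwise start from the smallest advertised focal length,
//   3) and prefer the focal length found in the PREVIEW template, if any.
float pickDefaultFocalLength(const StaticInfo& info,
                             std::optional<float> previewTemplateFocalLength) {
    if (info.isExternalCamera) return -1.0f;
    float widest = *std::min_element(info.availableFocalLengths.begin(),
                                     info.availableFocalLengths.end());
    return previewTemplateFocalLength.value_or(widest);
}

The same value then feeds the FOV calculation further down (calculatePictureFovs), which is why minFocalLength is renamed to defaultFocalLength.
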
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 97f8ea7..42e7a47 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -30,6 +30,8 @@
 #include <camera/CameraParameters2.h>
 #include <camera/CameraMetadata.h>
 
+#include "common/CameraDeviceBase.h"
+
 namespace android {
 namespace camera2 {
 
@@ -241,7 +243,7 @@
         };
         DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides;
         bool isExternalCamera;
-        float minFocalLength;
+        float defaultFocalLength;
         bool useFlexibleYuv;
         Size maxJpegSize;
         Size maxZslSize;
@@ -264,10 +266,10 @@
     ~Parameters();
 
     // Sets up default parameters
-    status_t initialize(const CameraMetadata *info, int deviceVersion);
+    status_t initialize(CameraDeviceBase *device, int deviceVersion);
 
     // Build fast-access device static info from static info
-    status_t buildFastInfo();
+    status_t buildFastInfo(CameraDeviceBase *device);
     // Query for quirks from static info
     status_t buildQuirks();
 
@@ -300,6 +302,9 @@
     // whether zero shutter lag should be used for non-recording operation
     bool useZeroShutterLag() const;
 
+    // Get default focal length
+    status_t getDefaultFocalLength(CameraDeviceBase *camera);
+
     // Calculate the crop region rectangle, either tightly about the preview
     // resolution, or a region just based on the active array; both take
     // into account the current zoom level.
@@ -326,7 +331,7 @@
     static const char* wbModeEnumToString(uint8_t wbMode);
     static int effectModeStringToEnum(const char *effectMode);
     static int abModeStringToEnum(const char *abMode);
-    static int sceneModeStringToEnum(const char *sceneMode);
+    static int sceneModeStringToEnum(const char *sceneMode, uint8_t defaultScene);
     static flashMode_t flashModeStringToEnum(const char *flashMode);
     static const char* flashModeEnumToString(flashMode_t flashMode);
     static focusMode_t focusModeStringToEnum(const char *focusMode);
@@ -434,6 +439,7 @@
     Size getMaxSize(const Vector<Size>& sizes);
 
     int mDeviceVersion;
+    uint8_t mDefaultSceneMode;
 };
 
 // This class encapsulates the Parameters class so that it can only be accessed
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 98d0534..84428c2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -661,7 +661,8 @@
         }
 
         sp<Surface> surface;
-        res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer);
+        res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
+                physicalCameraId);
 
         if (!res.isOk())
             return res;
@@ -889,6 +890,8 @@
 
     const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
             outputConfiguration.getGraphicBufferProducers();
+    String8 physicalCameraId(outputConfiguration.getPhysicalCameraId());
+
     auto producerCount = bufferProducers.size();
     if (producerCount == 0) {
         ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -942,7 +945,7 @@
         OutputStreamInfo outInfo;
         sp<Surface> surface;
         res = createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false, surface,
-                newOutputsMap.valueAt(i));
+                newOutputsMap.valueAt(i), physicalCameraId);
         if (!res.isOk())
             return res;
 
@@ -1021,7 +1024,8 @@
 
 binder::Status CameraDeviceClient::createSurfaceFromGbp(
         OutputStreamInfo& streamInfo, bool isStreamInfoValid,
-        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp) {
+        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+        const String8& physicalId) {
 
     // bufferProducer must be non-null
     if (gbp == nullptr) {
@@ -1098,7 +1102,7 @@
     // Round dimensions to the nearest dimensions available for this format
     if (flexibleConsumer && isPublicFormat(format) &&
             !CameraDeviceClient::roundBufferDimensionNearest(width, height,
-            format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+            format, dataSpace, mDevice->info(physicalId), /*out*/&width, /*out*/&height)) {
         String8 msg = String8::format("Camera %s: No supported stream configurations with "
                 "format %#x defined, failed to create output stream",
                 mCameraIdStr.string(), format);
@@ -1468,6 +1472,7 @@
 
     const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
             outputConfiguration.getGraphicBufferProducers();
+    String8 physicalId(outputConfiguration.getPhysicalCameraId());
 
     if (bufferProducers.size() == 0) {
         ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -1521,7 +1526,7 @@
 
         sp<Surface> surface;
         res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
-                surface, bufferProducer);
+                surface, bufferProducer, physicalId);
 
         if (!res.isOk())
             return res;
@@ -1677,9 +1682,12 @@
 
     // WORKAROUND: HAL refuses to disconnect while there's streams in flight
     {
-        mDevice->clearStreamingRequest();
-
+        int64_t lastFrameNumber;
         status_t code;
+        if ((code = mDevice->flush(&lastFrameNumber)) != OK) {
+            ALOGE("%s: flush failed with code 0x%x", __FUNCTION__, code);
+        }
+
         if ((code = mDevice->waitUntilDrained()) != OK) {
             ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
                   code);
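
The detachDevice() workaround above now flushes in-flight requests before waiting for the HAL to drain, and a flush failure is only logged rather than aborting the teardown. A rough sketch of that call pattern, with a hypothetical device interface standing in for the camera device:

#include <cstdint>
#include <cstdio>

// Hypothetical interface; only the ordering of the calls is the point here.
struct Device {
    int flush(int64_t* lastFrameNumber) { *lastFrameNumber = -1; return 0; }
    int waitUntilDrained() { return 0; }
};

// Flush first so pending requests are returned quickly, then wait for the
// HAL to drain; neither failure stops the disconnect sequence.
void drainBeforeDisconnect(Device& dev) {
    int64_t lastFrameNumber = -1;
    if (int code = dev.flush(&lastFrameNumber); code != 0) {
        std::fprintf(stderr, "flush failed with code 0x%x\n", static_cast<unsigned>(code));
    }
    if (int code = dev.waitUntilDrained(); code != 0) {
        std::fprintf(stderr, "waitUntilDrained failed with code 0x%x\n", static_cast<unsigned>(code));
    }
}
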
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 5aaf5aa..c30561d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -258,7 +258,8 @@
     // Create a Surface from an IGraphicBufferProducer. Returns error if
     // IGraphicBufferProducer's property doesn't match with streamInfo
     binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
-            sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp);
+            sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+            const String8& physicalCameraId);
 
 
     // Utility method to insert the surface into SurfaceMap
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ce006a7..aeea473 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -57,13 +57,13 @@
                 cameraId, api1CameraId, cameraFacing, clientPid, clientUid, servicePid),
         mSharedCameraCallbacks(remoteCallback),
         mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
+        mDevice(new Camera3Device(cameraId)),
         mDeviceActive(false), mApi1CameraId(api1CameraId)
 {
     ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
             String8(clientPackageName).string(), clientPid, clientUid);
 
     mInitialClientPid = clientPid;
-    mDevice = new Camera3Device(cameraId);
     LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
 }
 
@@ -206,8 +206,6 @@
     if (mDevice == 0) return;
     mDevice->disconnect();
 
-    mDevice.clear();
-
     ALOGV("Camera %s: Detach complete", TClientBase::mCameraIdStr.string());
 }
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index e74fbdf..6693847 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -130,7 +130,10 @@
     /** CameraDeviceBase instance wrapping HAL3+ entry */
 
     const int mDeviceVersion;
-    sp<CameraDeviceBase>  mDevice;
+
+    // Set to const to avoid mDevice being updated (updating an sp<> is racy) during
+    // dumpDevice (which needs to stay lock-free for debugging purposes)
+    const sp<CameraDeviceBase>  mDevice;
 
     /** Utility members */
 
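
The const qualifier added to mDevice above exists so that dump paths can read the pointer without taking a lock: reassigning an sp<> while another thread reads it is a data race, but a pointer that is bound once in the constructor and never rebound can be read freely, assuming the pointed-to object is itself safe to call concurrently. A small illustrative sketch of the same pattern, using std::shared_ptr as a stand-in for sp<>:

#include <memory>
#include <utility>

struct Device { void dump() const {} };

class ClientBase {
  public:
    explicit ClientBase(std::shared_ptr<Device> device) : mDevice(std::move(device)) {}

    // Safe to call from a debug/dump thread without holding any lock: mDevice
    // can never be reassigned, so there is no racy pointer update to observe.
    void dumpDevice() const { if (mDevice) mDevice->dump(); }

  private:
    // Bound once in the constructor, never rebound afterwards.
    const std::shared_ptr<Device> mDevice;
};

This is also why the constructor hunk in Camera2ClientBase.cpp moves the Camera3Device allocation into the initializer list and detachDevice() no longer calls mDevice.clear().
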
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 0ba7403..98c1b5e 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -69,6 +69,10 @@
      * The device's static characteristics metadata buffer
      */
     virtual const CameraMetadata& info() const = 0;
+    /**
+     * The physical camera device's static characteristics metadata buffer
+     */
+    virtual const CameraMetadata& info(const String8& physicalId) const = 0;
 
     struct PhysicalCameraSettings {
         std::string cameraId;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 43f1a91..3be6399 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -336,6 +336,7 @@
         const hardware::hidl_string& /*fqName*/,
         const hardware::hidl_string& name,
         bool /*preexisting*/) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
     {
         std::lock_guard<std::mutex> lock(mInterfaceMutex);
 
@@ -458,6 +459,7 @@
 }
 
 status_t CameraProviderManager::removeProvider(const std::string& provider) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
     std::unique_lock<std::mutex> lock(mInterfaceMutex);
     std::vector<String8> removedDeviceIds;
     status_t res = NAME_NOT_FOUND;
@@ -631,7 +633,12 @@
 
     mUniqueCameraIds.insert(id);
     if (isAPI1Compatible) {
-        mUniqueAPI1CompatibleCameraIds.push_back(id);
+        // addDevice can be called more than once for the same camera id if HAL
+        // supports openLegacy.
+        if (std::find(mUniqueAPI1CompatibleCameraIds.begin(), mUniqueAPI1CompatibleCameraIds.end(),
+                id) == mUniqueAPI1CompatibleCameraIds.end()) {
+            mUniqueAPI1CompatibleCameraIds.push_back(id);
+        }
     }
 
     if (parsedId != nullptr) {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index b8b8b8c..c523c2d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -246,6 +246,9 @@
     wp<StatusListener> mListener;
     ServiceInteractionProxy* mServiceProxy;
 
+    // mProviderLifecycleLock is locked during onRegistration and removeProvider
+    mutable std::mutex mProviderLifecycleLock;
+
     static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
 
     struct ProviderInfo :
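
The new mProviderLifecycleLock serializes provider registration and removal, and it is taken before mInterfaceMutex on both paths so the two locks are always acquired in the same order. A compact sketch of that shape (hypothetical names, not the real manager):

#include <algorithm>
#include <mutex>
#include <string>
#include <vector>

class ProviderRegistry {
  public:
    void onRegistration(const std::string& name) {
        std::lock_guard<std::mutex> lifecycle(mLifecycleLock);  // serializes add/remove
        std::lock_guard<std::mutex> lock(mInterfaceMutex);      // protects the tables
        // Registration may be reported more than once; keep the list unique,
        // mirroring the duplicate check added for API1-compatible camera ids.
        if (std::find(mProviders.begin(), mProviders.end(), name) == mProviders.end()) {
            mProviders.push_back(name);
        }
    }

    void removeProvider(const std::string& name) {
        std::lock_guard<std::mutex> lifecycle(mLifecycleLock);
        std::lock_guard<std::mutex> lock(mInterfaceMutex);
        mProviders.erase(std::remove(mProviders.begin(), mProviders.end(), name),
                         mProviders.end());
    }

  private:
    std::mutex mLifecycleLock;   // counterpart of mProviderLifecycleLock
    std::mutex mInterfaceMutex;
    std::vector<std::string> mProviders;
};
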
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 543914e..7656407 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -90,7 +90,7 @@
 {
     ATRACE_CALL();
     ALOGV("%s: Tearing down for camera id %s", __FUNCTION__, mId.string());
-    disconnect();
+    disconnectImpl();
 }
 
 const String8& Camera3Device::getId() const {
@@ -121,11 +121,25 @@
 
     res = manager->getCameraCharacteristics(mId.string(), &mDeviceInfo);
     if (res != OK) {
-        SET_ERR_L("Could not retrive camera characteristics: %s (%d)", strerror(-res), res);
+        SET_ERR_L("Could not retrieve camera characteristics: %s (%d)", strerror(-res), res);
         session->close();
         return res;
     }
 
+    std::vector<std::string> physicalCameraIds;
+    bool isLogical = CameraProviderManager::isLogicalCamera(mDeviceInfo, &physicalCameraIds);
+    if (isLogical) {
+        for (auto& physicalId : physicalCameraIds) {
+            res = manager->getCameraCharacteristics(physicalId, &mPhysicalDeviceInfoMap[physicalId]);
+            if (res != OK) {
+                SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)",
+                        physicalId.c_str(), strerror(-res), res);
+                session->close();
+                return res;
+            }
+        }
+    }
+
     std::shared_ptr<RequestMetadataQueue> queue;
     auto requestQueueRet = session->getCaptureRequestMetadataQueue(
         [&queue](const auto& descriptor) {
@@ -261,8 +275,13 @@
 }
 
 status_t Camera3Device::disconnect() {
+    return disconnectImpl();
+}
+
+status_t Camera3Device::disconnectImpl() {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock stLock(mTrackerLock);
 
     ALOGI("%s: E", __FUNCTION__);
 
@@ -719,7 +738,7 @@
     return OK;
 }
 
-const CameraMetadata& Camera3Device::info() const {
+const CameraMetadata& Camera3Device::info(const String8& physicalId) const {
     ALOGVV("%s: E", __FUNCTION__);
     if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
                     mStatus == STATUS_ERROR)) {
@@ -727,7 +746,22 @@
                 mStatus == STATUS_ERROR ?
                 "when in error state" : "before init");
     }
-    return mDeviceInfo;
+    if (physicalId.isEmpty()) {
+        return mDeviceInfo;
+    } else {
+        std::string id(physicalId.c_str());
+        if (mPhysicalDeviceInfoMap.find(id) != mPhysicalDeviceInfoMap.end()) {
+            return mPhysicalDeviceInfoMap.at(id);
+        } else {
+            ALOGE("%s: Invalid physical camera id %s", __FUNCTION__, physicalId.c_str());
+            return mDeviceInfo;
+        }
+    }
+}
+
+const CameraMetadata& Camera3Device::info() const {
+    String8 emptyId;
+    return info(emptyId);
 }
 
 status_t Camera3Device::checkStatusOkToCaptureLocked() {
@@ -2143,8 +2177,14 @@
 
         res = stream->finishConfiguration();
         if (res != OK) {
-            SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
-                   stream->getId(), strerror(-res), res);
+            // If finishConfiguration fails due to an abandoned surface, do not set
+            // the device to the error state.
+            bool isSurfaceAbandoned =
+                    (res == NO_INIT || res == DEAD_OBJECT) && stream->isAbandoned();
+            if (!isSurfaceAbandoned) {
+                SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                        stream->getId(), strerror(-res), res);
+            }
             return res;
         }
     }
@@ -2361,9 +2401,16 @@
             //present streams end up with outstanding buffers that will
             //not get drained.
             internalUpdateStatusLocked(STATUS_ACTIVE);
+        } else if (rc == DEAD_OBJECT) {
+            // DEAD_OBJECT can be returned if either the consumer surface is
+            // abandoned, or the HAL has died.
+            // - If the HAL has died, the configureStreamsLocked call will set
+            //   the device to the error state.
+            // - If the surface is abandoned, we should not set the device to
+            //   the error state.
+            ALOGE("Failed to re-configure camera due to abandoned surface");
         } else {
-            setErrorStateLocked("%s: Failed to re-configure camera: %d",
-                    __FUNCTION__, rc);
+            SET_ERR_L("Failed to re-configure camera: %d", rc);
         }
     } else {
         ALOGE("%s: Failed to pause streaming: %d", __FUNCTION__, rc);
@@ -2497,6 +2544,9 @@
             CLOGE("Can't finish configuring input stream %d: %s (%d)",
                     mInputStream->getId(), strerror(-res), res);
             cancelStreamsConfigurationLocked();
+            if ((res == NO_INIT || res == DEAD_OBJECT) && mInputStream->isAbandoned()) {
+                return DEAD_OBJECT;
+            }
             return BAD_VALUE;
         }
     }
@@ -2510,6 +2560,9 @@
                 CLOGE("Can't finish configuring output stream %d: %s (%d)",
                         outputStream->getId(), strerror(-res), res);
                 cancelStreamsConfigurationLocked();
+                if ((res == NO_INIT || res == DEAD_OBJECT) && outputStream->isAbandoned()) {
+                    return DEAD_OBJECT;
+                }
                 return BAD_VALUE;
             }
         }
@@ -2689,18 +2742,19 @@
 status_t Camera3Device::registerInFlight(uint32_t frameNumber,
         int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
         bool hasAppCallback, nsecs_t maxExpectedDuration,
-        std::set<String8>& physicalCameraIds) {
+        std::set<String8>& physicalCameraIds, bool isStillCapture) {
     ATRACE_CALL();
     Mutex::Autolock l(mInFlightLock);
 
     ssize_t res;
     res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
-            hasAppCallback, maxExpectedDuration, physicalCameraIds));
+            hasAppCallback, maxExpectedDuration, physicalCameraIds, isStillCapture));
     if (res < 0) return res;
 
     if (mInFlightMap.size() == 1) {
-        // hold mLock to prevent race with disconnect
-        Mutex::Autolock l(mLock);
+        // Hold a separate dedicated tracker lock to prevent a race with disconnect and to
+        // avoid a deadlock during reprocess requests.
+        Mutex::Autolock l(mTrackerLock);
         if (mStatusTracker != nullptr) {
             mStatusTracker->markComponentActive(mInFlightStatusId);
         }
@@ -2733,8 +2787,9 @@
 
     // Indicate idle inFlightMap to the status tracker
     if (mInFlightMap.size() == 0) {
-        // hold mLock to prevent race with disconnect
-        Mutex::Autolock l(mLock);
+        // Hold a separate dedicated tracker lock to prevent a race with disconnect and to
+        // avoid a deadlock during reprocess requests.
+        Mutex::Autolock l(mTrackerLock);
         if (mStatusTracker != nullptr) {
             mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
         }
@@ -2759,6 +2814,10 @@
     if (request.numBuffersLeft == 0 &&
             (request.skipResultMetadata ||
             (request.haveResultMetadata && shutterTimestamp != 0))) {
+        if (request.stillCapture) {
+            ATRACE_ASYNC_END("still capture", frameNumber);
+        }
+
         ATRACE_ASYNC_END("frame capture", frameNumber);
 
         // Sanity check - if sensor timestamp matches shutter timestamp in the
@@ -3889,18 +3948,17 @@
     }
 
     hardware::details::return_status err;
+    auto resultCallback =
+        [&status, &numRequestProcessed] (auto s, uint32_t n) {
+                status = s;
+                *numRequestProcessed = n;
+        };
     if (hidlSession_3_4 != nullptr) {
         err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
-            [&status, &numRequestProcessed] (auto s, uint32_t n) {
-                status = s;
-                *numRequestProcessed = n;
-            });
+                                                         resultCallback);
     } else {
         err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
-            [&status, &numRequestProcessed] (auto s, uint32_t n) {
-                status = s;
-                *numRequestProcessed = n;
-            });
+                                                  resultCallback);
     }
     if (!err.isOk()) {
         ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
@@ -4710,6 +4768,7 @@
 status_t Camera3Device::RequestThread::prepareHalRequests() {
     ATRACE_CALL();
 
+    bool batchedRequest = mNextRequests[0].captureRequest->mBatchSize > 1;
     for (size_t i = 0; i < mNextRequests.size(); i++) {
         auto& nextRequest = mNextRequests.editItemAt(i);
         sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
@@ -4733,7 +4792,10 @@
         mPrevTriggers = triggerCount;
 
         // If the request is the same as last, or we had triggers last time
-        bool newRequest = mPrevRequest != captureRequest || triggersMixedIn;
+        bool newRequest = (mPrevRequest != captureRequest || triggersMixedIn) &&
+                // Request settings are all the same within one batch, so only treat the first
+                // request in a batch as new
+                !(batchedRequest && i > 0);
         if (newRequest) {
             /**
              * HAL workaround:
@@ -4882,15 +4944,24 @@
         // preview), and the current request is not the last one in the batch,
         // do not send callback to the app.
         bool hasCallback = true;
-        if (mNextRequests[0].captureRequest->mBatchSize > 1 && i != mNextRequests.size()-1) {
+        if (batchedRequest && i != mNextRequests.size()-1) {
             hasCallback = false;
         }
+        bool isStillCapture = false;
+        if (!mNextRequests[0].captureRequest->mSettingsList.begin()->metadata.isEmpty()) {
+            camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
+            find_camera_metadata_ro_entry(halRequest->settings, ANDROID_CONTROL_CAPTURE_INTENT, &e);
+            if ((e.count > 0) && (e.data.u8[0] == ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE)) {
+                isStillCapture = true;
+                ATRACE_ASYNC_BEGIN("still capture", mNextRequests[i].halRequest.frame_number);
+            }
+        }
         res = parent->registerInFlight(halRequest->frame_number,
                 totalNumBuffers, captureRequest->mResultExtras,
                 /*hasInput*/halRequest->input_buffer != NULL,
                 hasCallback,
                 calculateMaxExpectedDuration(halRequest->settings),
-                requestedPhysicalCameras);
+                requestedPhysicalCameras, isStillCapture);
         ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
                ", burstId = %" PRId32 ".",
                 __FUNCTION__,
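
Camera3Device now caches the static metadata of each physical camera behind a logical camera (mPhysicalDeviceInfoMap) and exposes it through info(const String8&), falling back to the logical device's metadata for an empty or unknown id. A minimal sketch of that lookup, with stand-in types:

#include <string>
#include <unordered_map>

struct Metadata { /* static camera characteristics */ };

class DeviceInfoCache {
  public:
    // Empty or unrecognized physical ids fall back to the logical device's
    // own static metadata, mirroring Camera3Device::info(const String8&).
    const Metadata& info(const std::string& physicalId) const {
        if (physicalId.empty()) return mDeviceInfo;
        auto it = mPhysicalDeviceInfoMap.find(physicalId);
        return (it != mPhysicalDeviceInfoMap.end()) ? it->second : mDeviceInfo;
    }

  private:
    Metadata mDeviceInfo;
    std::unordered_map<std::string, Metadata> mPhysicalDeviceInfoMap;
};

CameraDeviceClient uses this overload when rounding stream dimensions, so a surface attached to a physical sub-camera is validated against that sub-camera's stream configurations rather than the logical camera's.
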
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d8fe19f..85f9614 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -101,6 +101,7 @@
     status_t disconnect() override;
     status_t dump(int fd, const Vector<String16> &args) override;
     const CameraMetadata& info() const override;
+    const CameraMetadata& info(const String8& physicalId) const override;
 
     // Capture and setStreamingRequest will configure streams if currently in
     // idle state
@@ -209,6 +210,8 @@
 
   private:
 
+    status_t disconnectImpl();
+
     // internal typedefs
     using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
     using ResultMetadataQueue  = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
@@ -379,6 +382,7 @@
     sp<HalInterface> mInterface;
 
     CameraMetadata             mDeviceInfo;
+    std::unordered_map<std::string, CameraMetadata> mPhysicalDeviceInfoMap;
 
     CameraMetadata             mRequestTemplateCache[CAMERA3_TEMPLATE_COUNT];
 
@@ -992,6 +996,9 @@
         // Map of physicalCameraId <-> Metadata
         std::vector<PhysicalCaptureResultInfo> physicalMetadatas;
 
+        // Indicates a still capture request.
+        bool stillCapture;
+
         // Default constructor needed by KeyedVector
         InFlightRequest() :
                 shutterTimestamp(0),
@@ -1002,12 +1009,13 @@
                 hasInputBuffer(false),
                 hasCallback(true),
                 maxExpectedDuration(kDefaultExpectedDuration),
-                skipResultMetadata(false) {
+                skipResultMetadata(false),
+                stillCapture(false) {
         }
 
         InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
                 bool hasAppCallback, nsecs_t maxDuration,
-                const std::set<String8>& physicalCameraIdSet) :
+                const std::set<String8>& physicalCameraIdSet, bool isStillCapture) :
                 shutterTimestamp(0),
                 sensorTimestamp(0),
                 requestStatus(OK),
@@ -1018,7 +1026,8 @@
                 hasCallback(hasAppCallback),
                 maxExpectedDuration(maxDuration),
                 skipResultMetadata(false),
-                physicalCameraIds(physicalCameraIdSet) {
+                physicalCameraIds(physicalCameraIdSet),
+                stillCapture(isStillCapture) {
         }
     };
 
@@ -1032,10 +1041,10 @@
     nsecs_t                mExpectedInflightDuration = 0;
     int                    mInFlightStatusId;
 
-
     status_t registerInFlight(uint32_t frameNumber,
             int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
-            bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds);
+            bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds,
+            bool isStillCapture);
 
     /**
      * Returns the maximum expected time it'll take for all currently in-flight
@@ -1208,6 +1217,9 @@
 
     static callbacks_notify_t sNotify;
 
+    // Synchronizes access to status tracker between inflight updates and disconnect.
+    // b/79972865
+    Mutex mTrackerLock;
 }; // class Camera3Device
 
 }; // namespace android
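
The new mTrackerLock is a small, dedicated mutex: registerInFlight() and the in-flight-map cleanup only need to mark the status tracker active or idle, and doing that under the device's main lock both raced with disconnect and could deadlock on reprocess paths (b/79972865). A sketch of the pattern with stand-in types:

#include <mutex>

struct StatusTracker {
    void markComponentActive(int id) { (void)id; }
    void markComponentIdle(int id)   { (void)id; }
};

class InFlightBookkeeping {
  public:
    void onFirstRequestInFlight() {
        // Dedicated tracker lock: never nested with the device's main lock,
        // so it cannot participate in the disconnect/reprocess deadlock.
        std::lock_guard<std::mutex> l(mTrackerLock);
        if (mStatusTracker) mStatusTracker->markComponentActive(mInFlightStatusId);
    }

    void onLastRequestCompleted() {
        std::lock_guard<std::mutex> l(mTrackerLock);
        if (mStatusTracker) mStatusTracker->markComponentIdle(mInFlightStatusId);
    }

  private:
    std::mutex mTrackerLock;
    StatusTracker* mStatusTracker = nullptr;
    int mInFlightStatusId = 0;
};

disconnectImpl() takes the same mTrackerLock (see the Camera3Device.cpp hunk above), which is what closes the race between teardown and in-flight status updates.
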
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index b3c3717..2c020a2 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -564,7 +564,7 @@
 
             // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
             // let prepareNextBuffer handle the error.)
-            if (res == NO_INIT && mState == STATE_CONFIGURED) {
+            if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
                 mState = STATE_ABANDONED;
             }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 1105b75..6030d15 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -331,7 +331,14 @@
 
     status_t res;
     res = configureQueueLocked();
-    if (res != OK) {
+    // configureQueueLocked could return an error if the surface has been abandoned.
+    // Treat this as a non-fatal error.
+    if (res == NO_INIT || res == DEAD_OBJECT) {
+        ALOGE("%s: Unable to configure stream %d queue (non-fatal): %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        mState = STATE_ABANDONED;
+        return res;
+    } else if (res != OK) {
         ALOGE("%s: Unable to configure stream %d queue: %s (%d)",
                 __FUNCTION__, mId, strerror(-res), res);
         mState = STATE_ERROR;
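
Together with the Camera3OutputStream change above, an abandoned consumer surface (NO_INIT or DEAD_OBJECT from the buffer queue) now parks the stream in STATE_ABANDONED instead of STATE_ERROR, so the failure stays local to that stream rather than taking down the device. A tiny sketch of the classification, with placeholder status values rather than the framework's real codes:

#include <cerrno>

enum class StreamState { CONFIGURED, ABANDONED, ERROR };

constexpr int kNoInit = -ENODEV;     // placeholder for NO_INIT
constexpr int kDeadObject = -EPIPE;  // placeholder for DEAD_OBJECT

// Abandoned-surface errors are recoverable at the device level; anything
// else coming out of queue configuration is treated as fatal.
StreamState classifyConfigureResult(int res) {
    if (res == 0) return StreamState::CONFIGURED;
    if (res == kNoInit || res == kDeadObject) return StreamState::ABANDONED;
    return StreamState::ERROR;
}
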
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index a60cb56..4ddcf1a 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -482,6 +482,7 @@
     // after the HAL has provided usage and max_buffers values. After this call,
     // the stream must be ready to produce all buffers for registration with
     // HAL.
+    // Returns NO_INIT or DEAD_OBJECT if the queue has been abandoned.
     virtual status_t configureQueueLocked() = 0;
 
     // Get the total number of buffers in the queue
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index 59ac636..8a9402e 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -493,6 +493,10 @@
     SP_LOGV("acquired buffer %" PRId64 " from input at slot %d",
             bufferItem.mGraphicBuffer->getId(), bufferItem.mSlot);
 
+    if (bufferItem.mTransformToDisplayInverse) {
+        bufferItem.mTransform |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
+    }
+
     // Attach and queue the buffer to each of the outputs
     BufferTracker& tracker = *(mBuffers[bufferId]);
 
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index eef6658..ae7af8e 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -49,7 +49,7 @@
 };
 
 // Only for capture result
-constexpr std::array<uint32_t, 2> DistortionMapper::kResultPointsToCorrect = {
+constexpr std::array<uint32_t, 2> DistortionMapper::kResultPointsToCorrectNoClamp = {
     ANDROID_STATISTICS_FACE_RECTANGLES, // Says rectangles, is really points
     ANDROID_STATISTICS_FACE_LANDMARKS,
 };
@@ -79,12 +79,21 @@
     array = deviceInfo.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
     if (array.count != 4) return BAD_VALUE;
 
-    mArrayWidth = array.data.i32[2];
-    mArrayHeight = array.data.i32[3];
+    float arrayX = static_cast<float>(array.data.i32[0]);
+    float arrayY = static_cast<float>(array.data.i32[1]);
+    mArrayWidth = static_cast<float>(array.data.i32[2]);
+    mArrayHeight = static_cast<float>(array.data.i32[3]);
 
     array = deviceInfo.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
-    mActiveWidth = array.data.i32[2];
-    mActiveHeight = array.data.i32[3];
+    if (array.count != 4) return BAD_VALUE;
+
+    float activeX = static_cast<float>(array.data.i32[0]);
+    float activeY = static_cast<float>(array.data.i32[1]);
+    mActiveWidth = static_cast<float>(array.data.i32[2]);
+    mActiveHeight = static_cast<float>(array.data.i32[3]);
+
+    mArrayDiffX = activeX - arrayX;
+    mArrayDiffY = activeY - arrayY;
 
     return updateCalibration(deviceInfo);
 }
@@ -111,22 +120,13 @@
                 if (weight == 0) {
                     continue;
                 }
-                res = mapCorrectedToRaw(e.data.i32 + j, 2);
+                res = mapCorrectedToRaw(e.data.i32 + j, 2, /*clamp*/true);
                 if (res != OK) return res;
-                for (size_t k = 0; k < 4; k+=2) {
-                    int32_t& x = e.data.i32[j + k];
-                    int32_t& y = e.data.i32[j + k + 1];
-                    // Clamp to within active array
-                    x = std::max(0, x);
-                    x = std::min(mActiveWidth - 1, x);
-                    y = std::max(0, y);
-                    y = std::min(mActiveHeight - 1, y);
-                }
             }
         }
         for (auto rect : kRequestRectsToCorrect) {
             e = request->find(rect);
-            res = mapCorrectedRectToRaw(e.data.i32, e.count / 4);
+            res = mapCorrectedRectToRaw(e.data.i32, e.count / 4, /*clamp*/true);
             if (res != OK) return res;
         }
     }
@@ -156,27 +156,18 @@
                 if (weight == 0) {
                     continue;
                 }
-                res = mapRawToCorrected(e.data.i32 + j, 2);
+                res = mapRawToCorrected(e.data.i32 + j, 2, /*clamp*/true);
                 if (res != OK) return res;
-                for (size_t k = 0; k < 4; k+=2) {
-                    int32_t& x = e.data.i32[j + k];
-                    int32_t& y = e.data.i32[j + k + 1];
-                    // Clamp to within active array
-                    x = std::max(0, x);
-                    x = std::min(mActiveWidth - 1, x);
-                    y = std::max(0, y);
-                    y = std::min(mActiveHeight - 1, y);
-                }
             }
         }
         for (auto rect : kResultRectsToCorrect) {
             e = result->find(rect);
-            res = mapRawRectToCorrected(e.data.i32, e.count / 4);
+            res = mapRawRectToCorrected(e.data.i32, e.count / 4, /*clamp*/true);
             if (res != OK) return res;
         }
-        for (auto pts : kResultPointsToCorrect) {
+        for (auto pts : kResultPointsToCorrectNoClamp) {
             e = result->find(pts);
-            res = mapRawToCorrected(e.data.i32, e.count / 2);
+            res = mapRawToCorrected(e.data.i32, e.count / 2, /*clamp*/false);
             if (res != OK) return res;
         }
     }
@@ -232,9 +223,12 @@
     return OK;
 }
 
-status_t DistortionMapper::mapRawToCorrected(int32_t *coordPairs, int coordCount) {
+status_t DistortionMapper::mapRawToCorrected(int32_t *coordPairs, int coordCount,
+        bool clamp, bool simple) {
     if (!mValidMapping) return INVALID_OPERATION;
 
+    if (simple) return mapRawToCorrectedSimple(coordPairs, coordCount, clamp);
+
     if (!mValidGrids) {
         status_t res = buildGrids();
         if (res != OK) return res;
@@ -275,6 +269,12 @@
         // Interpolate along left edge of corrected quad (which are axis-aligned) for y
         float corrY = corrQuad->coords[1] + v * (corrQuad->coords[7] - corrQuad->coords[1]);
 
+        // Clamp to within active array
+        if (clamp) {
+            corrX = std::min(mActiveWidth - 1, std::max(0.f, corrX));
+            corrY = std::min(mActiveHeight - 1, std::max(0.f, corrY));
+        }
+
         coordPairs[i] = static_cast<int32_t>(std::round(corrX));
         coordPairs[i + 1] = static_cast<int32_t>(std::round(corrY));
     }
@@ -282,37 +282,70 @@
     return OK;
 }
 
-status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount) {
+status_t DistortionMapper::mapRawToCorrectedSimple(int32_t *coordPairs, int coordCount,
+        bool clamp) const {
+    if (!mValidMapping) return INVALID_OPERATION;
+
+    float scaleX = mActiveWidth / mArrayWidth;
+    float scaleY = mActiveHeight / mArrayHeight;
+    for (int i = 0; i < coordCount * 2; i += 2) {
+        float x = coordPairs[i];
+        float y = coordPairs[i + 1];
+        float corrX = x * scaleX;
+        float corrY = y * scaleY;
+        if (clamp) {
+            corrX = std::min(mActiveWidth - 1, std::max(0.f, corrX));
+            corrY = std::min(mActiveHeight - 1, std::max(0.f, corrY));
+        }
+        coordPairs[i] = static_cast<int32_t>(std::round(corrX));
+        coordPairs[i + 1] = static_cast<int32_t>(std::round(corrY));
+    }
+
+    return OK;
+}
+
+status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount, bool clamp,
+        bool simple) {
     if (!mValidMapping) return INVALID_OPERATION;
     for (int i = 0; i < rectCount * 4; i += 4) {
         // Map from (l, t, width, height) to (l, t, r, b)
         int32_t coords[4] = {
             rects[i],
             rects[i + 1],
-            rects[i] + rects[i + 2],
-            rects[i + 1] + rects[i + 3]
+            rects[i] + rects[i + 2] - 1,
+            rects[i + 1] + rects[i + 3] - 1
         };
 
-        mapRawToCorrected(coords, 2);
+        mapRawToCorrected(coords, 2, clamp, simple);
 
         // Map back to (l, t, width, height)
         rects[i] = coords[0];
         rects[i + 1] = coords[1];
-        rects[i + 2] = coords[2] - coords[0];
-        rects[i + 3] = coords[3] - coords[1];
+        rects[i + 2] = coords[2] - coords[0] + 1;
+        rects[i + 3] = coords[3] - coords[1] + 1;
     }
 
     return OK;
 }
 
+status_t DistortionMapper::mapCorrectedToRaw(int32_t *coordPairs, int coordCount, bool clamp,
+        bool simple) const {
+    return mapCorrectedToRawImpl(coordPairs, coordCount, clamp, simple);
+}
+
 template<typename T>
-status_t DistortionMapper::mapCorrectedToRaw(T *coordPairs, int coordCount) const {
+status_t DistortionMapper::mapCorrectedToRawImpl(T *coordPairs, int coordCount, bool clamp,
+        bool simple) const {
     if (!mValidMapping) return INVALID_OPERATION;
 
+    if (simple) return mapCorrectedToRawImplSimple(coordPairs, coordCount, clamp);
+
+    float activeCx = mCx - mArrayDiffX;
+    float activeCy = mCy - mArrayDiffY;
     for (int i = 0; i < coordCount * 2; i += 2) {
-        // Move to normalized space
-        float ywi = (coordPairs[i + 1] - mCy) * mInvFy;
-        float xwi = (coordPairs[i] - mCx - mS * ywi) * mInvFx;
+        // Move to normalized space from active array space
+        float ywi = (coordPairs[i + 1] - activeCy) * mInvFy;
+        float xwi = (coordPairs[i] - activeCx - mS * ywi) * mInvFx;
         // Apply distortion model to calculate raw image coordinates
         float rSq = xwi * xwi + ywi * ywi;
         float Fr = 1.f + (mK[0] * rSq) + (mK[1] * rSq * rSq) + (mK[2] * rSq * rSq * rSq);
@@ -321,6 +354,11 @@
         // Move back to image space
         float xr = mFx * xc + mS * yc + mCx;
         float yr = mFy * yc + mCy;
+        // Clamp to within pre-correction active array
+        if (clamp) {
+            xr = std::min(mArrayWidth - 1, std::max(0.f, xr));
+            yr = std::min(mArrayHeight - 1, std::max(0.f, yr));
+        }
 
         coordPairs[i] = static_cast<T>(std::round(xr));
         coordPairs[i + 1] = static_cast<T>(std::round(yr));
@@ -329,10 +367,32 @@
     return OK;
 }
 
-template status_t DistortionMapper::mapCorrectedToRaw(int32_t*, int) const;
-template status_t DistortionMapper::mapCorrectedToRaw(float*, int) const;
+template<typename T>
+status_t DistortionMapper::mapCorrectedToRawImplSimple(T *coordPairs, int coordCount,
+        bool clamp) const {
+    if (!mValidMapping) return INVALID_OPERATION;
 
-status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount) const {
+    float scaleX = mArrayWidth / mActiveWidth;
+    float scaleY = mArrayHeight / mActiveHeight;
+    for (int i = 0; i < coordCount * 2; i += 2) {
+        float x = coordPairs[i];
+        float y = coordPairs[i + 1];
+        float rawX = x * scaleX;
+        float rawY = y * scaleY;
+        if (clamp) {
+            rawX = std::min(mArrayWidth - 1, std::max(0.f, rawX));
+            rawY = std::min(mArrayHeight - 1, std::max(0.f, rawY));
+        }
+        coordPairs[i] = static_cast<T>(std::round(rawX));
+        coordPairs[i + 1] = static_cast<T>(std::round(rawY));
+    }
+
+    return OK;
+}
+
+
+status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount, bool clamp,
+        bool simple) const {
     if (!mValidMapping) return INVALID_OPERATION;
 
     for (int i = 0; i < rectCount * 4; i += 4) {
@@ -340,17 +400,17 @@
         int32_t coords[4] = {
             rects[i],
             rects[i + 1],
-            rects[i] + rects[i + 2],
-            rects[i + 1] + rects[i + 3]
+            rects[i] + rects[i + 2] - 1,
+            rects[i + 1] + rects[i + 3] - 1
         };
 
-        mapCorrectedToRaw(coords, 2);
+        mapCorrectedToRaw(coords, 2, clamp, simple);
 
         // Map back to (l, t, width, height)
         rects[i] = coords[0];
         rects[i + 1] = coords[1];
-        rects[i + 2] = coords[2] - coords[0];
-        rects[i + 3] = coords[3] - coords[1];
+        rects[i + 2] = coords[2] - coords[0] + 1;
+        rects[i + 3] = coords[3] - coords[1] + 1;
     }
 
     return OK;
@@ -380,7 +440,8 @@
             };
             mDistortedGrid[index].src = &mCorrectedGrid[index];
             mDistortedGrid[index].coords = mCorrectedGrid[index].coords;
-            status_t res = mapCorrectedToRaw(mDistortedGrid[index].coords.data(), 4);
+            status_t res = mapCorrectedToRawImpl(mDistortedGrid[index].coords.data(), 4,
+                    /*clamp*/false, /*simple*/false);
             if (res != OK) return res;
         }
     }
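
Two things change in the mapping math above: a "simple" mode that is just a linear rescale between the active and pre-correction arrays, and rectangle conversion that now uses inclusive right/bottom edges ((l, t, w, h) -> (l, t, l + w - 1, t + h - 1), and back with the matching + 1) so the last pixel of a rect is mapped rather than the pixel one past it. A standalone sketch of the simple corrected-to-raw map, with illustrative dimensions only:

#include <algorithm>
#include <array>
#include <cmath>

// Linear rescale from active-array coordinates to pre-correction-array
// coordinates, optionally clamped to the array bounds, in the spirit of
// mapCorrectedToRawImplSimple() above.
std::array<float, 2> correctedToRawSimple(float x, float y,
        float activeW, float activeH, float arrayW, float arrayH, bool clamp) {
    float rawX = x * (arrayW / activeW);
    float rawY = y * (arrayH / activeH);
    if (clamp) {
        rawX = std::min(arrayW - 1, std::max(0.f, rawX));
        rawY = std::min(arrayH - 1, std::max(0.f, rawY));
    }
    return {std::round(rawX), std::round(rawY)};
}

For example, with a 1020x770 pre-correction array and a 1000x750 active array (the values used in the test update below), the active-array corner (999, 749) maps to roughly (1019, 769) rather than past the array edge.
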
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.h b/services/camera/libcameraservice/device3/DistortionMapper.h
index 00cbd32..4c0a1a6 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.h
+++ b/services/camera/libcameraservice/device3/DistortionMapper.h
@@ -73,8 +73,11 @@
      *
      *   coordPairs: A pointer to an array of consecutive (x,y) points
      *   coordCount: Number of (x,y) pairs to transform
+     *   clamp: Whether to clamp the result to the bounds of the active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    status_t mapRawToCorrected(int32_t *coordPairs, int coordCount);
+    status_t mapRawToCorrected(int32_t *coordPairs, int coordCount, bool clamp,
+            bool simple = true);
 
     /**
      * Transform from distorted (original) to corrected (warped) coordinates.
@@ -82,8 +85,11 @@
      *
      *   rects: A pointer to an array of consecutive (x,y, w, h) rectangles
      *   rectCount: Number of rectangles to transform
+     *   clamp: Whether to clamp the result to the bounds of the active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    status_t mapRawRectToCorrected(int32_t *rects, int rectCount);
+    status_t mapRawRectToCorrected(int32_t *rects, int rectCount, bool clamp,
+            bool simple = true);
 
     /**
      * Transform from corrected (warped) to distorted (original) coordinates.
@@ -91,9 +97,11 @@
      *
      *   coordPairs: A pointer to an array of consecutive (x,y) points
      *   coordCount: Number of (x,y) pairs to transform
+     *   clamp: Whether to clamp the result to the bounds of the pre-correction active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    template<typename T>
-    status_t mapCorrectedToRaw(T* coordPairs, int coordCount) const;
+    status_t mapCorrectedToRaw(int32_t* coordPairs, int coordCount, bool clamp,
+            bool simple = true) const;
 
     /**
      * Transform from corrected (warped) to distorted (original) coordinates.
@@ -101,8 +109,11 @@
      *
      *   rects: A pointer to an array of consecutive (x,y, w, h) rectangles
      *   rectCount: Number of rectangles to transform
+     *   clamp: Whether to clamp the result to the bounds of the pre-correction active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount) const;
+    status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount, bool clamp,
+            bool simple = true) const;
 
     struct GridQuad {
         // Source grid quad, or null
@@ -150,8 +161,18 @@
     // Only capture result
     static const std::array<uint32_t, 1> kResultRectsToCorrect;
 
-    // Only for capture results
-    static const std::array<uint32_t, 2> kResultPointsToCorrect;
+    // Only for capture results; don't clamp
+    static const std::array<uint32_t, 2> kResultPointsToCorrectNoClamp;
+
+    // Single implementation for various mapCorrectedToRaw methods
+    template<typename T>
+    status_t mapCorrectedToRawImpl(T* coordPairs, int coordCount, bool clamp, bool simple) const;
+
+    // Simple linear interpolation option
+    template<typename T>
+    status_t mapCorrectedToRawImplSimple(T* coordPairs, int coordCount, bool clamp) const;
+
+    status_t mapRawToCorrectedSimple(int32_t *coordPairs, int coordCount, bool clamp) const;
 
     // Utility to create reverse mapping grids
     status_t buildGrids();
@@ -168,9 +189,11 @@
     float mK[5];
 
     // pre-correction active array dimensions
-    int mArrayWidth, mArrayHeight;
+    float mArrayWidth, mArrayHeight;
     // active array dimensions
-    int mActiveWidth, mActiveHeight;
+    float mActiveWidth, mActiveHeight;
+    // corner offsets between pre-correction and active arrays
+    float mArrayDiffX, mArrayDiffY;
 
     std::vector<GridQuad> mCorrectedGrid;
     std::vector<GridQuad> mDistortedGrid;
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index f77069c..8d80ff1 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -38,6 +38,7 @@
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
 LOCAL_MODULE:= cameraservice_test
+LOCAL_COMPATIBILITY_SUITE := device-tests
 LOCAL_MODULE_TAGS := tests
 
 include $(BUILD_NATIVE_TEST)
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index ef93d9a..0086c6c 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -163,7 +163,7 @@
         mTestCameraProvider = provider;
     }
 
-    std::string mLastRequestedServiceName;
+    std::vector<std::string> mLastRequestedServiceNames;
 
     virtual ~TestInteractionProxy() {}
 
@@ -177,7 +177,7 @@
 
     virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
             const std::string &serviceName) override {
-        mLastRequestedServiceName = serviceName;
+        mLastRequestedServiceNames.push_back(serviceName);
         return mTestCameraProvider;
     }
 
@@ -210,9 +210,18 @@
     res = providerManager->initialize(statusListener, &serviceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
 
-    hardware::hidl_string legacyInstanceName = "legacy/0";
-    ASSERT_EQ(serviceProxy.mLastRequestedServiceName, legacyInstanceName) <<
+    std::string legacyInstanceName = "legacy/0";
+    std::string externalInstanceName = "external/0";
+    bool gotLegacy = false;
+    bool gotExternal = false;
+    for (auto& serviceName : serviceProxy.mLastRequestedServiceNames) {
+        if (serviceName == legacyInstanceName) gotLegacy = true;
+        if (serviceName == externalInstanceName) gotExternal = true;
+    }
+    ASSERT_TRUE(gotLegacy) <<
             "Legacy instance not requested from service manager";
+    ASSERT_TRUE(gotExternal) <<
+            "External instance not requested from service manager";
 
     hardware::hidl_string testProviderFqInterfaceName =
             "android.hardware.camera.provider@2.4::ICameraProvider";
@@ -221,7 +230,7 @@
             testProviderFqInterfaceName,
             testProviderInstanceName, false);
 
-    ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
+    ASSERT_EQ(serviceProxy.mLastRequestedServiceNames.back(), testProviderInstanceName) <<
             "Incorrect instance requested from service manager";
 }
 
@@ -255,7 +264,7 @@
             "android.hardware.camera.provider@2.4::ICameraProvider";
     serviceProxy.mManagerNotificationInterface->onRegistration(
             testProviderFqInterfaceName, testProviderInstanceName, false);
-    ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
+    ASSERT_EQ(serviceProxy.mLastRequestedServiceNames.back(), testProviderInstanceName) <<
             "Incorrect instance requested from service manager";
 
     hardware::hidl_string sectionNameSecond = "SecondVendorTestSection";
@@ -273,7 +282,7 @@
     hardware::hidl_string testProviderSecondInstanceName = "test2/0";
     serviceProxy.mManagerNotificationInterface->onRegistration(
             testProviderFqInterfaceName, testProviderSecondInstanceName, false);
-    ASSERT_EQ(serviceProxy.mLastRequestedServiceName,
+    ASSERT_EQ(serviceProxy.mLastRequestedServiceNames.back(),
               testProviderSecondInstanceName) <<
             "Incorrect instance requested from service manager";
 
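Since TestInteractionProxy now records every requested service name, the legacy/external membership check in the test above could equally be written with the <algorithm> header; a small sketch follows (the helper name is made up, and only mLastRequestedServiceNames comes from the test).

#include <algorithm>
#include <string>
#include <vector>

// Hypothetical helper, equivalent to the manual gotLegacy/gotExternal loop above.
inline bool wasServiceRequested(const std::vector<std::string>& requestedNames,
                                const std::string& instanceName) {
    return std::any_of(requestedNames.begin(), requestedNames.end(),
            [&instanceName](const std::string& name) { return name == instanceName; });
}

// Possible usage in the test body:
//   ASSERT_TRUE(wasServiceRequested(serviceProxy.mLastRequestedServiceNames, "legacy/0"));
//   ASSERT_TRUE(wasServiceRequested(serviceProxy.mLastRequestedServiceNames, "external/0"));
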
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
index b489931..54935c9 100644
--- a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
@@ -30,6 +30,7 @@
 
 
 int32_t testActiveArray[] = {100, 100, 1000, 750};
+int32_t testPreCorrActiveArray[] = {90, 90, 1020, 770};
 
 float testICal[] = { 1000.f, 1000.f, 500.f, 500.f, 0.f };
 
@@ -45,14 +46,19 @@
 };
 
 
-void setupTestMapper(DistortionMapper *m, float distortion[5]) {
+void setupTestMapper(DistortionMapper *m,
+        float distortion[5], float intrinsics[5],
+        int32_t activeArray[4], int32_t preCorrectionActiveArray[4]) {
     CameraMetadata deviceInfo;
 
     deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
-            testActiveArray, 4);
+            preCorrectionActiveArray, 4);
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+            activeArray, 4);
 
     deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
-            testICal, 5);
+            intrinsics, 5);
 
     deviceInfo.update(ANDROID_LENS_DISTORTION,
             distortion, 5);
@@ -89,6 +95,9 @@
     ASSERT_FALSE(m.calibrationValid());
 
     deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+            testPreCorrActiveArray, 4);
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
             testActiveArray, 4);
 
     deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
@@ -118,17 +127,19 @@
     status_t res;
 
     DistortionMapper m;
-    setupTestMapper(&m, identityDistortion);
+    setupTestMapper(&m, identityDistortion, testICal,
+            /*activeArray*/ testActiveArray,
+            /*preCorrectionActiveArray*/ testActiveArray);
 
     auto coords = basicCoords;
-    res = m.mapCorrectedToRaw(coords.data(), 5);
+    res = m.mapCorrectedToRaw(coords.data(), 5,  /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < coords.size(); i++) {
         EXPECT_EQ(coords[i], basicCoords[i]);
     }
 
-    res = m.mapRawToCorrected(coords.data(), 5);
+    res = m.mapRawToCorrected(coords.data(), 5, /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < coords.size(); i++) {
@@ -137,18 +148,18 @@
 
     std::array<int32_t, 8> rects = {
         0, 0, 100, 100,
-        testActiveArray[2] - 100, testActiveArray[3]-100, 100, 100
+        testActiveArray[2] - 101, testActiveArray[3] - 101, 100, 100
     };
 
     auto rectsOrig = rects;
-    res = m.mapCorrectedRectToRaw(rects.data(), 2);
+    res = m.mapCorrectedRectToRaw(rects.data(), 2, /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < rects.size(); i++) {
         EXPECT_EQ(rects[i], rectsOrig[i]);
     }
 
-    res = m.mapRawRectToCorrected(rects.data(), 2);
+    res = m.mapRawRectToCorrected(rects.data(), 2, /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < rects.size(); i++) {
@@ -156,23 +167,63 @@
     }
 }
 
-TEST(DistortionMapperTest, LargeTransform) {
+TEST(DistortionMapperTest, ClampConsistency) {
+    status_t res;
+
+    std::array<int32_t, 4> activeArray = {0, 0, 4032, 3024};
+    DistortionMapper m;
+    setupTestMapper(&m, identityDistortion, testICal, /*activeArray*/ activeArray.data(),
+            /*preCorrectionActiveArray*/ activeArray.data());
+
+    auto rectsOrig = activeArray;
+    res = m.mapCorrectedRectToRaw(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < activeArray.size(); i++) {
+        EXPECT_EQ(activeArray[i], rectsOrig[i]);
+    }
+
+    res = m.mapRawRectToCorrected(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < activeArray.size(); i++) {
+        EXPECT_EQ(activeArray[i], rectsOrig[i]);
+    }
+}
+
+TEST(DistortionMapperTest, SimpleTransform) {
+    status_t res;
+
+    DistortionMapper m;
+    setupTestMapper(&m, identityDistortion, testICal,
+            /*activeArray*/ testActiveArray,
+            /*preCorrectionActiveArray*/ testPreCorrActiveArray);
+
+    auto coords = basicCoords;
+    res = m.mapCorrectedToRaw(coords.data(), 5,  /*clamp*/true, /*simple*/true);
+    ASSERT_EQ(res, OK);
+
+    ASSERT_EQ(coords[0], 0); ASSERT_EQ(coords[1], 0);
+    ASSERT_EQ(coords[2], testPreCorrActiveArray[2] - 1); ASSERT_EQ(coords[3], 0);
+    ASSERT_EQ(coords[4], testPreCorrActiveArray[2] - 1); ASSERT_EQ(coords[5], testPreCorrActiveArray[3] - 1);
+    ASSERT_EQ(coords[6], 0); ASSERT_EQ(coords[7], testPreCorrActiveArray[3] - 1);
+    ASSERT_EQ(coords[8], testPreCorrActiveArray[2] / 2); ASSERT_EQ(coords[9], testPreCorrActiveArray[3] / 2);
+}
+
+
+void RandomTransformTest(::testing::Test *test,
+        int32_t* activeArray, DistortionMapper &m, bool clamp, bool simple) {
     status_t res;
     constexpr int maxAllowedPixelError = 2; // Maximum per-pixel error allowed
     constexpr int bucketsPerPixel = 3; // Histogram granularity
 
     unsigned int seed = 1234; // Ensure repeatability for debugging
-    const size_t coordCount = 1e6; // Number of random test points
-
-    float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
-
-    DistortionMapper m;
-    setupTestMapper(&m, bigDistortion);
+    const size_t coordCount = 1e5; // Number of random test points
 
     std::default_random_engine gen(seed);
 
-    std::uniform_int_distribution<int> x_dist(0, testActiveArray[2] - 1);
-    std::uniform_int_distribution<int> y_dist(0, testActiveArray[3] - 1);
+    std::uniform_int_distribution<int> x_dist(0, activeArray[2] - 1);
+    std::uniform_int_distribution<int> y_dist(0, activeArray[3] - 1);
 
     std::vector<int32_t> randCoords(coordCount * 2);
 
@@ -186,12 +237,12 @@
     auto origCoords = randCoords;
 
     base::Timer correctedToRawTimer;
-    res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2);
+    res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2, clamp, simple);
     auto correctedToRawDurationMs = correctedToRawTimer.duration();
     EXPECT_EQ(res, OK);
 
     base::Timer rawToCorrectedTimer;
-    res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2);
+    res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2, clamp, simple);
     auto rawToCorrectedDurationMs = rawToCorrectedTimer.duration();
     EXPECT_EQ(res, OK);
 
@@ -202,9 +253,9 @@
             (std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(
                 rawToCorrectedDurationMs) / (randCoords.size() / 2) ).count();
 
-    RecordProperty("CorrectedToRawDurationPerCoordUs",
+    test->RecordProperty("CorrectedToRawDurationPerCoordUs",
             base::StringPrintf("%f", correctedToRawDurationPerCoordUs));
-    RecordProperty("RawToCorrectedDurationPerCoordUs",
+    test->RecordProperty("RawToCorrectedDurationPerCoordUs",
             base::StringPrintf("%f", rawToCorrectedDurationPerCoordUs));
 
     // Calculate mapping errors after round trip
@@ -239,17 +290,61 @@
     }
 
     float rmsError = std::sqrt(totalErrorSq / randCoords.size());
-    RecordProperty("RmsError", base::StringPrintf("%f", rmsError));
+    test->RecordProperty("RmsError", base::StringPrintf("%f", rmsError));
     for (size_t i = 0; i < histogram.size(); i++) {
         std::string label = base::StringPrintf("HistogramBin[%f,%f)",
                 (float)i/bucketsPerPixel, (float)(i + 1)/bucketsPerPixel);
-        RecordProperty(label, histogram[i]);
+        test->RecordProperty(label, histogram[i]);
     }
-    RecordProperty("HistogramOutOfRange", outOfHistogram);
+    test->RecordProperty("HistogramOutOfRange", outOfHistogram);
+}
+
+// Test a realistic distortion function with matching calibration values, enforcing
+// clamping.
+TEST(DistortionMapperTest, DISABLED_SmallTransform) {
+    int32_t activeArray[] = {0, 8, 3278, 2450};
+    int32_t preCorrectionActiveArray[] = {0, 0, 3280, 2464};
+
+    float distortion[] = {0.06875723, -0.13922249, 0.02818312, -0.00032781, -0.00025431};
+    float intrinsics[] = {1812.50000000, 1812.50000000, 1645.59533691, 1229.23229980, 0.00000000};
+
+    DistortionMapper m;
+    setupTestMapper(&m, distortion, intrinsics, activeArray, preCorrectionActiveArray);
+
+    RandomTransformTest(this, activeArray, m, /*clamp*/true, /*simple*/false);
+}
+
+// Test a realistic distortion function with matching calibration values, enforcing
+// clamping, but using the simple linear transform
+TEST(DistortionMapperTest, SmallSimpleTransform) {
+    int32_t activeArray[] = {0, 8, 3278, 2450};
+    int32_t preCorrectionActiveArray[] = {0, 0, 3280, 2464};
+
+    float distortion[] = {0.06875723, -0.13922249, 0.02818312, -0.00032781, -0.00025431};
+    float intrinsics[] = {1812.50000000, 1812.50000000, 1645.59533691, 1229.23229980, 0.00000000};
+
+    DistortionMapper m;
+    setupTestMapper(&m, distortion, intrinsics, activeArray, preCorrectionActiveArray);
+
+    RandomTransformTest(this, activeArray, m, /*clamp*/true, /*simple*/true);
+}
+
+// Test a very large distortion function; the regions aren't valid for such a big transform,
+// so disable clamping. This test just verifies round-trip math accuracy for large transforms.
+TEST(DistortionMapperTest, LargeTransform) {
+    float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
+
+    DistortionMapper m;
+    setupTestMapper(&m, bigDistortion, testICal,
+            /*activeArray*/testActiveArray,
+            /*preCorrectionActiveArray*/testPreCorrActiveArray);
+
+    RandomTransformTest(this, testActiveArray, m, /*clamp*/false, /*simple*/false);
 }
 
 // Compare against values calculated by OpenCV
 // undistortPoints() method, which is the same as mapRawToCorrected
+// Ignore clamping
 // See script DistortionMapperComp.py
 #include "DistortionMapperTest_OpenCvData.h"
 
@@ -262,11 +357,14 @@
     const int32_t maxSqError = 2;
 
     DistortionMapper m;
-    setupTestMapper(&m, bigDistortion);
+    setupTestMapper(&m, bigDistortion, testICal,
+            /*activeArray*/testActiveArray,
+            /*preCorrectionActiveArray*/testActiveArray);
 
     using namespace openCvData;
 
-    res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2);
+    res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2, /*clamp*/false,
+            /*simple*/false);
 
     for (size_t i = 0; i < rawCoords.size(); i+=2) {
         int32_t dist = (rawCoords[i] - expCoords[i]) * (rawCoords[i] - expCoords[i]) +
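
The new ClampConsistency test above relies on mapped coordinates being clamped back into the destination array, so a rectangle covering the full array survives a round trip unchanged. A minimal sketch of that per-coordinate clamping is below; the helper name and exact bounds convention are assumptions for illustration, not the mapper's actual code.

#include <algorithm>

// Clamp a mapped coordinate pair into [0, width - 1] x [0, height - 1].
inline void clampCoordinateSketch(float& x, float& y, float width, float height) {
    x = std::min(width  - 1.0f, std::max(0.0f, x));
    y = std::min(height - 1.0f, std::max(0.0f, y));
}

With clamp passed as false, as in the LargeTransform and OpenCV comparison tests, out-of-range results are left untouched so round-trip accuracy can be measured directly.
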
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index c0a353f..f4c49ec 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -49,7 +49,8 @@
     std::lock_guard<std::mutex> lock(mMonitorMutex);
 
     // Expand shorthands
-    if (ssize_t idx = tagNames.find("3a") != -1) {
+    ssize_t idx = tagNames.find("3a");
+    if (idx != -1) {
         ssize_t end = tagNames.find(",", idx);
         char* start = tagNames.lockBuffer(tagNames.size());
         start[idx] = '\0';
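
The TagMonitor change above fixes an operator-precedence pitfall: in the old condition, tagNames.find("3a") != -1 is evaluated first, so idx is initialized to 0 or 1 rather than to the match position. A standalone illustration using std::string (which reports misses via npos, unlike String8's -1) is below.

#include <cassert>
#include <string>

int main() {
    std::string tagNames = "foo,3a,bar";

    // Buggy pattern: the comparison runs first, so idx becomes 1 (true),
    // not the offset of the match.
    if (size_t idx = tagNames.find("3a") != std::string::npos) {
        assert(idx == 1);                              // not where "3a" actually is
    }

    // Fixed pattern, as in the patch: find first, then compare.
    size_t idx = tagNames.find("3a");
    assert(idx != std::string::npos && idx == 4);      // real offset of "3a"
    return 0;
}
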
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
index 2eeb7fa..5b20e61 100644
--- a/services/mediaanalytics/Android.mk
+++ b/services/mediaanalytics/Android.mk
@@ -34,8 +34,7 @@
     $(TOP)/frameworks/av/include/camera                             \
     $(TOP)/frameworks/native/include/media/openmax                  \
     $(TOP)/frameworks/native/include/media/hardware                 \
-    $(TOP)/external/tremolo/Tremolo                                 \
-    libcore/include
+    $(TOP)/external/tremolo/Tremolo
 
 
 LOCAL_MODULE:= mediametrics
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index 6ec8895..edf4dab 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -55,4 +55,8 @@
 getdents64: 1
 getrandom: 1
 
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
 @include /system/etc/seccomp_policy/crash_dump.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index bbbe552..4031b11 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -24,6 +24,7 @@
 mmap2: 1
 fstat64: 1
 stat64: 1
+statfs64: 1
 madvise: 1
 fstatat64: 1
 futex: 1
@@ -55,4 +56,8 @@
 getpid: 1
 gettid: 1
 
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
 @include /system/etc/seccomp_policy/crash_dump.x86.policy
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index f0f44f5..f4d8b43 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -21,7 +21,6 @@
 #include <utils/Vector.h>
 
 #include <media/DataSource.h>
-#include <media/MediaExtractor.h>
 #include <media/stagefright/DataSourceFactory.h>
 #include <media/stagefright/InterfaceUtils.h>
 #include <media/stagefright/MediaExtractorFactory.h>
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index e58dff7..b23832e 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -58,11 +58,11 @@
             shared->size() < NBLog::Timeline::sharedSize(size)) {
         return;
     }
-    sp<NBLog::Reader> reader(new NBLog::Reader(shared, size));
-    NBLog::NamedReader namedReader(reader, name);
+    sp<NBLog::Reader> reader(new NBLog::Reader(shared, size, name)); // Reader handled by merger
+    sp<NBLog::DumpReader> dumpReader(new NBLog::DumpReader(shared, size, name)); // for dumpsys
     Mutex::Autolock _l(mLock);
-    mNamedReaders.add(namedReader);
-    mMerger.addReader(namedReader);
+    mDumpReaders.add(dumpReader);
+    mMerger.addReader(reader);
 }
 
 void MediaLogService::unregisterWriter(const sp<IMemory>& shared)
@@ -71,9 +71,10 @@
         return;
     }
     Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mNamedReaders.size(); ) {
-        if (mNamedReaders[i].reader()->isIMemory(shared)) {
-            mNamedReaders.removeAt(i);
+    for (size_t i = 0; i < mDumpReaders.size(); ) {
+        if (mDumpReaders[i]->isIMemory(shared)) {
+            mDumpReaders.removeAt(i);
+            // TODO mMerger.removeReaders(shared)
         } else {
             i++;
         }
@@ -106,7 +107,7 @@
     if (args.size() > 0) {
         const String8 arg0(args[0]);
         if (!strcmp(arg0.string(), "-r")) {
-            // needed because mNamedReaders is protected by mLock
+            // needed because mDumpReaders is protected by mLock
             bool locked = dumpTryLock(mLock);
 
             // failed to lock - MediaLogService is probably deadlocked
@@ -121,11 +122,12 @@
                 return NO_ERROR;
             }
 
-            for (const auto& namedReader : mNamedReaders) {
+            for (const auto &dumpReader : mDumpReaders) {
                 if (fd >= 0) {
-                    dprintf(fd, "\n%s:\n", namedReader.name());
+                    dprintf(fd, "\n%s:\n", dumpReader->name().c_str());
+                    dumpReader->dump(fd, 0 /*indent*/);
                 } else {
-                    ALOGI("%s:", namedReader.name());
+                    ALOGI("%s:", dumpReader->name().c_str());
                 }
             }
             mLock.unlock();
diff --git a/services/medialog/MediaLogService.h b/services/medialog/MediaLogService.h
index c945d1f..a1572f9 100644
--- a/services/medialog/MediaLogService.h
+++ b/services/medialog/MediaLogService.h
@@ -55,7 +55,7 @@
 
     Mutex               mLock;
 
-    Vector<NBLog::NamedReader> mNamedReaders;   // protected by mLock
+    Vector<sp<NBLog::DumpReader>> mDumpReaders;   // protected by mLock
 
     // FIXME Need comments on all of these, especially about locking
     NBLog::Shared *mMergerShared;
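
The MediaLogService change above splits what used to be a single NamedReader per writer into two readers over the same shared region: one handed to the merger for the merged timeline and one DumpReader retained only so dumpsys can print per-writer logs, presumably so dump reads do not disturb the merger's position in the shared buffer. A schematic sketch of that ownership split, with placeholder types standing in for IMemory and the NBLog classes, follows.

#include <memory>
#include <string>
#include <utility>
#include <vector>

// Placeholder types standing in for IMemory and the NBLog reader classes.
struct SharedRegionSketch {};

struct ReaderSketch {
    std::shared_ptr<SharedRegionSketch> shared;
    std::string name;
};

struct MergerSketch {
    std::vector<ReaderSketch> readers;
    void addReader(ReaderSketch reader) { readers.push_back(std::move(reader)); }
};

struct LogServiceSketch {
    MergerSketch merger;                       // consumes one reader per writer
    std::vector<ReaderSketch> dumpReaders;     // analogous to mDumpReaders, dumpsys only

    void registerWriter(std::shared_ptr<SharedRegionSketch> shared, std::string name) {
        merger.addReader({shared, name});      // feeds the merged, system-wide timeline
        dumpReaders.push_back({shared, name}); // independent cursor for per-writer dumps
    }
};
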
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index b031888..1c03b7f 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -99,7 +99,7 @@
         }
         partIndex++;
     }
-    fifo->getFifoControllerBase()->advanceReadIndex(framesDesired);
+    fifo->advanceReadIndex(framesDesired);
 
 #if AAUDIO_MIXER_ATRACE_ENABLED
     ATRACE_END();
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index efac788..7ae7f1b 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -102,7 +102,7 @@
                             streamShared->setTimestampPositionOffset(positionOffset);
 
                             // Is the buffer too full to write a burst?
-                            if (fifo->getFifoControllerBase()->getEmptyFramesAvailable() <
+                            if (fifo->getEmptyFramesAvailable() <
                                     getFramesPerBurst()) {
                                 streamShared->incrementXRunCount();
                             } else {
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index f9e21fb..f30f9bb 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -189,6 +189,7 @@
         minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
     }
     status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+    bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
     if (status != OK) {
         ALOGE("%s() - createMmapBuffer() failed with status %d %s",
               __func__, status, strerror(-status));
@@ -198,18 +199,13 @@
         ALOGD("%s() createMmapBuffer() returned = %d, buffer_size = %d, burst_size %d"
                       ", Sharable FD: %s",
               __func__, status,
-              abs(mMmapBufferinfo.buffer_size_frames),
+              mMmapBufferinfo.buffer_size_frames,
               mMmapBufferinfo.burst_size_frames,
-              mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
+              isBufferShareable ? "Yes" : "No");
     }
 
     setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
-    // The audio HAL indicates if the shared memory fd can be shared outside of audioserver
-    // by returning a negative buffer size
-    if (getBufferCapacity() < 0) {
-        // Exclusive mode can be used by client or service.
-        setBufferCapacity(-getBufferCapacity());
-    } else {
+    if (!isBufferShareable) {
         // Exclusive mode can only be used by the service because the FD cannot be shared.
         uid_t audioServiceUid = getuid();
         if ((mMmapClient.clientUid != audioServiceUid) &&