Merge "Fix typo in dequeueInputBuffer's warning"
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index 4c28789..a86cc87 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -566,7 +566,7 @@
 
     for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
         uint32_t tag = tagArray[i];
-        String8 sectionString = tagToSectionMap.valueFor(tag);
+        const String8& sectionString = tagToSectionMap.valueFor(tag);
 
         // Set up tag to section index map
         ssize_t index = sections.indexOf(sectionString);
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 9bf8247..5e0db60 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -45,6 +45,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraCaptureSession is an opaque type that manages frame captures of a camera device.
  *
@@ -591,6 +593,10 @@
 camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session)
         __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 28
+
 typedef struct ACaptureSessionOutput ACaptureSessionOutput;
 
 /**
@@ -635,6 +641,7 @@
  */
 camera_status_t ACameraCaptureSession_updateSharedOutput(ACameraCaptureSession* session,
         ACaptureSessionOutput* output) __INTRODUCED_IN(28);
+#endif /* __ANDROID_API__ >= 28 */
 
 __END_DECLS
 
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index bdd27f9..7c13b34 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraDevice is opaque type that provides access to a camera device.
  *
@@ -666,6 +668,10 @@
         const ACameraCaptureSession_stateCallbacks* callbacks,
         /*out*/ACameraCaptureSession** session) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 28
+
 /**
  * Create a shared ACaptureSessionOutput object.
  *
@@ -757,6 +763,8 @@
         const ACameraCaptureSession_stateCallbacks* callbacks,
         /*out*/ACameraCaptureSession** session) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_DEVICE_H */
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index e19ce36..6b58155 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -40,6 +40,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 typedef enum {
     ACAMERA_OK = 0,
 
@@ -130,6 +132,8 @@
     ACAMERA_ERROR_PERMISSION_DENIED     = ACAMERA_ERROR_BASE - 13,
 } camera_status_t;
 
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_ERROR_H */
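For context, a minimal sketch (not part of this change, helper name illustrative) of what the new __ANDROID_API__ guards buy callers: the header can be included unconditionally, and code that touches the guarded camera declarations is simply compiled out when the build targets an API level below 24.

#include <camera/NdkCameraError.h>  // now safe to include regardless of the targeted API level

// Hypothetical helper, for illustration only.
static bool cameraNdkCompiledIn() {
#if __ANDROID_API__ >= 24
    return true;   // camera_status_t and the other guarded declarations are visible
#else
    return false;  // below API 24 the declarations are hidden, so nothing here may use them
#endif
}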
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index a1cca4d..ea76738 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraManager is opaque type that provides access to camera service.
  *
@@ -119,7 +121,7 @@
  *                 this callback returns.
  */
 typedef void (*ACameraManager_AvailabilityCallback)(void* context,
-        const char* cameraId) __INTRODUCED_IN(24);
+        const char* cameraId);
 
 /**
  * A listener for camera devices becoming available or unavailable to open.
@@ -274,6 +276,8 @@
         ACameraDevice_StateCallbacks* callback,
         /*out*/ACameraDevice** device) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_MANAGER_H */
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index 2078da7..611e270 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 /**
  * ACameraMetadata is opaque type that provides access to read-only camera metadata like camera
  * characteristics (via {@link ACameraManager_getCameraCharacteristics}) or capture results (via
@@ -229,6 +231,8 @@
  */
 void ACameraMetadata_free(ACameraMetadata* metadata) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_METADATA_H */
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 7398f78..0501006 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -40,6 +40,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 typedef enum acamera_metadata_section {
     ACAMERA_COLOR_CORRECTION,
     ACAMERA_CONTROL,
@@ -5092,12 +5094,26 @@
      * the following code snippet can be used:</p>
      * <pre><code>// Returns true if the device supports the required hardware level, or better.
      * boolean isHardwareLevelSupported(CameraCharacteristics c, int requiredLevel) {
+     *     final int[] sortedHwLevels = {
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_FULL,
+     *         CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_3
+     *     };
      *     int deviceLevel = c.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL);
-     *     if (deviceLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
-     *         return requiredLevel == deviceLevel;
+     *     if (requiredLevel == deviceLevel) {
+     *         return true;
      *     }
-     *     // deviceLevel is not LEGACY, can use numerical sort
-     *     return requiredLevel &lt;= deviceLevel;
+     *
+     *     for (int sortedlevel : sortedHwLevels) {
+     *         if (sortedlevel == requiredLevel) {
+     *             return true;
+     *         } else if (sortedlevel == deviceLevel) {
+     *             return false;
+     *         }
+     *     }
+     *     return false; // Should never reach here
      * }
      * </code></pre>
      * <p>At a high level, the levels are:</p>
@@ -5111,6 +5127,8 @@
      *   post-processing settings, and image capture at a high rate.</li>
      * <li><code>LEVEL_3</code> devices additionally support YUV reprocessing and RAW image capture, along
      *   with additional output stream configurations.</li>
+     * <li><code>EXTERNAL</code> devices are similar to <code>LIMITED</code> devices with exceptions like some sensor or
+     *   lens information not reported or less stable framerates.</li>
      * </ul>
      * <p>See the individual level enums for full descriptions of the supported capabilities.  The
      * ACAMERA_REQUEST_AVAILABLE_CAPABILITIES entry describes the device's capabilities at a
@@ -7878,6 +7896,9 @@
 
 } acamera_metadata_enum_android_distortion_correction_mode_t;
 
+
+#endif /* __ANDROID_API__ >= 24 */
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_METADATA_TAGS_H */
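The Java snippet in the documentation above targets CameraCharacteristics; a rough NDK-side analogue, assuming the ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL tag and ACameraMetadata_getConstEntry() from these headers, might look like the following sketch (the helper name is illustrative, not part of the change):

#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>

// Returns true if the device reports the required hardware level or better,
// using the same LEGACY < EXTERNAL < LIMITED < FULL < LEVEL_3 ordering as above.
static bool isHardwareLevelSupported(const ACameraMetadata* chars, uint8_t requiredLevel) {
    const uint8_t sortedHwLevels[] = {
        ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY,
        ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL,
        ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED,
        ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL,
        ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_3,
    };
    ACameraMetadata_const_entry entry;
    if (ACameraMetadata_getConstEntry(chars,
            ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL, &entry) != ACAMERA_OK || entry.count == 0) {
        return false;
    }
    uint8_t deviceLevel = entry.data.u8[0];
    if (requiredLevel == deviceLevel) {
        return true;
    }
    for (uint8_t level : sortedHwLevels) {
        if (level == requiredLevel) {
            return true;
        } else if (level == deviceLevel) {
            return false;
        }
    }
    return false;
}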
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index 2fb5d12..5340e76 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -44,6 +44,8 @@
 
 __BEGIN_DECLS
 
+#if __ANDROID_API__ >= 24
+
 // Container for output targets
 typedef struct ACameraOutputTargets ACameraOutputTargets;
 
@@ -302,6 +304,10 @@
  */
 void ACaptureRequest_free(ACaptureRequest* request) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 28
+
 /**
  * Associate an arbitrary user context pointer to the {@link ACaptureRequest}
  *
@@ -350,6 +356,8 @@
  */
 ACaptureRequest* ACaptureRequest_copy(const ACaptureRequest* src) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 __END_DECLS
 
 #endif /* _NDK_CAPTURE_REQUEST_H */
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 85aab57..bddf945 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -354,7 +354,10 @@
                     decodeTimesUs.push(delayDecodeUs);
                 }
 
-                if (showProgress && (n++ % 16) == 0) {
+                if (gVerbose) {
+                    MetaDataBase &meta = buffer->meta_data();
+                    fprintf(stdout, "%ld sample format: %s\n", numFrames, meta.toString().c_str());
+                } else if (showProgress && (n++ % 16) == 0) {
                     printf(".");
                     fflush(stdout);
                 }
@@ -647,7 +650,8 @@
         MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
         MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
         MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
-        MEDIA_MIMETYPE_VIDEO_DOLBY_VISION
+        MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+        MEDIA_MIMETYPE_AUDIO_EAC3, MEDIA_MIMETYPE_AUDIO_AC4
     };
 
     const char *codecType = queryDecoders? "decoder" : "encoder";
diff --git a/drm/common/Android.bp b/drm/common/Android.bp
index 1552c3f..272684c 100644
--- a/drm/common/Android.bp
+++ b/drm/common/Android.bp
@@ -35,7 +35,7 @@
 
     cflags: ["-Wall", "-Werror"],
 
-    static_libs: ["libbinder"],
+    shared_libs: ["libbinder"],
 
     export_include_dirs: ["include"],
 }
diff --git a/include/media/EventLog.h b/include/media/EventLog.h
new file mode 120000
index 0000000..9b2c4bf
--- /dev/null
+++ b/include/media/EventLog.h
@@ -0,0 +1 @@
+../../media/utils/include/mediautils/EventLog.h
\ No newline at end of file
diff --git a/include/media/TimeCheck.h b/include/media/TimeCheck.h
index e3ef134..85e17f9 120000
--- a/include/media/TimeCheck.h
+++ b/include/media/TimeCheck.h
@@ -1 +1 @@
-../../media/libmedia/include/media/TimeCheck.h
\ No newline at end of file
+../../media/utils/include/mediautils/TimeCheck.h
\ No newline at end of file
diff --git a/media/extractors/midi/MidiExtractor.cpp b/media/extractors/midi/MidiExtractor.cpp
index 949fbe0..a30b6f8 100644
--- a/media/extractors/midi/MidiExtractor.cpp
+++ b/media/extractors/midi/MidiExtractor.cpp
@@ -247,8 +247,9 @@
         EAS_I32 numRendered;
         EAS_RESULT result = EAS_Render(mEasData, p, mEasConfig->mixBufferSize, &numRendered);
         if (result != EAS_SUCCESS) {
-            ALOGE("EAS_Render returned %ld", result);
-            break;
+            ALOGE("EAS_Render() returned %ld, numBytesOutput = %d", result, numBytesOutput);
+            buffer->release();
+            return NULL; // Stop processing to prevent infinite loops.
         }
         p += numRendered * mEasConfig->numChannels;
         numBytesOutput += numRendered * mEasConfig->numChannels * sizeof(EAS_PCM);
diff --git a/media/extractors/mp3/XINGSeeker.cpp b/media/extractors/mp3/XINGSeeker.cpp
index 95ca556..01e06ca 100644
--- a/media/extractors/mp3/XINGSeeker.cpp
+++ b/media/extractors/mp3/XINGSeeker.cpp
@@ -44,7 +44,7 @@
 }
 
 bool XINGSeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
-    if (mSizeBytes == 0 || !mTOCValid || mDurationUs < 0) {
+    if (mSizeBytes == 0 || mDurationUs < 0) {
         return false;
     }
 
@@ -54,7 +54,7 @@
         fx = 0.0f;
     } else if( percent >= 100.0f ) {
         fx = 256.0f;
-    } else {
+    } else if (mTOCValid) {
         int a = (int)percent;
         float fa, fb;
         if ( a == 0 ) {
@@ -68,6 +68,8 @@
             fb = 256.0f;
         }
         fx = fa + (fb-fa)*(percent-a);
+    } else {
+        fx = percent * 2.56f;
     }
 
     *pos = (int)((1.0f/256.0f)*fx*mSizeBytes) + mFirstFramePos;
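For orientation on the fallback branch added above: without a valid TOC, fx = percent * 2.56 turns the final position formula into a plain linear estimate of the stream. A standalone sketch of that arithmetic (names and numbers are illustrative):

#include <cstdint>
#include <cstdio>

static int64_t linearSeekPos(float percent, int64_t sizeBytes, int64_t firstFramePos) {
    float fx = percent * 2.56f;  // 100% maps to 256, matching the TOC scale
    return (int64_t)((1.0f / 256.0f) * fx * sizeBytes) + firstFramePos;
}

int main() {
    // Seeking to 50% of a 4 MiB stream lands 2 MiB past the first frame.
    printf("%lld\n", (long long)linearSeekPos(50.0f, 4 * 1024 * 1024, 417));
    return 0;
}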
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
new file mode 100644
index 0000000..167d474
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -0,0 +1,624 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AC4Parser"
+
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include "AC4Parser.h"
+
+#define BOOLSTR(a)  ((a)?"true":"false")
+#define BYTE_ALIGN mBitReader.skipBits(mBitReader.numBitsLeft() % 8)
+#define CHECK_BITS_LEFT(n) if (mBitReader.numBitsLeft() < n) {return false;}
+
+namespace android {
+
+AC4Parser::AC4Parser() {
+}
+
+AC4DSIParser::AC4DSIParser(ABitReader &br)
+    : mBitReader(br){
+
+    mDSISize = mBitReader.numBitsLeft();
+}
+
+// ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+static const char *ChannelModes[] = {
+    "mono",
+    "stereo",
+    "3.0",
+    "5.0",
+    "5.1",
+    "7.0 (3/4/0)",
+    "7.1 (3/4/0.1)",
+    "7.0 (5/2/0)",
+    "7.1 (5/2/0.1)",
+    "7.0 (3/2/2)",
+    "7.1 (3/2/2.1)",
+    "7.0.4",
+    "7.1.4",
+    "9.0.4",
+    "9.1.4",
+    "22.2"
+};
+
+static const char* ContentClassifier[] = {
+    "Complete Main",
+    "Music and Effects",
+    "Visually Impaired",
+    "Hearing Impaired",
+    "Dialog",
+    "Commentary",
+    "Emergency",
+    "Voice Over"
+};
+
+bool AC4DSIParser::parseLanguageTag(uint32_t presentationID, uint32_t substreamID){
+    CHECK_BITS_LEFT(6);
+    uint32_t n_language_tag_bytes = mBitReader.getBits(6);
+    if (n_language_tag_bytes < 2 || n_language_tag_bytes >= 42) {
+        return false;
+    }
+    CHECK_BITS_LEFT(n_language_tag_bytes * 8);
+    char language_tag_bytes[42]; // TS 103 190 part 1 4.3.3.8.7
+    for (uint32_t i = 0; i < n_language_tag_bytes; i++) {
+        language_tag_bytes[i] = (char)mBitReader.getBits(8);
+    }
+    language_tag_bytes[n_language_tag_bytes] = 0;
+    ALOGV("%u.%u: language_tag = %s\n", presentationID, substreamID, language_tag_bytes);
+
+    std::string language(language_tag_bytes, n_language_tag_bytes);
+    mPresentations[presentationID].mLanguage = language;
+
+    return true;
+}
+
+// TS 103 190-1 v1.2.1 E.5 and TS 103 190-2 v1.1.1 E.9
+bool AC4DSIParser::parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID){
+    CHECK_BITS_LEFT(5);
+    uint32_t channel_mode = mBitReader.getBits(5);
+    CHECK_BITS_LEFT(2);
+    uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+    CHECK_BITS_LEFT(1);
+    bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+    ALOGV("%u.%u: channel_mode = %u (%s)\n", presentationID, substreamID, channel_mode,
+        channel_mode < NELEM(ChannelModes) ? ChannelModes[channel_mode] : "reserved");
+    ALOGV("%u.%u: dsi_sf_multiplier = %u\n", presentationID,
+        substreamID, dsi_sf_multiplier);
+    ALOGV("%u.%u: b_substream_bitrate_indicator = %s\n", presentationID,
+        substreamID, BOOLSTR(b_substream_bitrate_indicator));
+
+    if (b_substream_bitrate_indicator) {
+        CHECK_BITS_LEFT(5);
+        uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+        ALOGV("%u.%u: substream_bitrate_indicator = %u\n", presentationID, substreamID,
+            substream_bitrate_indicator);
+    }
+    if (channel_mode >= 7 && channel_mode <= 10) {
+        CHECK_BITS_LEFT(1);
+        uint32_t add_ch_base = mBitReader.getBits(1);
+        ALOGV("%u.%u: add_ch_base = %u\n", presentationID, substreamID, add_ch_base);
+    }
+    CHECK_BITS_LEFT(1);
+    bool b_content_type = (mBitReader.getBits(1) == 1);
+    ALOGV("%u.%u: b_content_type = %s\n", presentationID, substreamID, BOOLSTR(b_content_type));
+    if (b_content_type) {
+        CHECK_BITS_LEFT(3);
+        uint32_t content_classifier = mBitReader.getBits(3);
+        ALOGV("%u.%u: content_classifier = %u (%s)\n", presentationID, substreamID,
+            content_classifier, ContentClassifier[content_classifier]);
+
+        // For streams based on TS 103 190 part 1 the presentation level channel_mode doesn't
+        // exist and so we use the channel_mode from either the CM or M&E substream
+        // (they are mutually exclusive)
+        if (mPresentations[presentationID].mChannelMode == -1 &&
+            (content_classifier == 0 || content_classifier == 1)) {
+            mPresentations[presentationID].mChannelMode = channel_mode;
+        }
+        mPresentations[presentationID].mContentClassifier = content_classifier;
+        CHECK_BITS_LEFT(1);
+        bool b_language_indicator = (mBitReader.getBits(1) == 1);
+        ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, substreamID,
+            BOOLSTR(b_language_indicator));
+        if (b_language_indicator) {
+            if (!parseLanguageTag(presentationID, substreamID)) {
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+// ETSI TS 103 190-2 v1.1.1 section E.11
+bool AC4DSIParser::parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID)
+{
+    CHECK_BITS_LEFT(1);
+    bool b_substreams_present = (mBitReader.getBits(1) == 1);
+    CHECK_BITS_LEFT(1);
+    bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+    CHECK_BITS_LEFT(1);
+    bool b_channel_coded = (mBitReader.getBits(1) == 1);
+    CHECK_BITS_LEFT(8);
+    uint32_t n_substreams = mBitReader.getBits(8);
+    ALOGV("%u.%u: b_substreams_present = %s\n", presentationID, groupID,
+        BOOLSTR(b_substreams_present));
+    ALOGV("%u.%u: b_hsf_ext = %s\n", presentationID, groupID, BOOLSTR(b_hsf_ext));
+    ALOGV("%u.%u: b_channel_coded = %s\n", presentationID, groupID, BOOLSTR(b_channel_coded));
+    ALOGV("%u.%u: n_substreams = %u\n", presentationID, groupID, n_substreams);
+
+    for (uint32_t i = 0; i < n_substreams; i++) {
+        CHECK_BITS_LEFT(2);
+        uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+        CHECK_BITS_LEFT(1);
+        bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+        ALOGV("%u.%u.%u: dsi_sf_multiplier = %u\n", presentationID, groupID, i, dsi_sf_multiplier);
+        ALOGV("%u.%u.%u: b_substream_bitrate_indicator = %s\n", presentationID, groupID, i,
+            BOOLSTR(b_substream_bitrate_indicator));
+
+        if (b_substream_bitrate_indicator) {
+            CHECK_BITS_LEFT(5);
+            uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+            ALOGV("%u.%u.%u: substream_bitrate_indicator = %u\n", presentationID, groupID, i,
+                substream_bitrate_indicator);
+        }
+        if (b_channel_coded) {
+            CHECK_BITS_LEFT(24);
+            uint32_t dsi_substream_channel_mask = mBitReader.getBits(24);
+            ALOGV("%u.%u.%u: dsi_substream_channel_mask = 0x%06x\n", presentationID, groupID, i,
+                dsi_substream_channel_mask);
+        } else {
+            CHECK_BITS_LEFT(1);
+            bool b_ajoc = (mBitReader.getBits(1) == 1);
+            ALOGV("%u.%u.%u: b_ajoc = %s\n", presentationID, groupID, i, BOOLSTR(b_ajoc));
+            if (b_ajoc) {
+                CHECK_BITS_LEFT(1);
+                bool b_static_dmx = (mBitReader.getBits(1) == 1);
+                ALOGV("%u.%u.%u: b_static_dmx = %s\n", presentationID, groupID, i,
+                    BOOLSTR(b_static_dmx));
+                if (!b_static_dmx) {
+                    CHECK_BITS_LEFT(4);
+                    uint32_t n_dmx_objects_minus1 = mBitReader.getBits(4);
+                    ALOGV("%u.%u.%u: n_dmx_objects_minus1 = %u\n", presentationID, groupID, i,
+                        n_dmx_objects_minus1);
+                }
+                CHECK_BITS_LEFT(6);
+                uint32_t n_umx_objects_minus1 = mBitReader.getBits(6);
+                ALOGV("%u.%u.%u: n_umx_objects_minus1 = %u\n", presentationID, groupID, i,
+                    n_umx_objects_minus1);
+            }
+            CHECK_BITS_LEFT(4);
+            mBitReader.skipBits(4); // objects_assignment_mask
+        }
+    }
+
+    CHECK_BITS_LEFT(1);
+    bool b_content_type = (mBitReader.getBits(1) == 1);
+    ALOGV("%u.%u: b_content_type = %s\n", presentationID, groupID, BOOLSTR(b_content_type));
+    if (b_content_type) {
+        CHECK_BITS_LEFT(3);
+        uint32_t content_classifier = mBitReader.getBits(3);
+        ALOGV("%u.%u: content_classifier = %s (%u)\n", presentationID, groupID,
+            ContentClassifier[content_classifier], content_classifier);
+
+        mPresentations[presentationID].mContentClassifier = content_classifier;
+
+        CHECK_BITS_LEFT(1);
+        bool b_language_indicator = (mBitReader.getBits(1) == 1);
+        ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, groupID,
+            BOOLSTR(b_language_indicator));
+
+        if (b_language_indicator) {
+            if (!parseLanguageTag(presentationID, groupID)) {
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+bool AC4DSIParser::parseBitrateDsi() {
+    CHECK_BITS_LEFT(2 + 32 + 32);
+    mBitReader.skipBits(2); // bit_rate_mode
+    mBitReader.skipBits(32); // bit_rate
+    mBitReader.skipBits(32); // bit_rate_precision
+
+    return true;
+}
+
+// TS 103 190-1 section E.4 (ac4_dsi) and TS 103 190-2 section E.6 (ac4_dsi_v1)
+bool AC4DSIParser::parse() {
+    CHECK_BITS_LEFT(3);
+    uint32_t ac4_dsi_version = mBitReader.getBits(3);
+    if (ac4_dsi_version > 1) {
+        ALOGE("error while parsing ac-4 dsi: only versions 0 and 1 are supported");
+        return false;
+    }
+
+    CHECK_BITS_LEFT(7 + 1 + 4 + 9);
+    uint32_t bitstream_version = mBitReader.getBits(7);
+    mBitReader.skipBits(1); // fs_index
+    mBitReader.skipBits(4); // frame_rate_index
+    uint32_t n_presentations = mBitReader.getBits(9);
+
+    int32_t short_program_id = -1;
+    if (bitstream_version > 1) {
+        if (ac4_dsi_version == 0){
+            ALOGE("invalid ac4 dsi");
+            return false;
+        }
+        CHECK_BITS_LEFT(1);
+        bool b_program_id = (mBitReader.getBits(1) == 1);
+        if (b_program_id) {
+            CHECK_BITS_LEFT(16 + 1);
+            short_program_id = mBitReader.getBits(16);
+            bool b_uuid = (mBitReader.getBits(1) == 1);
+            if (b_uuid) {
+                const uint32_t kAC4UUIDSizeInBytes = 16;
+                char program_uuid[kAC4UUIDSizeInBytes];
+                CHECK_BITS_LEFT(kAC4UUIDSizeInBytes * 8);
+                for (uint32_t i = 0; i < kAC4UUIDSizeInBytes; i++) {
+                    program_uuid[i] = (char)(mBitReader.getBits(8));
+                }
+                ALOGV("UUID = %.16s", program_uuid);
+            }
+        }
+    }
+
+    if (ac4_dsi_version == 1) {
+        if (!parseBitrateDsi()) {
+            return false;
+        }
+        BYTE_ALIGN;
+    }
+
+    for (uint32_t presentation = 0; presentation < n_presentations; presentation++) {
+        mPresentations[presentation].mProgramID = short_program_id;
+        // known as b_single_substream in ac4_dsi_version 0
+        bool b_single_substream_group = false;
+        uint32_t presentation_config = 0, presentation_version = 0;
+        uint32_t pres_bytes = 0;
+
+        if (ac4_dsi_version == 0) {
+            CHECK_BITS_LEFT(1 + 5 + 5);
+            b_single_substream_group = (mBitReader.getBits(1) == 1);
+            presentation_config = mBitReader.getBits(5);
+            presentation_version = mBitReader.getBits(5);
+        } else if (ac4_dsi_version == 1) {
+            CHECK_BITS_LEFT(8 + 8);
+            presentation_version = mBitReader.getBits(8);
+            pres_bytes = mBitReader.getBits(8);
+            if (pres_bytes == 0xff) {
+                CHECK_BITS_LEFT(16);
+                pres_bytes += mBitReader.getBits(16);
+            }
+            ALOGV("%u: pres_bytes = %u\n", presentation, pres_bytes);
+            if (presentation_version > 1) {
+                CHECK_BITS_LEFT(pres_bytes * 8);
+                mBitReader.skipBits(pres_bytes * 8);
+                continue;
+            }
+            // ac4_presentation_v0_dsi() and ac4_presentation_v1_dsi() both
+            // start with a presentation_config of 5 bits
+            CHECK_BITS_LEFT(5);
+            presentation_config = mBitReader.getBits(5);
+            b_single_substream_group = (presentation_config == 0x1f);
+        }
+
+        static const char *PresentationConfig[] = {
+            "Music&Effects + Dialog",
+            "Main + DE",
+            "Main + Associate",
+            "Music&Effects + Dialog + Associate",
+            "Main + DE + Associate",
+            "Arbitrary substream groups",
+            "EMDF only"
+        };
+        ALOGV("%u: b_single_substream/group = %s\n", presentation,
+            BOOLSTR(b_single_substream_group));
+        ALOGV("%u: presentation_version = %u\n", presentation, presentation_version);
+        ALOGV("%u: presentation_config = %u (%s)\n", presentation, presentation_config,
+            (presentation_config >= NELEM(PresentationConfig) ?
+            "reserved" : PresentationConfig[presentation_config]));
+
+        /* record a marker, less the size of the presentation_config */
+        uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8;
+
+        bool b_add_emdf_substreams = false;
+        if (!b_single_substream_group && presentation_config == 6) {
+            b_add_emdf_substreams = true;
+            ALOGV("%u: b_add_emdf_substreams = %s\n", presentation, BOOLSTR(b_add_emdf_substreams));
+        } else {
+            CHECK_BITS_LEFT(3 + 1);
+            uint32_t mdcompat = mBitReader.getBits(3);
+            ALOGV("%u: mdcompat = %d\n", presentation, mdcompat);
+
+            bool b_presentation_group_index = (mBitReader.getBits(1) == 1);
+            ALOGV("%u: b_presentation_group_index = %s\n", presentation,
+                BOOLSTR(b_presentation_group_index));
+            if (b_presentation_group_index) {
+                CHECK_BITS_LEFT(5);
+                mPresentations[presentation].mGroupIndex = mBitReader.getBits(5);
+                ALOGV("%u: presentation_group_index = %d\n", presentation,
+                    mPresentations[presentation].mGroupIndex);
+            }
+            CHECK_BITS_LEFT(2);
+            uint32_t dsi_frame_rate_multiply_info = mBitReader.getBits(2);
+            ALOGV("%u: dsi_frame_rate_multiply_info = %d\n", presentation,
+                dsi_frame_rate_multiply_info);
+            if (ac4_dsi_version == 1 && presentation_version == 1) {
+                CHECK_BITS_LEFT(2);
+                uint32_t dsi_frame_rate_fraction_info = mBitReader.getBits(2);
+                ALOGV("%u: dsi_frame_rate_fraction_info = %d\n", presentation,
+                    dsi_frame_rate_fraction_info);
+            }
+            CHECK_BITS_LEFT(5 + 10);
+            uint32_t presentation_emdf_version = mBitReader.getBits(5);
+            uint32_t presentation_key_id = mBitReader.getBits(10);
+            ALOGV("%u: presentation_emdf_version = %d\n", presentation, presentation_emdf_version);
+            ALOGV("%u: presentation_key_id = %d\n", presentation, presentation_key_id);
+
+            if (ac4_dsi_version == 1) {
+                bool b_presentation_channel_coded = false;
+                if (presentation_version == 0) {
+                    b_presentation_channel_coded = true;
+                } else {
+                    CHECK_BITS_LEFT(1);
+                    b_presentation_channel_coded = (mBitReader.getBits(1) == 1);
+                }
+                ALOGV("%u: b_presentation_channel_coded = %s\n", presentation,
+                    BOOLSTR(b_presentation_channel_coded));
+                if (b_presentation_channel_coded) {
+                    if (presentation_version == 1) {
+                        CHECK_BITS_LEFT(5);
+                        uint32_t dsi_presentation_ch_mode = mBitReader.getBits(5);
+                        mPresentations[presentation].mChannelMode = dsi_presentation_ch_mode;
+                        ALOGV("%u: dsi_presentation_ch_mode = %d (%s)\n", presentation,
+                            dsi_presentation_ch_mode,
+                            dsi_presentation_ch_mode < NELEM(ChannelModes) ?
+                            ChannelModes[dsi_presentation_ch_mode] : "reserved");
+
+                        if (dsi_presentation_ch_mode >= 11 && dsi_presentation_ch_mode <= 14) {
+                            CHECK_BITS_LEFT(1 + 2);
+                            uint32_t pres_b_4_back_channels_present = mBitReader.getBits(1);
+                            uint32_t pres_top_channel_pairs = mBitReader.getBits(2);
+                            ALOGV("%u: pres_b_4_back_channels_present = %s\n", presentation,
+                                BOOLSTR(pres_b_4_back_channels_present));
+                            ALOGV("%u: pres_top_channel_pairs = %d\n", presentation,
+                                pres_top_channel_pairs);
+                        }
+                    }
+                    // presentation_channel_mask in ac4_presentation_v0_dsi()
+                    CHECK_BITS_LEFT(24);
+                    uint32_t presentation_channel_mask_v1 = mBitReader.getBits(24);
+                    ALOGV("%u: presentation_channel_mask_v1 = 0x%06x\n", presentation,
+                        presentation_channel_mask_v1);
+                }
+                if (presentation_version == 1) {
+                    CHECK_BITS_LEFT(1);
+                    bool b_presentation_core_differs = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_presentation_core_differs = %s\n", presentation,
+                        BOOLSTR(b_presentation_core_differs));
+                    if (b_presentation_core_differs) {
+                        CHECK_BITS_LEFT(1);
+                        bool b_presentation_core_channel_coded = (mBitReader.getBits(1) == 1);
+                        if (b_presentation_core_channel_coded) {
+                            CHECK_BITS_LEFT(2);
+                            mBitReader.skipBits(2); // dsi_presentation_channel_mode_core
+                        }
+                    }
+                    CHECK_BITS_LEFT(1);
+                    bool b_presentation_filter = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_presentation_filter = %s\n", presentation,
+                        BOOLSTR(b_presentation_filter));
+                    if (b_presentation_filter) {
+                        CHECK_BITS_LEFT(1 + 8);
+                        bool b_enable_presentation = (mBitReader.getBits(1) == 1);
+                        if (!b_enable_presentation) {
+                            mPresentations[presentation].mEnabled = false;
+                        }
+                        ALOGV("%u: b_enable_presentation = %s\n", presentation,
+                            BOOLSTR(b_enable_presentation));
+                        uint32_t n_filter_bytes = mBitReader.getBits(8);
+                        CHECK_BITS_LEFT(n_filter_bytes * 8);
+                        for (uint32_t i = 0; i < n_filter_bytes; i++) {
+                            mBitReader.skipBits(8); // filter_data
+                        }
+                    }
+                }
+            } /* ac4_dsi_version == 1 */
+
+            if (b_single_substream_group) {
+                if (presentation_version == 0) {
+                    if (!parseSubstreamDSI(presentation, 0)) {
+                        return false;
+                    }
+                } else {
+                    if (!parseSubstreamGroupDSI(presentation, 0)) {
+                        return false;
+                    }
+                }
+            } else {
+                if (ac4_dsi_version == 1) {
+                    CHECK_BITS_LEFT(1);
+                    bool b_multi_pid = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_multi_pid = %s\n", presentation, BOOLSTR(b_multi_pid));
+                } else {
+                    CHECK_BITS_LEFT(1);
+                    bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+                    ALOGV("%u: b_hsf_ext = %s\n", presentation, BOOLSTR(b_hsf_ext));
+                }
+                switch (presentation_config) {
+                case 0:
+                case 1:
+                case 2:
+                    if (presentation_version == 0) {
+                        if (!parseSubstreamDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamDSI(presentation, 1)) {
+                            return false;
+                        }
+                    } else {
+                        if (!parseSubstreamGroupDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamGroupDSI(presentation, 1)) {
+                            return false;
+                        }
+                    }
+                    break;
+                case 3:
+                case 4:
+                    if (presentation_version == 0) {
+                        if (!parseSubstreamDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamDSI(presentation, 1)) {
+                            return false;
+                        }
+                        if (!parseSubstreamDSI(presentation, 2)) {
+                            return false;
+                        }
+                    } else {
+                        if (!parseSubstreamGroupDSI(presentation, 0)) {
+                            return false;
+                        }
+                        if (!parseSubstreamGroupDSI(presentation, 1)) {
+                            return false;
+                        }
+                        if (!parseSubstreamGroupDSI(presentation, 2)) {
+                            return false;
+                        }
+                    }
+                    break;
+                case 5:
+                    if (presentation_version == 0) {
+                        if (!parseSubstreamDSI(presentation, 0)) {
+                            return false;
+                        }
+                    } else {
+                        CHECK_BITS_LEFT(3);
+                        uint32_t n_substream_groups_minus2 = mBitReader.getBits(3);
+                        ALOGV("%u: n_substream_groups_minus2 = %d\n", presentation,
+                            n_substream_groups_minus2);
+                        for (uint32_t sg = 0; sg < n_substream_groups_minus2 + 2; sg++) {
+                            if (!parseSubstreamGroupDSI(presentation, sg)) {
+                                return false;
+                            }
+                        }
+                    }
+                    break;
+                default:
+                    CHECK_BITS_LEFT(7);
+                    uint32_t n_skip_bytes = mBitReader.getBits(7);
+                    CHECK_BITS_LEFT(n_skip_bytes * 8);
+                    for (uint32_t j = 0; j < n_skip_bytes; j++) {
+                        mBitReader.getBits(8);
+                    }
+                    break;
+                }
+                CHECK_BITS_LEFT(1 + 1);
+                bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+                mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+                b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+                ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+                ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+                    BOOLSTR(b_add_emdf_substreams));
+            }
+        }
+        if (b_add_emdf_substreams) {
+            CHECK_BITS_LEFT(7);
+            uint32_t n_add_emdf_substreams = mBitReader.getBits(7);
+            for (uint32_t j = 0; j < n_add_emdf_substreams; j++) {
+                CHECK_BITS_LEFT(5 + 10);
+                uint32_t substream_emdf_version = mBitReader.getBits(5);
+                uint32_t substream_key_id = mBitReader.getBits(10);
+                ALOGV("%u: emdf_substream[%d]: version=%d, key_id=%d\n", presentation, j,
+                    substream_emdf_version, substream_key_id);
+            }
+        }
+
+        bool b_presentation_bitrate_info = false;
+        if (presentation_version > 0) {
+            CHECK_BITS_LEFT(1);
+            b_presentation_bitrate_info = (mBitReader.getBits(1) == 1);
+        }
+
+        ALOGV("b_presentation_bitrate_info = %s\n", BOOLSTR(b_presentation_bitrate_info));
+        if (b_presentation_bitrate_info) {
+            if (!parseBitrateDsi()) {
+                return false;
+            }
+        }
+
+        if (presentation_version > 0) {
+            CHECK_BITS_LEFT(1);
+            bool b_alternative = (mBitReader.getBits(1) == 1);
+            ALOGV("b_alternative = %s\n", BOOLSTR(b_alternative));
+            if (b_alternative) {
+                BYTE_ALIGN;
+                CHECK_BITS_LEFT(16);
+                uint32_t name_len = mBitReader.getBits(16);
+                CHECK_BITS_LEFT(name_len * 8);
+                char* presentation_name = new char[name_len + 1];
+                for (uint32_t i = 0; i < name_len; i++) {
+                    presentation_name[i] = (char)(mBitReader.getBits(8));
+                }
+                presentation_name[name_len] = '\0';
+                std::string description(presentation_name, name_len);
+                mPresentations[presentation].mDescription = description;
+                delete[] presentation_name; // description owns its own copy; free the temporary buffer
+                CHECK_BITS_LEFT(5);
+                uint32_t n_targets = mBitReader.getBits(5);
+                CHECK_BITS_LEFT(n_targets * (3 + 8));
+                for (uint32_t i = 0; i < n_targets; i++){
+                    mBitReader.skipBits(3); // target_md_compat
+                    mBitReader.skipBits(8); // target_device_category
+                }
+            }
+        }
+
+        BYTE_ALIGN;
+
+        if (ac4_dsi_version == 1) {
+            uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
+            if (mBitReader.numBitsLeft() % 8 != 0) {
+                end += 1;
+            }
+
+            uint64_t presentation_bytes = end - start;
+            uint64_t skip_bytes = pres_bytes - presentation_bytes;
+            ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
+            CHECK_BITS_LEFT(skip_bytes * 8);
+            mBitReader.skipBits(skip_bytes * 8);
+        }
+
+        // we should know this or something is probably wrong
+        // with the bitstream (or we don't support it)
+        if (mPresentations[presentation].mChannelMode == -1){
+            ALOGE("could not determine channel mode of presentation %d", presentation);
+            return false;
+        }
+    } /* each presentation */
+
+    return true;
+}
+
+};
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/AC4Parser.h
new file mode 100644
index 0000000..73b6e31
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AC4_PARSER_H_
+#define AC4_PARSER_H_
+
+#include <cstdint>
+#include <map>
+#include <string>
+
+#include <media/stagefright/foundation/ABitReader.h>
+
+namespace android {
+
+class AC4Parser {
+public:
+    AC4Parser();
+    virtual ~AC4Parser() { }
+
+    virtual bool parse() = 0;
+
+    struct AC4Presentation {
+        int32_t mChannelMode = -1;
+        int32_t mProgramID = -1;
+        int32_t mGroupIndex = -1;
+
+        // TS 103 190-1 v1.2.1 4.3.3.8.1
+        enum ContentClassifiers {
+            kCompleteMain,
+            kMusicAndEffects,
+            kVisuallyImpaired,
+            kHearingImpaired,
+            kDialog,
+            kCommentary,
+            kEmergency,
+            kVoiceOver
+        };
+
+        uint32_t mContentClassifier = kCompleteMain;
+
+        // ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+        enum InputChannelMode {
+            kChannelMode_Mono,
+            kChannelMode_Stereo,
+            kChannelMode_3_0,
+            kChannelMode_5_0,
+            kChannelMode_5_1,
+            kChannelMode_7_0_34,
+            kChannelMode_7_1_34,
+            kChannelMode_7_0_52,
+            kChannelMode_7_1_52,
+            kChannelMode_7_0_322,
+            kChannelMode_7_1_322,
+            kChannelMode_7_0_4,
+            kChannelMode_7_1_4,
+            kChannelMode_9_0_4,
+            kChannelMode_9_1_4,
+            kChannelMode_22_2,
+            kChannelMode_Reserved,
+        };
+
+        bool mHasDialogEnhancements = false;
+        bool mPreVirtualized = false;
+        bool mEnabled = true;
+
+        std::string mLanguage;
+        std::string mDescription;
+    };
+    typedef std::map<uint32_t, AC4Presentation> AC4Presentations;
+
+    const AC4Presentations& getPresentations() const { return mPresentations; }
+
+protected:
+    AC4Presentations mPresentations;
+};
+
+class AC4DSIParser: public AC4Parser {
+public:
+    explicit AC4DSIParser(ABitReader &br);
+    virtual ~AC4DSIParser() { }
+
+    bool parse();
+
+private:
+    bool parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID);
+    bool parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID);
+    bool parseLanguageTag(uint32_t presentationID, uint32_t substreamID);
+    bool parseBitrateDsi();
+
+    uint64_t mDSISize;
+    ABitReader& mBitReader;
+};
+
+};
+
+#endif  // AC4_PARSER_H_
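A minimal usage sketch based only on the interface above: wrap the dac4 payload in an ABitReader, run the parser, then walk the presentation map it filled in. The wrapper function is illustrative and not part of the change.

#include <cstdio>

#include <media/stagefright/foundation/ABitReader.h>

#include "AC4Parser.h"

static void dumpAC4Presentations(const uint8_t* dsi, size_t dsiSize) {
    android::ABitReader br(dsi, dsiSize);
    android::AC4DSIParser parser(br);
    if (!parser.parse()) {
        return;  // malformed or unsupported ac4_dsi
    }
    for (const auto& entry : parser.getPresentations()) {
        const android::AC4Parser::AC4Presentation& p = entry.second;
        printf("presentation %u: channel_mode=%d language=%s enabled=%d\n",
               entry.first, p.mChannelMode, p.mLanguage.c_str(), p.mEnabled ? 1 : 0);
    }
}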
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index fa739e8..40b2c97 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -2,6 +2,7 @@
     name: "libmp4extractor_defaults",
 
     srcs: [
+        "AC4Parser.cpp",
         "ItemTable.cpp",
         "MPEG4Extractor.cpp",
         "SampleIterator.cpp",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 7b3b81d..fe9f99c 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -26,6 +26,7 @@
 
 #include <utils/Log.h>
 
+#include "AC4Parser.h"
 #include "MPEG4Extractor.h"
 #include "SampleTable.h"
 #include "ItemTable.h"
@@ -125,6 +126,8 @@
 
     bool mIsAVC;
     bool mIsHEVC;
+    bool mIsAC4;
+
     size_t mNALLengthSize;
 
     bool mStarted;
@@ -310,6 +313,9 @@
         case FOURCC('s', 'a', 'w', 'b'):
             return MEDIA_MIMETYPE_AUDIO_AMR_WB;
 
+        case FOURCC('e', 'c', '-', '3'):
+            return MEDIA_MIMETYPE_AUDIO_EAC3;
+
         case FOURCC('m', 'p', '4', 'v'):
             return MEDIA_MIMETYPE_VIDEO_MPEG4;
 
@@ -324,6 +330,8 @@
         case FOURCC('h', 'v', 'c', '1'):
         case FOURCC('h', 'e', 'v', '1'):
             return MEDIA_MIMETYPE_VIDEO_HEVC;
+        case FOURCC('a', 'c', '-', '4'):
+            return MEDIA_MIMETYPE_AUDIO_AC4;
         default:
             ALOGW("Unknown fourcc: %c%c%c%c",
                    (fourcc >> 24) & 0xff,
@@ -2433,7 +2441,19 @@
         case FOURCC('a', 'c', '-', '3'):
         {
             *offset += chunk_size;
-            return parseAC3SampleEntry(data_offset);
+            return parseAC3SpecificBox(data_offset);
+        }
+
+        case FOURCC('e', 'c', '-', '3'):
+        {
+            *offset += chunk_size;
+            return parseEAC3SpecificBox(data_offset);
+        }
+
+        case FOURCC('a', 'c', '-', '4'):
+        {
+            *offset += chunk_size;
+            return parseAC4SpecificBox(data_offset);
         }
 
         case FOURCC('f', 't', 'y', 'p'):
@@ -2507,36 +2527,260 @@
     return OK;
 }
 
-status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+status_t MPEG4Extractor::parseChannelCountSampleRate(
+        off64_t *offset, uint16_t *channelCount, uint16_t *sampleRate) {
     // skip 16 bytes:
     //  + 6-byte reserved,
     //  + 2-byte data reference index,
     //  + 8-byte reserved
-    offset += 16;
-    uint16_t channelCount;
-    if (!mDataSource->getUInt16(offset, &channelCount)) {
+    *offset += 16;
+    if (!mDataSource->getUInt16(*offset, channelCount)) {
+        ALOGE("MPEG4Extractor: error while reading sample entry box: cannot read channel count");
         return ERROR_MALFORMED;
     }
     // skip 8 bytes:
     //  + 2-byte channelCount,
     //  + 2-byte sample size,
     //  + 4-byte reserved
-    offset += 8;
-    uint16_t sampleRate;
-    if (!mDataSource->getUInt16(offset, &sampleRate)) {
-        ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+    *offset += 8;
+    if (!mDataSource->getUInt16(*offset, sampleRate)) {
+        ALOGE("MPEG4Extractor: error while reading sample entry box: cannot read sample rate");
         return ERROR_MALFORMED;
     }
-
     // skip 4 bytes:
     //  + 2-byte sampleRate,
     //  + 2-byte reserved
-    offset += 4;
-    return parseAC3SpecificBox(offset, sampleRate);
+    *offset += 4;
+    return OK;
 }
 
-status_t MPEG4Extractor::parseAC3SpecificBox(
-        off64_t offset, uint16_t sampleRate) {
+status_t MPEG4Extractor::parseAC4SpecificBox(off64_t offset) {
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    uint16_t sampleRate, channelCount;
+    status_t status;
+    if ((status = parseChannelCountSampleRate(&offset, &channelCount, &sampleRate)) != OK) {
+        return status;
+    }
+    uint32_t size;
+    // + 4-byte size
+    // + 4-byte type
+    // + 3-byte payload
+    const uint32_t kAC4MinimumBoxSize = 4 + 4 + 3;
+    if (!mDataSource->getUInt32(offset, &size) || size < kAC4MinimumBoxSize) {
+        ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read specific box size");
+        return ERROR_MALFORMED;
+    }
+
+    // + 4-byte size
+    offset += 4;
+    uint32_t type;
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '4')) {
+        ALOGE("MPEG4Extractor: error while reading ac-4 specific block: header not dac4");
+        return ERROR_MALFORMED;
+    }
+
+    // + 4-byte type
+    offset += 4;
+    // at least for AC4 DSI v1 this is big enough
+    const uint32_t kAC4SpecificBoxPayloadSize = 256;
+    uint8_t chunk[kAC4SpecificBoxPayloadSize];
+    ssize_t dsiSize = size - 8; // size of box - size and type fields
+    if (dsiSize >= (ssize_t)kAC4SpecificBoxPayloadSize ||
+        mDataSource->readAt(offset, chunk, dsiSize) != dsiSize) {
+        ALOGE("MPEG4Extractor: error while reading ac-4 specific block: bitstream fields");
+        return ERROR_MALFORMED;
+    }
+    // + size-byte payload
+    offset += dsiSize;
+    ABitReader br(chunk, dsiSize);
+    AC4DSIParser parser(br);
+    if (!parser.parse()){
+        ALOGE("MPEG4Extractor: error while parsing ac-4 specific block");
+        return ERROR_MALFORMED;
+    }
+
+    mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+    mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+    mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+    return OK;
+}
+
+status_t MPEG4Extractor::parseEAC3SpecificBox(off64_t offset) {
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    uint16_t sampleRate, channels;
+    status_t status;
+    if ((status = parseChannelCountSampleRate(&offset, &channels, &sampleRate)) != OK) {
+        return status;
+    }
+    uint32_t size;
+    // + 4-byte size
+    // + 4-byte type
+    // + 3-byte payload
+    const uint32_t kEAC3SpecificBoxMinSize = 11;
+    // 13 + 3 + (8 * (2 + 5 + 5 + 3 + 1 + 3 + 4 + (14 * 9 + 1))) bits == 152 bytes theoretical max
+    // calculated from the required bits read below as well as the maximum number of independent
+    // and dependent substreams you can have
+    const uint32_t kEAC3SpecificBoxMaxSize = 152;
+    if (!mDataSource->getUInt32(offset, &size) ||
+        size < kEAC3SpecificBoxMinSize ||
+        size > kEAC3SpecificBoxMaxSize) {
+        ALOGE("MPEG4Extractor: error while reading eac-3 block: cannot read specific box size");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    uint32_t type;
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'e', 'c', '3')) {
+        ALOGE("MPEG4Extractor: error while reading eac-3 specific block: header not dec3");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    uint8_t* chunk = new (std::nothrow) uint8_t[size];
+    if (chunk == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    if (mDataSource->readAt(offset, chunk, size) != (ssize_t)size) {
+        ALOGE("MPEG4Extractor: error while reading eac-3 specific block: bitstream fields");
+        delete[] chunk;
+        return ERROR_MALFORMED;
+    }
+
+    ABitReader br(chunk, size);
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+    static const unsigned sampleRateTable[] = {48000, 44100, 32000};
+
+    if (br.numBitsLeft() < 16) {
+        delete[] chunk;
+        return ERROR_MALFORMED;
+    }
+    unsigned data_rate = br.getBits(13);
+    ALOGV("EAC3 data rate = %d", data_rate);
+
+    unsigned num_ind_sub = br.getBits(3) + 1;
+    ALOGV("EAC3 independent substreams = %d", num_ind_sub);
+    if (br.numBitsLeft() < (num_ind_sub * 23)) {
+        delete[] chunk;
+        return ERROR_MALFORMED;
+    }
+
+    unsigned channelCount = 0;
+    for (unsigned i = 0; i < num_ind_sub; i++) {
+        unsigned fscod = br.getBits(2);
+        if (fscod == 3) {
+            ALOGE("Incorrect fscod (3) in EAC3 header");
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+        unsigned boxSampleRate = sampleRateTable[fscod];
+        if (boxSampleRate != sampleRate) {
+            ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+                boxSampleRate, sampleRate);
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+
+        unsigned bsid = br.getBits(5);
+        if (bsid < 8) {
+            ALOGW("Incorrect bsid in EAC3 header. Possibly AC-3?");
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+
+        // skip
+        br.skipBits(2);
+        unsigned bsmod = br.getBits(3);
+        unsigned acmod = br.getBits(3);
+        unsigned lfeon = br.getBits(1);
+        // we currently only support the first stream
+        if (i == 0)
+            channelCount = channelCountTable[acmod] + lfeon;
+        ALOGV("bsmod = %d, acmod = %d, lfeon = %d", bsmod, acmod, lfeon);
+
+        br.skipBits(3);
+        unsigned num_dep_sub = br.getBits(4);
+        ALOGV("EAC3 dependent substreams = %d", num_dep_sub);
+        if (num_dep_sub != 0) {
+            if (br.numBitsLeft() < 9) {
+                delete[] chunk;
+                return ERROR_MALFORMED;
+            }
+            static const char* chan_loc_tbl[] = { "Lc/Rc","Lrs/Rrs","Cs","Ts","Lsd/Rsd",
+                "Lw/Rw","Lvh/Rvh","Cvh","Lfe2" };
+            unsigned chan_loc = br.getBits(9);
+            unsigned mask = 1;
+            for (unsigned j = 0; j < 9; j++, mask <<= 1) {
+                if ((chan_loc & mask) != 0) {
+                    // we currently only support the first stream
+                    if (i == 0) {
+                        channelCount++;
+                        // these are 2 channels in the mask
+                        if (j == 0 || j == 1 || j == 4 || j == 5 || j == 6) {
+                            channelCount++;
+                        }
+                    }
+                    ALOGV(" %s", chan_loc_tbl[j]);
+                }
+            }
+        } else {
+            if (br.numBitsLeft() == 0) {
+                delete[] chunk;
+                return ERROR_MALFORMED;
+            }
+            br.skipBits(1);
+        }
+    }
+
+    if (br.numBitsLeft() != 0) {
+        if (br.numBitsLeft() < 8) {
+            delete[] chunk;
+            return ERROR_MALFORMED;
+        }
+        unsigned mask = br.getBits(8);
+        for (unsigned i = 0; i < 8; i++) {
+            if (((0x1 << i) & mask) == 0)
+                continue;
+
+            if (br.numBitsLeft() < 8) {
+                delete[] chunk;
+                return ERROR_MALFORMED;
+            }
+            switch (i) {
+                case 0: {
+                    unsigned complexity = br.getBits(8);
+                    ALOGV("Found a JOC stream with complexity = %d", complexity);
+                }break;
+                default: {
+                    br.skipBits(8);
+                }break;
+            }
+        }
+    }
+    mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EAC3);
+    mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+    mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+
+    delete[] chunk;
+    return OK;
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(off64_t offset) {
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    uint16_t sampleRate, channels;
+    status_t status;
+    if ((status = parseChannelCountSampleRate(&offset, &channels, &sampleRate)) != OK) {
+        return status;
+    }
     uint32_t size;
     // + 4-byte size
     // + 4-byte type
@@ -2591,9 +2835,6 @@
     unsigned lfeon = br.getBits(1);
     unsigned channelCount = channelCountTable[acmod] + lfeon;
 
-    if (mLastTrack == NULL) {
-        return ERROR_MALFORMED;
-    }
     mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
     mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
     mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
@@ -3857,6 +4098,7 @@
       mCurrentSampleInfoOffsets(NULL),
       mIsAVC(false),
       mIsHEVC(false),
+      mIsAC4(false),
       mNALLengthSize(0),
       mStarted(false),
       mGroup(NULL),
@@ -3890,6 +4132,7 @@
     mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
     mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
               !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+    mIsAC4 = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4);
 
     if (mIsAVC) {
         uint32_t type;
@@ -4830,7 +5073,7 @@
         }
     }
 
-    if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
+    if ((!mIsAVC && !mIsHEVC && !mIsAC4) || mWantsNALFragments) {
         if (newBuffer) {
             ssize_t num_bytes_read =
                 mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
@@ -4862,13 +5105,20 @@
             ++mCurrentSampleIndex;
         }
 
-        if (!mIsAVC && !mIsHEVC) {
+        if (!mIsAVC && !mIsHEVC && !mIsAC4) {
             *out = mBuffer;
             mBuffer = NULL;
 
             return OK;
         }
 
+        if (mIsAC4) {
+            mBuffer->release();
+            mBuffer = NULL;
+
+            return ERROR_IO;
+        }
+
         // Each NAL unit is split up into its constituent fragments and
         // each one of them returned in its own buffer.
 
@@ -4907,6 +5157,58 @@
         *out = clone;
 
         return OK;
+    } else if (mIsAC4) {
+        CHECK(mBuffer != NULL);
+        // Make sure there is enough space to write the sync header and the raw frame
+        if (mBuffer->range_length() < (7 + size)) {
+            mBuffer->release();
+            mBuffer = NULL;
+
+            return ERROR_IO;
+        }
+
+        uint8_t *dstData = (uint8_t *)mBuffer->data();
+        size_t dstOffset = 0;
+        // Add AC-4 sync header to MPEG4 encapsulated AC-4 raw frame
+        // AC40 sync word, meaning no CRC at the end of the frame
+        dstData[dstOffset++] = 0xAC;
+        dstData[dstOffset++] = 0x40;
+        dstData[dstOffset++] = 0xFF;
+        dstData[dstOffset++] = 0xFF;
+        dstData[dstOffset++] = (uint8_t)((size >> 16) & 0xFF);
+        dstData[dstOffset++] = (uint8_t)((size >> 8) & 0xFF);
+        dstData[dstOffset++] = (uint8_t)((size >> 0) & 0xFF);
+
+        ssize_t numBytesRead = mDataSource->readAt(offset, dstData + dstOffset, size);
+        if (numBytesRead != (ssize_t)size) {
+            mBuffer->release();
+            mBuffer = NULL;
+
+            return ERROR_IO;
+        }
+
+        mBuffer->set_range(0, dstOffset + size);
+        mBuffer->meta_data().clear();
+        mBuffer->meta_data().setInt64(
+                kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+        mBuffer->meta_data().setInt64(
+                kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+        if (targetSampleTimeUs >= 0) {
+            mBuffer->meta_data().setInt64(
+                    kKeyTargetTime, targetSampleTimeUs);
+        }
+
+        if (isSyncSample) {
+            mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+        }
+
+        ++mCurrentSampleIndex;
+
+        *out = mBuffer;
+        mBuffer = NULL;
+
+        return OK;
     } else {
         // Whole NAL units are returned but each fragment is prefixed by
         // the start code (0x00 00 00 01).
@@ -5081,9 +5383,13 @@
     uint32_t cts = 0;
     bool isSyncSample = false;
     bool newBuffer = false;
-    if (mBuffer == NULL) {
+    if (mBuffer == NULL || mCurrentSampleIndex >= mCurrentSamples.size()) {
         newBuffer = true;
 
+        if (mBuffer != NULL) {
+            mBuffer->release();
+            mBuffer = NULL;
+        }
         if (mCurrentSampleIndex >= mCurrentSamples.size()) {
             // move to next fragment if there is one
             if (mNextMoofOffset <= mCurrentMoofOffset) {
@@ -5361,6 +5667,8 @@
 
         return OK;
     }
+
+    return OK;
 }
 
 MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
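The mIsAC4 read path above prepends a 7-byte sync header to each MPEG4-encapsulated raw frame. A minimal standalone sketch of that packing, assuming only the byte layout written by the code above (the helper name is illustrative, not part of the extractor):

    #include <stddef.h>
    #include <stdint.h>

    // Illustrative sketch: pack the AC-4 sync header the same way the
    // mIsAC4 branch does: the 0xAC40 sync word (no CRC at the end of the
    // frame), then 0xFFFF, then the 24-bit frame size, big-endian.
    static inline size_t packAC4SyncHeader(uint8_t *dst, uint32_t frameSize) {
        size_t off = 0;
        dst[off++] = 0xAC;
        dst[off++] = 0x40;
        dst[off++] = 0xFF;
        dst[off++] = 0xFF;
        dst[off++] = (uint8_t)((frameSize >> 16) & 0xFF);
        dst[off++] = (uint8_t)((frameSize >> 8) & 0xFF);
        dst[off++] = (uint8_t)(frameSize & 0xFF);
        return off;  // always 7 bytes, matching the (7 + size) space check above
    }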
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 3ea0963..a4a5684 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -139,8 +139,11 @@
 
     Track *findTrackByMimePrefix(const char *mimePrefix);
 
-    status_t parseAC3SampleEntry(off64_t offset);
-    status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+    status_t parseChannelCountSampleRate(
+            off64_t *offset, uint16_t *channelCount, uint16_t *sampleRate);
+    status_t parseAC3SpecificBox(off64_t offset);
+    status_t parseEAC3SpecificBox(off64_t offset);
+    status_t parseAC4SpecificBox(off64_t offset);
 
     MPEG4Extractor(const MPEG4Extractor &);
     MPEG4Extractor &operator=(const MPEG4Extractor &);
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index 93ee7c6..1a6d306 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -328,7 +328,15 @@
         ++mTimeToSampleIndex;
     }
 
-    *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
+    // below is equivalent to:
+    // *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
+    uint32_t tmp;
+    if (__builtin_sub_overflow(sampleIndex, mTTSSampleIndex, &tmp) ||
+            __builtin_mul_overflow(mTTSDuration, tmp, &tmp) ||
+            __builtin_add_overflow(mTTSSampleTime, tmp, &tmp)) {
+        return ERROR_OUT_OF_RANGE;
+    }
+    *time = tmp;
 
     int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
     if ((offset < 0 && *time < (offset == INT32_MIN ?
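The checked arithmetic above replaces the plain expression with the GCC/Clang __builtin_*_overflow helpers so a wrapped uint32_t is reported instead of silently used. A minimal standalone sketch of the same pattern, with illustrative names (a caller would map the failure to ERROR_OUT_OF_RANGE as the code above does):

    #include <stdint.h>

    // Checked version of: time = base + duration * (index - startIndex)
    static bool computeSampleTime(uint32_t base, uint32_t duration,
                                  uint32_t index, uint32_t startIndex,
                                  uint32_t *timeOut) {
        uint32_t tmp;
        if (__builtin_sub_overflow(index, startIndex, &tmp) ||
                __builtin_mul_overflow(duration, tmp, &tmp) ||
                __builtin_add_overflow(base, tmp, &tmp)) {
            return false;  // overflow detected; do not use the result
        }
        *timeOut = tmp;
        return true;
    }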
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
deleted file mode 100644
index a0b981c..0000000
--- a/media/libaaudio/examples/input_monitor/jni/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/input_monitor.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor_callback
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libaaudio_prebuilt
-LOCAL_SRC_FILES := libaaudio.so
-LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/jni/Application.mk b/media/libaaudio/examples/input_monitor/jni/Application.mk
deleted file mode 100644
index e74475c..0000000
--- a/media/libaaudio/examples/input_monitor/jni/Application.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-# TODO remove then when we support other architectures
-APP_ABI := arm64-v8a
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/loopback/jni/Android.mk b/media/libaaudio/examples/loopback/jni/Android.mk
deleted file mode 100644
index aebe877..0000000
--- a/media/libaaudio/examples/loopback/jni/Android.mk
+++ /dev/null
@@ -1,16 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/loopback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_STATIC_LIBRARIES := libsndfile
-LOCAL_SHARED_LIBRARIES := libaaudio libaudioutils
-LOCAL_MODULE := aaudio_loopback
-include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/loopback/jni/Application.mk b/media/libaaudio/examples/loopback/jni/Application.mk
deleted file mode 100644
index ba44f37..0000000
--- a/media/libaaudio/examples/loopback/jni/Application.mk
+++ /dev/null
@@ -1 +0,0 @@
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
deleted file mode 100644
index 1a1bd43..0000000
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/write_sine.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_callback
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libaaudio_prebuilt
-LOCAL_SRC_FILES := libaaudio.so
-LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/write_sine/jni/Application.mk b/media/libaaudio/examples/write_sine/jni/Application.mk
deleted file mode 100644
index ba44f37..0000000
--- a/media/libaaudio/examples/write_sine/jni/Application.mk
+++ /dev/null
@@ -1 +0,0 @@
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index b1cb0e7..0641b6e 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -430,14 +430,15 @@
 }
 
 status_t AudioEffect::getEffectDescriptor(const effect_uuid_t *uuid,
-        effect_descriptor_t *descriptor) /*const*/
+                                          const effect_uuid_t *type,
+                                          uint32_t preferredTypeFlag,
+                                          effect_descriptor_t *descriptor)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-    return af->getEffectDescriptor(uuid, descriptor);
+    return af->getEffectDescriptor(uuid, type, preferredTypeFlag, descriptor);
 }
 
-
 status_t AudioEffect::queryDefaultPreProcessing(audio_session_t audioSession,
                                           effect_descriptor_t *descriptors,
                                           uint32_t *count)
@@ -446,6 +447,55 @@
     if (aps == 0) return PERMISSION_DENIED;
     return aps->queryDefaultPreProcessing(audioSession, descriptors, count);
 }
+
+status_t AudioEffect::newEffectUniqueId(audio_unique_id_t* id)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return PERMISSION_DENIED;
+    *id = af->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+    return NO_ERROR;
+}
+
+status_t AudioEffect::addStreamDefaultEffect(const char *typeStr,
+                                             const String16& opPackageName,
+                                             const char *uuidStr,
+                                             int32_t priority,
+                                             audio_usage_t usage,
+                                             audio_unique_id_t *id)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    if (typeStr == NULL && uuidStr == NULL) return BAD_VALUE;
+
+    // Convert type & uuid from string to effect_uuid_t.
+    effect_uuid_t type;
+    if (typeStr != NULL) {
+        status_t res = stringToGuid(typeStr, &type);
+        if (res != OK) return res;
+    } else {
+        type = *EFFECT_UUID_NULL;
+    }
+
+    effect_uuid_t uuid;
+    if (uuidStr != NULL) {
+        status_t res = stringToGuid(uuidStr, &uuid);
+        if (res != OK) return res;
+    } else {
+        uuid = *EFFECT_UUID_NULL;
+    }
+
+    return aps->addStreamDefaultEffect(&type, opPackageName, &uuid, priority, usage, id);
+}
+
+status_t AudioEffect::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    return aps->removeStreamDefaultEffect(id);
+}
+
 // -------------------------------------------------------------------------
 
 status_t AudioEffect::stringToGuid(const char *str, effect_uuid_t *guid)
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index c072901..e260fd8 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -878,31 +878,25 @@
                                  flags, selectedDeviceId, portId);
 }
 
-status_t AudioSystem::startOutput(audio_io_handle_t output,
-                                  audio_stream_type_t stream,
-                                  audio_session_t session)
+status_t AudioSystem::startOutput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->startOutput(output, stream, session);
+    return aps->startOutput(portId);
 }
 
-status_t AudioSystem::stopOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session)
+status_t AudioSystem::stopOutput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->stopOutput(output, stream, session);
+    return aps->stopOutput(portId);
 }
 
-void AudioSystem::releaseOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session)
+void AudioSystem::releaseOutput(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return;
-    aps->releaseOutput(output, stream, session);
+    aps->releaseOutput(portId);
 }
 
 status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
@@ -1244,18 +1238,18 @@
 
 status_t AudioSystem::startAudioSource(const struct audio_port_config *source,
                                        const audio_attributes_t *attributes,
-                                       audio_patch_handle_t *handle)
+                                       audio_port_handle_t *portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->startAudioSource(source, attributes, handle);
+    return aps->startAudioSource(source, attributes, portId);
 }
 
-status_t AudioSystem::stopAudioSource(audio_patch_handle_t handle)
+status_t AudioSystem::stopAudioSource(audio_port_handle_t portId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->stopAudioSource(handle);
+    return aps->stopAudioSource(portId);
 }
 
 status_t AudioSystem::setMasterMono(bool mono)
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 37c62a8..00678c2 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -598,14 +598,18 @@
     }
 
     virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
-            effect_descriptor_t *pDescriptor) const
+                                         const effect_uuid_t *pType,
+                                         uint32_t preferredTypeFlag,
+                                         effect_descriptor_t *pDescriptor) const
     {
-        if (pUuid == NULL || pDescriptor == NULL) {
+        if (pUuid == NULL || pType == NULL || pDescriptor == NULL) {
             return BAD_VALUE;
         }
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.write(pUuid, sizeof(effect_uuid_t));
+        data.write(pType, sizeof(effect_uuid_t));
+        data.writeUint32(preferredTypeFlag);
         status_t status = remote()->transact(GET_EFFECT_DESCRIPTOR, data, &reply);
         if (status != NO_ERROR) {
             return status;
@@ -634,10 +638,10 @@
         sp<IEffect> effect;
 
         if (pDesc == NULL) {
-            return effect;
             if (status != NULL) {
                 *status = BAD_VALUE;
             }
+            return effect;
         }
 
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -949,7 +953,8 @@
             break;
     }
 
-    TimeCheck check("IAudioFlinger");
+    std::string tag("IAudioFlinger command " + std::to_string(code));
+    TimeCheck check(tag.c_str());
 
     switch (code) {
         case CREATE_TRACK: {
@@ -1276,8 +1281,11 @@
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             effect_uuid_t uuid;
             data.read(&uuid, sizeof(effect_uuid_t));
+            effect_uuid_t type;
+            data.read(&type, sizeof(effect_uuid_t));
+            uint32_t preferredTypeFlag = data.readUint32();
             effect_descriptor_t desc = {};
-            status_t status = getEffectDescriptor(&uuid, &desc);
+            status_t status = getEffectDescriptor(&uuid, &type, preferredTypeFlag, &desc);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->write(&desc, sizeof(effect_descriptor_t));
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 316105c..abf74f8 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -81,7 +81,9 @@
     GET_MASTER_MONO,
     GET_STREAM_VOLUME_DB,
     GET_SURROUND_FORMATS,
-    SET_SURROUND_FORMAT_ENABLED
+    SET_SURROUND_FORMAT_ENABLED,
+    ADD_STREAM_DEFAULT_EFFECT,
+    REMOVE_STREAM_DEFAULT_EFFECT
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -244,41 +246,29 @@
             return status;
         }
 
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session)
+    virtual status_t startOutput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(output);
-        data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t) session);
+        data.writeInt32((int32_t)portId);
         remote()->transact(START_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session)
+    virtual status_t stopOutput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(output);
-        data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t) session);
+        data.writeInt32((int32_t)portId);
         remote()->transact(STOP_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session)
+    virtual void releaseOutput(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(output);
-        data.writeInt32((int32_t)stream);
-        data.writeInt32((int32_t)session);
+        data.writeInt32((int32_t)portId);
         remote()->transact(RELEASE_OUTPUT, data, &reply);
     }
 
@@ -752,11 +742,11 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle)
+                                      audio_port_handle_t *portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        if (source == NULL || attributes == NULL || handle == NULL) {
+        if (source == NULL || attributes == NULL || portId == NULL) {
             return BAD_VALUE;
         }
         data.write(source, sizeof(struct audio_port_config));
@@ -769,15 +759,15 @@
         if (status != NO_ERROR) {
             return status;
         }
-        *handle = (audio_patch_handle_t)reply.readInt32();
+        *portId = (audio_port_handle_t)reply.readInt32();
         return status;
     }
 
-    virtual status_t stopAudioSource(audio_patch_handle_t handle)
+    virtual status_t stopAudioSource(audio_port_handle_t portId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(handle);
+        data.writeInt32(portId);
         status_t status = remote()->transact(STOP_AUDIO_SOURCE, data, &reply);
         if (status != NO_ERROR) {
             return status;
@@ -878,6 +868,42 @@
         }
         return reply.readInt32();
     }
+
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_usage_t usage,
+                                            audio_unique_id_t* id)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(type, sizeof(effect_uuid_t));
+        data.writeString16(opPackageName);
+        data.write(uuid, sizeof(effect_uuid_t));
+        data.writeInt32(priority);
+        data.writeInt32((int32_t) usage);
+        status_t status = remote()->transact(ADD_STREAM_DEFAULT_EFFECT, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = static_cast <status_t> (reply.readInt32());
+        *id = reply.readInt32();
+        return status;
+    }
+
+    virtual status_t removeStreamDefaultEffect(audio_unique_id_t id)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(id);
+        status_t status = remote()->transact(REMOVE_STREAM_DEFAULT_EFFECT, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return static_cast <status_t> (reply.readInt32());
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -947,7 +973,8 @@
             break;
     }
 
-    TimeCheck check("IAudioPolicyService");
+    std::string tag("IAudioPolicyService command " + std::to_string(code));
+    TimeCheck check(tag.c_str());
 
     switch (code) {
         case SET_DEVICE_CONNECTION_STATE: {
@@ -1074,34 +1101,22 @@
 
         case START_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            audio_stream_type_t stream =
-                                static_cast <audio_stream_type_t>(data.readInt32());
-            audio_session_t session = (audio_session_t)data.readInt32();
-            reply->writeInt32(static_cast <uint32_t>(startOutput(output,
-                                                                 stream,
-                                                                 session)));
+            const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+            reply->writeInt32(static_cast <uint32_t>(startOutput(portId)));
             return NO_ERROR;
         } break;
 
         case STOP_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            audio_stream_type_t stream =
-                                static_cast <audio_stream_type_t>(data.readInt32());
-            audio_session_t session = (audio_session_t)data.readInt32();
-            reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
-                                                                stream,
-                                                                session)));
+            const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+            reply->writeInt32(static_cast <uint32_t>(stopOutput(portId)));
             return NO_ERROR;
         } break;
 
         case RELEASE_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
-            audio_stream_type_t stream = (audio_stream_type_t)data.readInt32();
-            audio_session_t session = (audio_session_t)data.readInt32();
-            releaseOutput(output, stream, session);
+            const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+            releaseOutput(portId);
             return NO_ERROR;
         } break;
 
@@ -1495,17 +1510,17 @@
             audio_attributes_t attributes = {};
             data.read(&attributes, sizeof(audio_attributes_t));
             sanetizeAudioAttributes(&attributes);
-            audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
-            status_t status = startAudioSource(&source, &attributes, &handle);
+            audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+            status_t status = startAudioSource(&source, &attributes, &portId);
             reply->writeInt32(status);
-            reply->writeInt32(handle);
+            reply->writeInt32(portId);
             return NO_ERROR;
         } break;
 
         case STOP_AUDIO_SOURCE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            audio_patch_handle_t handle = (audio_patch_handle_t) data.readInt32();
-            status_t status = stopAudioSource(handle);
+            audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
+            status_t status = stopAudioSource(portId);
             reply->writeInt32(status);
             return NO_ERROR;
         } break;
@@ -1584,6 +1599,43 @@
             return NO_ERROR;
         }
 
+        case ADD_STREAM_DEFAULT_EFFECT: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            effect_uuid_t type;
+            status_t status = data.read(&type, sizeof(effect_uuid_t));
+            if (status != NO_ERROR) {
+                return status;
+            }
+            String16 opPackageName;
+            status = data.readString16(&opPackageName);
+            if (status != NO_ERROR) {
+                return status;
+            }
+            effect_uuid_t uuid;
+            status = data.read(&uuid, sizeof(effect_uuid_t));
+            if (status != NO_ERROR) {
+                return status;
+            }
+            int32_t priority = data.readInt32();
+            audio_usage_t usage = (audio_usage_t) data.readInt32();
+            audio_unique_id_t id = 0;
+            reply->writeInt32(static_cast <int32_t>(addStreamDefaultEffect(&type,
+                                                                           opPackageName,
+                                                                           &uuid,
+                                                                           priority,
+                                                                           usage,
+                                                                           &id)));
+            reply->writeInt32(id);
+            return NO_ERROR;
+        }
+
+        case REMOVE_STREAM_DEFAULT_EFFECT: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_unique_id_t id = static_cast<audio_unique_id_t>(data.readInt32());
+            reply->writeInt32(static_cast <int32_t>(removeStreamDefaultEffect(id)));
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index bfc068b..c97f783 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -90,27 +90,34 @@
      */
     static status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor);
 
-
     /*
-     * Returns the descriptor for the specified effect uuid.
+     * Returns a descriptor for the specified effect uuid or type.
+     *
+     * Look up an effect by uuid, or if that's unspecified (EFFECT_UUID_NULL),
+     * do so by type and preferred flags instead.
      *
      * Parameters:
      *      uuid:       pointer to effect uuid.
+     *      type:       pointer to effect type uuid.
+     *      preferredTypeFlag: if multiple effects of the given type exist,
+     *                  one with a matching type flag will be chosen over one without.
+     *                  Use EFFECT_FLAG_TYPE_MASK to indicate no preference.
      *      descriptor: address where the effect descriptor should be returned.
      *
      * Returned status (from utils/Errors.h) can be:
      *      NO_ERROR        successful operation.
      *      PERMISSION_DENIED could not get AudioFlinger interface
      *      NO_INIT         effect library failed to initialize
-     *      BAD_VALUE       invalid uuid or descriptor pointers
+     *      BAD_VALUE       invalid uuid, type or descriptor pointers
      *      NAME_NOT_FOUND  no effect with this uuid found
      *
      * Returned value
      *   *descriptor updated with effect descriptor
      */
     static status_t getEffectDescriptor(const effect_uuid_t *uuid,
-                                        effect_descriptor_t *descriptor) /*const*/;
-
+                                        const effect_uuid_t *type,
+                                        uint32_t preferredTypeFlag,
+                                        effect_descriptor_t *descriptor);
 
     /*
      * Returns a list of descriptors corresponding to the pre processings enabled by default
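A short usage sketch of the by-type lookup documented for getEffectDescriptor() above; EFFECT_UUID_NULL and EFFECT_FLAG_TYPE_MASK are the existing constants from the effect headers, while the wrapper function itself is illustrative:

    #include <media/AudioEffect.h>

    using namespace android;

    // Look a descriptor up by type only: the implementation uuid is left
    // unspecified (EFFECT_UUID_NULL) and EFFECT_FLAG_TYPE_MASK expresses
    // no preference among implementations of that type.
    status_t describeByType(const effect_uuid_t *typeUuid,
                            effect_descriptor_t *desc) {
        return AudioEffect::getEffectDescriptor(
                EFFECT_UUID_NULL, typeUuid, EFFECT_FLAG_TYPE_MASK, desc);
    }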
@@ -144,6 +151,79 @@
                                               uint32_t *count);
 
     /*
+     * Gets a new system-wide unique effect id.
+     *
+     * Parameters:
+     *      id: The address to return the generated id.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     * Returned value
+     *   *id:  The new unique system-wide effect id.
+     */
+    static status_t newEffectUniqueId(audio_unique_id_t* id);
+
+    /*
+     * Static methods for adding/removing system-wide effects.
+     */
+
+    /*
+     * Adds an effect to the list of default output effects for a given stream type.
+     *
+     * If the effect is no longer available when a stream of the given type
+     * is created, the system will continue without adding it.
+     *
+     * Parameters:
+     *   typeStr:  Type uuid of effect to be a default: can be null if uuidStr is specified.
+     *             This may correspond to the OpenSL ES interface implemented by this effect,
+     *             or could be some vendor-defined type.
+     *   opPackageName: The package name used for app op checks.
+     *   uuidStr:  Uuid of effect to be a default: can be null if type is specified.
+     *             This uuid corresponds to a particular implementation of an effect type.
+     *             Note if both uuidStr and typeStr are specified, typeStr is ignored.
+     *   priority: Requested priority for effect control. Negative values indicate lower
+     *             priorities, positive values indicate higher priorities, and 0 is the
+     *             normal priority.
+     *   usage:    The usage this effect should be a default for. Unrecognized values will be
+     *             treated as AUDIO_USAGE_UNKNOWN.
+     *   id:       Address where the system-wide unique id of the default effect should be returned.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     *      NO_INIT         effect library failed to initialize.
+     *      BAD_VALUE       invalid type uuid or implementation uuid.
+     *      NAME_NOT_FOUND  no effect with this uuid or type found.
+     *
+     * Returned value
+     *   *id:  The system-wide unique id of the added default effect.
+     */
+    static status_t addStreamDefaultEffect(const char* typeStr,
+                                           const String16& opPackageName,
+                                           const char* uuidStr,
+                                           int32_t priority,
+                                           audio_usage_t usage,
+                                           audio_unique_id_t* id);
+
+    /*
+     * Removes an effect from the list of default output effects for a given stream type.
+     *
+     * Parameters:
+     *      id: The system-wide unique id of the effect that should no longer be a default.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *      NO_ERROR        successful operation.
+     *      PERMISSION_DENIED could not get AudioFlinger interface
+     *                        or caller lacks required permissions.
+     *      NO_INIT         effect library failed to initialize.
+     *      BAD_VALUE       invalid id.
+     */
+    static status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
+    /*
      * Events used by callback function (effect_callback_t).
      */
     enum event_type {
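A usage sketch of the new default-effect helpers declared above; the type uuid string and the package name are placeholders, and error handling is reduced to a single status check:

    #include <media/AudioEffect.h>

    using namespace android;

    // Illustrative only: register a default effect for media playback by
    // type uuid string, keep the returned id, and remove it again later.
    void exampleStreamDefaultEffect(const String16& opPackageName) {
        audio_unique_id_t id = 0;  // filled in on success
        status_t status = AudioEffect::addStreamDefaultEffect(
                "01234567-89ab-cdef-0123-456789abcdef",  // placeholder type uuid
                opPackageName,
                nullptr,           // uuidStr: any implementation of the type
                0,                 // priority: normal
                AUDIO_USAGE_MEDIA,
                &id);
        if (status == NO_ERROR) {
            // ... later, when the default should no longer apply:
            AudioEffect::removeStreamDefaultEffect(id);
        }
    }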
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
index 73ee0a7..35d2e85 100644
--- a/media/libaudioclient/include/media/AudioPolicyHelper.h
+++ b/media/libaudioclient/include/media/AudioPolicyHelper.h
@@ -19,6 +19,43 @@
 #include <system/audio.h>
 
 static inline
+audio_stream_type_t audio_usage_to_stream_type(const audio_usage_t usage)
+{
+    switch(usage) {
+        case AUDIO_USAGE_MEDIA:
+        case AUDIO_USAGE_GAME:
+        case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        case AUDIO_USAGE_ASSISTANT:
+            return AUDIO_STREAM_MUSIC;
+        case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+            return AUDIO_STREAM_ACCESSIBILITY;
+        case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+            return AUDIO_STREAM_SYSTEM;
+        case AUDIO_USAGE_VOICE_COMMUNICATION:
+            return AUDIO_STREAM_VOICE_CALL;
+
+        case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+            return AUDIO_STREAM_DTMF;
+
+        case AUDIO_USAGE_ALARM:
+            return AUDIO_STREAM_ALARM;
+        case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+            return AUDIO_STREAM_RING;
+
+        case AUDIO_USAGE_NOTIFICATION:
+        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+        case AUDIO_USAGE_NOTIFICATION_EVENT:
+            return AUDIO_STREAM_NOTIFICATION;
+
+        case AUDIO_USAGE_UNKNOWN:
+        default:
+            return AUDIO_STREAM_MUSIC;
+    }
+}
+
+static inline
 audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
 {
     // flags to stream type mapping
@@ -30,38 +67,7 @@
     }
 
     // usage to stream type mapping
-    switch (attr->usage) {
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-    case AUDIO_USAGE_ASSISTANT:
-        return AUDIO_STREAM_MUSIC;
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        return AUDIO_STREAM_ACCESSIBILITY;
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return AUDIO_STREAM_SYSTEM;
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return AUDIO_STREAM_VOICE_CALL;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return AUDIO_STREAM_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-        return AUDIO_STREAM_ALARM;
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return AUDIO_STREAM_RING;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return AUDIO_STREAM_NOTIFICATION;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return AUDIO_STREAM_MUSIC;
-    }
+    return audio_usage_to_stream_type(attr->usage);
 }
 
 static inline
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 4c0f796..adfee8b 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -224,15 +224,9 @@
                                      audio_output_flags_t flags,
                                      audio_port_handle_t *selectedDeviceId,
                                      audio_port_handle_t *portId);
-    static status_t startOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session);
-    static status_t stopOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session);
-    static void releaseOutput(audio_io_handle_t output,
-                              audio_stream_type_t stream,
-                              audio_session_t session);
+    static status_t startOutput(audio_port_handle_t portId);
+    static status_t stopOutput(audio_port_handle_t portId);
+    static void releaseOutput(audio_port_handle_t portId);
 
     // Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
     // or release it with releaseInput().
@@ -328,9 +322,9 @@
     static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
 
     static status_t startAudioSource(const struct audio_port_config *source,
-                                      const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle);
-    static status_t stopAudioSource(audio_patch_handle_t handle);
+                                     const audio_attributes_t *attributes,
+                                     audio_port_handle_t *portId);
+    static status_t stopAudioSource(audio_port_handle_t portId);
 
     static status_t setMasterMono(bool mono);
     static status_t getMasterMono(bool *mono);
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index e6bf72f..31326ab 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -428,7 +428,9 @@
     virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
 
     virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID,
-                                        effect_descriptor_t *pDescriptor) const = 0;
+                                         const effect_uuid_t *pTypeUUID,
+                                         uint32_t preferredTypeFlag,
+                                         effect_descriptor_t *pDescriptor) const = 0;
 
     virtual sp<IEffect> createEffect(
                                     effect_descriptor_t *pDesc,
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index c3876af..c2899f8 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -66,15 +66,9 @@
                                       audio_output_flags_t flags,
                                       audio_port_handle_t *selectedDeviceId,
                                       audio_port_handle_t *portId) = 0;
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session) = 0;
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session) = 0;
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session) = 0;
+    virtual status_t startOutput(audio_port_handle_t portId) = 0;
+    virtual status_t stopOutput(audio_port_handle_t portId) = 0;
+    virtual void releaseOutput(audio_port_handle_t portId) = 0;
     virtual status_t  getInputForAttr(const audio_attributes_t *attr,
                               audio_io_handle_t *input,
                               audio_session_t session,
@@ -115,6 +109,13 @@
     virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count) = 0;
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_usage_t usage,
+                                            audio_unique_id_t* id) = 0;
+    virtual status_t removeStreamDefaultEffect(audio_unique_id_t id) = 0;
    // Check if offload is possible for given format, stream type, sample rate,
     // bit rate, duration, video and streaming or offload property is enabled
     virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
@@ -159,8 +160,8 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle) = 0;
-    virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
+                                      audio_port_handle_t *portId) = 0;
+    virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index b23e018..bfa80e8 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -192,7 +192,17 @@
                     const native_handle *handle = hidlInfo.sharedMemory.handle();
                     if (handle->numFds > 0) {
                         info->shared_memory_fd = handle->data[0];
+#if MAJOR_VERSION == 4
+                        info->flags = audio_mmap_buffer_flag(hidlInfo.flags);
+#endif
                         info->buffer_size_frames = hidlInfo.bufferSizeFrames;
+                        // A negative buffer size in frames was a hack in O and P to
+                        // indicate that the buffer is shareable to applications.
+                        if (info->buffer_size_frames < 0) {
+                            info->buffer_size_frames *= -1;
+                            info->flags = audio_mmap_buffer_flag(
+                                    info->flags | AUDIO_MMAP_APPLICATION_SHAREABLE);
+                        }
                         info->burst_size_frames = hidlInfo.burstSizeFrames;
                         // info->shared_memory_address is not needed in HIDL context
                         info->shared_memory_address = NULL;
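The sign-flip above normalizes a legacy convention; a minimal sketch of just that handling, with illustrative locals standing in for the audio_mmap_buffer_info fields:

    // Older (O and P) HALs reported a negative frame count to mean the
    // buffer is shareable with applications; normalize it and set the flag.
    int32_t bufferSizeFrames = -1024;  // as reported by a legacy HAL
    uint32_t flags = 0;
    if (bufferSizeFrames < 0) {
        bufferSizeFrames *= -1;
        flags |= AUDIO_MMAP_APPLICATION_SHAREABLE;
    }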
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index f6f817a..f3ea826 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -875,7 +875,7 @@
                 t->hook = Track::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
                         t->mMixerInFormat, t->mMixerFormat);
                 ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                        "Track %d needs downmix + resample", i);
+                        "Track %d needs downmix + resample", name);
             } else {
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
                     t->hook = Track::getTrackHook(
@@ -890,7 +890,7 @@
                     t->hook = Track::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
                             t->mMixerInFormat, t->mMixerFormat);
                     ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                            "Track %d needs downmix", i);
+                            "Track %d needs downmix", name);
                 }
             }
         }
diff --git a/media/libeffects/config/include/media/EffectsConfig.h b/media/libeffects/config/include/media/EffectsConfig.h
index 55b946f..fa0415b 100644
--- a/media/libeffects/config/include/media/EffectsConfig.h
+++ b/media/libeffects/config/include/media/EffectsConfig.h
@@ -96,7 +96,7 @@
     /** Parsed config, nullptr if the xml lib could not load the file */
     std::unique_ptr<Config> parsedConfig;
     size_t nbSkippedElement; //< Number of skipped invalid library, effect or processing chain
-    const char* configPath; //< Path to the loaded configuration
+    const std::string configPath; //< Path to the loaded configuration
 };
 
 /** Parses the provided effect configuration.
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index d79501f..351b1ee 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -250,14 +250,14 @@
     return true;
 }
 
-/** Internal version of the public parse(const char* path) with precondition `path != nullptr`. */
-ParsingResult parseWithPath(const char* path) {
+/** Internal version of the public parse(const char* path) where the path is always provided. */
+ParsingResult parseWithPath(std::string&& path) {
     XMLDocument doc;
-    doc.LoadFile(path);
+    doc.LoadFile(path.c_str());
     if (doc.Error()) {
-        ALOGE("Failed to parse %s: Tinyxml2 error (%d): %s", path,
+        ALOGE("Failed to parse %s: Tinyxml2 error (%d): %s", path.c_str(),
               doc.ErrorID(), doc.ErrorStr());
-        return {nullptr, 0, path};
+        return {nullptr, 0, std::move(path)};
     }
 
     auto config = std::make_unique<Config>();
@@ -295,7 +295,7 @@
             }
         }
     }
-    return {std::move(config), nbSkippedElements, path};
+    return {std::move(config), nbSkippedElements, std::move(path)};
 }
 
 }; // namespace
@@ -310,14 +310,14 @@
         if (access(defaultPath.c_str(), R_OK) != 0) {
             continue;
         }
-        auto result = parseWithPath(defaultPath.c_str());
+        auto result = parseWithPath(std::move(defaultPath));
         if (result.parsedConfig != nullptr) {
             return result;
         }
     }
 
     ALOGE("Could not parse effect configuration in any of the default locations.");
-    return {nullptr, 0, nullptr};
+    return {nullptr, 0, ""};
 }
 
 } // namespace effectsConfig
diff --git a/media/libeffects/factory/EffectsXmlConfigLoader.cpp b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
index 7a7d431..052a88b 100644
--- a/media/libeffects/factory/EffectsXmlConfigLoader.cpp
+++ b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
@@ -327,7 +327,8 @@
                                            &gSkippedEffects, &gSubEffectList);
 
     ALOGE_IF(result.nbSkippedElement != 0, "%zu errors during loading of configuration: %s",
-             result.nbSkippedElement, result.configPath ?: "No config file found");
+             result.nbSkippedElement,
+             result.configPath.empty() ? "No config file found" : result.configPath.c_str());
 
     return result.nbSkippedElement;
 }
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 04c2692..53d266a 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -1198,13 +1198,7 @@
     for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
         if (MemTab.Region[i].Size != 0){
             if (MemTab.Region[i].pBaseAddress != NULL){
-                ALOGV("\tLvmEffect_free - START freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
-
                 free(MemTab.Region[i].pBaseAddress);
-
-                ALOGV("\tLvmEffect_free - END   freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }else{
                 ALOGV("\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer %" PRIu32
                         " bytes for region %u at %p ERROR\n",
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index e1c03f9..686ec4c 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -612,13 +612,7 @@
     for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
         if (MemTab.Region[i].Size != 0){
             if (MemTab.Region[i].pBaseAddress != NULL){
-                ALOGV("\tfree() - START freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
-
                 free(MemTab.Region[i].pBaseAddress);
-
-                ALOGV("\tfree() - END   freeing %" PRIu32 " bytes for region %u at %p\n",
-                        MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }else{
                 ALOGV("\tLVM_ERROR : free() - trying to free with NULL pointer %" PRIu32 " bytes "
                         "for region %u at %p ERROR\n",
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index f2844ed..b914f4b 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -889,7 +889,7 @@
         delete session->procFrame;
         session->procFrame = NULL;
         delete session->apm;
-        session->apm = NULL;
+        session->apm = NULL; // NOLINT(clang-analyzer-cplusplus.NewDelete)
     }
     return status;
 }
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index a22819a..e6d6b3e 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -20,7 +20,7 @@
     vndk: {
         enabled: true,
     },
-    srcs: ["AudioParameter.cpp", "TypeConverter.cpp", "TimeCheck.cpp"],
+    srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
     cflags: [
         "-Werror",
         "-Wno-error=deprecated-declarations",
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index aade69a..e0f5a40 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -546,8 +546,8 @@
 
             if (info->mHasRefProfile) {
 
-                CamcorderProfile *profile =
-                    new CamcorderProfile(
+                std::unique_ptr<CamcorderProfile> profile =
+                    std::make_unique<CamcorderProfile>(
                             *mCamcorderProfiles[info->mRefProfileIndex]);
 
                 // Overwrite the quality
@@ -581,7 +581,7 @@
                         mCamcorderProfiles[info->mRefProfileIndex]->mQuality,
                         profile->mQuality, cameraId);
 
-                mCamcorderProfiles.add(profile);
+                mCamcorderProfiles.add(profile.release());
             }
         }
     }
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 1fa8789..0fb5abc 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -9,6 +9,7 @@
 
     srcs: [
         "JAudioTrack.cpp",
+        "JavaVMHelper.cpp",
         "MediaPlayer2AudioOutput.cpp",
         "mediaplayer2.cpp",
     ],
@@ -49,6 +50,10 @@
         "media_plugin_headers",
     ],
 
+    include_dirs: [
+        "frameworks/base/core/jni",
+    ],
+
     static_libs: [
         "libmedia_helper",
         "libstagefright_nuplayer2",
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index ac0cc57..778ae1b 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -21,7 +21,7 @@
 #include "mediaplayer2/JAudioTrack.h"
 
 #include <android_media_AudioErrors.h>
-#include <android_runtime/AndroidRuntime.h>
+#include <mediaplayer2/JavaVMHelper.h>
 
 namespace android {
 
@@ -39,7 +39,7 @@
         const audio_attributes_t* pAttributes,        // AudioAttributes
         float maxRequiredSpeed) {                     // bufferSizeInBytes
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
     mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
 
@@ -116,19 +116,19 @@
 }
 
 JAudioTrack::~JAudioTrack() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     env->DeleteGlobalRef(mAudioTrackCls);
 }
 
 size_t JAudioTrack::frameCount() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetBufferSizeInFrames = env->GetMethodID(
             mAudioTrackCls, "getBufferSizeInFrames", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
 }
 
 size_t JAudioTrack::channelCount() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
 }
@@ -143,7 +143,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
             mAudioTrackCls, "getPlaybackHeadPosition", "()I");
     *position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
@@ -152,7 +152,7 @@
 }
 
 bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
     jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
@@ -189,7 +189,7 @@
 status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
     // TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
     // Should we do the same thing?
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
     jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
@@ -224,7 +224,7 @@
 }
 
 const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jmethodID jGetPlaybackParams = env->GetMethodID(
             mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
@@ -266,7 +266,7 @@
         return media::VolumeShaper::Status(BAD_VALUE);
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
             "(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
@@ -282,7 +282,7 @@
 }
 
 status_t JAudioTrack::setAuxEffectSendLevel(float level) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
             mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
@@ -290,14 +290,14 @@
 }
 
 status_t JAudioTrack::attachAuxEffect(int effectId) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
     int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
     return javaToNativeStatus(result);
 }
 
 status_t JAudioTrack::setVolume(float left, float right) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     // TODO: Java setStereoVolume is deprecated. Do we really need this method?
     jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
@@ -305,14 +305,14 @@
 }
 
 status_t JAudioTrack::setVolume(float volume) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
     int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
     return javaToNativeStatus(result);
 }
 
 status_t JAudioTrack::start() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
     // TODO: Should we catch the Java IllegalStateException from play()?
     env->CallVoidMethod(mAudioTrackObj, jPlay);
@@ -324,7 +324,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jbyteArray jAudioData = env->NewByteArray(size);
     env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
 
@@ -353,7 +353,7 @@
 }
 
 void JAudioTrack::stop() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
     env->CallVoidMethod(mAudioTrackObj, jStop);
     // TODO: Should we catch IllegalStateException?
@@ -365,20 +365,20 @@
 }
 
 void JAudioTrack::flush() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
     env->CallVoidMethod(mAudioTrackObj, jFlush);
 }
 
 void JAudioTrack::pause() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
     env->CallVoidMethod(mAudioTrackObj, jPause);
     // TODO: Should we catch IllegalStateException?
 }
 
 bool JAudioTrack::isPlaying() const {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
     int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
 
@@ -393,7 +393,7 @@
 }
 
 uint32_t JAudioTrack::getSampleRate() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
     return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
 }
@@ -403,7 +403,7 @@
         return BAD_VALUE;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetBufferSizeInFrames = env->GetMethodID(
             mAudioTrackCls, "getBufferSizeInFrames", "()I");
     int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
@@ -417,7 +417,7 @@
 }
 
 audio_format_t JAudioTrack::format() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
     int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
     return audioFormatToNative(javaFormat);
@@ -454,7 +454,7 @@
 }
 
 audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
             "()Landroid/media/AudioDeviceInfo;");
     jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
@@ -469,14 +469,14 @@
 }
 
 audio_session_t JAudioTrack::getAudioSessionId() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
     jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
     return (audio_session_t) sessionId;
 }
 
 status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
     jmethodID jSetAudioOutputDeviceById = env->GetMethodID(
             jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
@@ -550,7 +550,7 @@
         return NULL;
     }
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     // Referenced "android_media_VolumeShaper.h".
     jfloatArray xarray = nullptr;
@@ -595,7 +595,7 @@
 jobject JAudioTrack::createVolumeShaperOperationObj(
         const sp<media::VolumeShaper::Operation>& operation) {
 
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
 
     jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
     jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
@@ -647,7 +647,7 @@
 }
 
 jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2Impl$StreamEventCallback");
     jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
     jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor, this, cbf, user);
@@ -655,7 +655,7 @@
 }
 
 jobject JAudioTrack::createCallbackExecutor() {
-    JNIEnv *env = AndroidRuntime::getJNIEnv();
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
     jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
     jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
             "newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
diff --git a/media/libmediaplayer2/JavaVMHelper.cpp b/media/libmediaplayer2/JavaVMHelper.cpp
new file mode 100644
index 0000000..90aaa7f
--- /dev/null
+++ b/media/libmediaplayer2/JavaVMHelper.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JavaVMHelper"
+
+#include "mediaplayer2/JavaVMHelper.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <stdlib.h>
+
+namespace android {
+
+// static
+std::atomic<JavaVM *> JavaVMHelper::sJavaVM(NULL);
+
+// static
+JNIEnv *JavaVMHelper::getJNIEnv() {
+    JNIEnv *env;
+    JavaVM *vm = sJavaVM.load();
+    CHECK(vm != NULL);
+
+    if (vm->GetEnv((void **)&env, JNI_VERSION_1_4) != JNI_OK) {
+        return NULL;
+    }
+
+    return env;
+}
+
+// static
+void JavaVMHelper::setJavaVM(JavaVM *vm) {
+    sJavaVM.store(vm);
+}
+
+}  // namespace android
diff --git a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
new file mode 100644
index 0000000..35091b7
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JAVA_VM_HELPER_H_
+
+#define JAVA_VM_HELPER_H_
+
+#include "jni.h"
+
+#include <atomic>
+
+namespace android {
+
+struct JavaVMHelper {
+    static JNIEnv *getJNIEnv();
+    static void setJavaVM(JavaVM *vm);
+
+private:
+    // Once a valid JavaVM has been set, it should never be reset or changed.
+    // However, as it may be accessed from multiple threads, access needs to be
+    // synchronized.
+    static std::atomic<JavaVM *> sJavaVM;
+};
+
+}  // namespace android
+
+#endif  // JAVA_VM_HELPER_H_
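
A minimal usage sketch for the new helper (illustration only, not part of this change; the JNI_OnLoad hook and the include spelling are assumptions about how a client library would wire it up):

    #include <jni.h>
    #include "mediaplayer2/JavaVMHelper.h"

    // Register the VM exactly once, e.g. from the loading library's JNI_OnLoad.
    extern "C" jint JNI_OnLoad(JavaVM *vm, void * /* reserved */) {
        android::JavaVMHelper::setJavaVM(vm);
        return JNI_VERSION_1_4;  // same version JavaVMHelper::getJNIEnv() asks GetEnv() for
    }

    // Afterwards, native code on any thread attached to the VM can call
    //     JNIEnv *env = android::JavaVMHelper::getJNIEnv();
    // which CHECKs that a VM was registered and returns NULL if GetEnv() fails
    // (e.g. the calling thread is not attached).
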
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
index 2fb5a2c..bc84729 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
@@ -96,17 +96,17 @@
 enum media2_info_type {
     // 0xx
     MEDIA2_INFO_UNKNOWN = 1,
-    // The player was started because it was used as the next player for another
-    // player, which just completed playback
-    MEDIA2_INFO_STARTED_AS_NEXT = 2,
+    // The player just started the playback of this data source.
+    MEDIA2_INFO_DATA_SOURCE_START = 2,
     // The player just pushed the very first video frame for rendering
     MEDIA2_INFO_VIDEO_RENDERING_START = 3,
     // The player just pushed the very first audio frame for rendering
     MEDIA2_INFO_AUDIO_RENDERING_START = 4,
     // The player just completed the playback of this data source
-    MEDIA2_INFO_PLAYBACK_COMPLETE = 5,
-    // The player just completed the playback of the full play list
-    MEDIA2_INFO_PLAYLIST_END = 6,
+    MEDIA2_INFO_DATA_SOURCE_END = 5,
+    // The player just completed the playback of all data sources.
+    // This is not visible in native code; the entry is kept for completeness.
+    MEDIA2_INFO_DATA_SOURCE_LIST_END = 6,
 
     //1xx
     // The player just prepared a data source.
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 060b698..c649573 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -2474,8 +2474,8 @@
     if (mDriver != NULL) {
         sp<NuPlayer2Driver> driver = mDriver.promote();
         if (driver != NULL) {
-            notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_PLAYBACK_COMPLETE, 0);
-            notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_STARTED_AS_NEXT, 0);
+            notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_END, 0);
+            notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_START, 0);
         }
     }
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index 645138a..931b86e 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -1088,6 +1088,12 @@
                         static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
                 }
                 if (mediaBuf != NULL) {
+                    if (mediaBuf->size() > codecBuffer->capacity()) {
+                        handleError(ERROR_BUFFER_TOO_SMALL);
+                        mDequeuedInputBuffers.push_back(bufferIx);
+                        return false;
+                    }
+
                     codecBuffer->setRange(0, mediaBuf->size());
                     memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 69cd82e..050e4fb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -1069,6 +1069,12 @@
                         static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
                 }
                 if (mediaBuf != NULL) {
+                    if (mediaBuf->size() > codecBuffer->capacity()) {
+                        handleError(ERROR_BUFFER_TOO_SMALL);
+                        mDequeuedInputBuffers.push_back(bufferIx);
+                        return false;
+                    }
+
                     codecBuffer->setRange(0, mediaBuf->size());
                     memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
 
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
index f09e93d..3418dc0 100644
--- a/media/libnblog/PerformanceAnalysis.cpp
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -32,6 +32,7 @@
 #include <sys/prctl.h>
 #include <time.h>
 #include <new>
+#include <audio_utils/LogPlot.h>
 #include <audio_utils/roundup.h>
 #include <media/nblog/NBLog.h>
 #include <media/nblog/PerformanceAnalysis.h>
@@ -208,27 +209,6 @@
     return isOutlier;
 }
 
-static int widthOf(int x) {
-    int width = 0;
-    if (x < 0) {
-        width++;
-        x = x == INT_MIN ? INT_MAX : -x;
-    }
-    // assert (x >= 0)
-    do {
-        ++width;
-        x /= 10;
-    } while (x > 0);
-    return width;
-}
-
-// computes the column width required for a specific histogram value
-inline int numberWidth(double number, int leftPadding) {
-    // Added values account for whitespaces needed around numbers, and for the
-    // dot and decimal digit not accounted for by widthOf
-    return std::max(std::max(widthOf(static_cast<int>(number)) + 3, 2), leftPadding + 1);
-}
-
 // rounds value to precision based on log-distance from mean
 __attribute__((no_sanitize("signed-integer-overflow")))
 inline double logRound(double x, double mean) {
@@ -254,7 +234,7 @@
 // of PerformanceAnalysis
 void PerformanceAnalysis::reportPerformance(String8 *body, int author, log_hash_t hash,
                                             int maxHeight) {
-    if (mHists.empty()) {
+    if (mHists.empty() || body == nullptr) {
         return;
     }
 
@@ -273,69 +253,16 @@
         }
     }
 
-    // underscores and spaces length corresponds to maximum width of histogram
-    static const int kLen = 200;
-    std::string underscores(kLen, '_');
-    std::string spaces(kLen, ' ');
-
-    auto it = buckets.begin();
-    double maxDelta = it->first;
-    int maxCount = it->second;
-    // Compute maximum values
-    while (++it != buckets.end()) {
-        if (it->first > maxDelta) {
-            maxDelta = it->first;
-        }
-        if (it->second > maxCount) {
-            maxCount = it->second;
-        }
-    }
-    int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
-    const int leftPadding = widthOf(1 << height);
-    const int bucketWidth = numberWidth(maxDelta, leftPadding);
-    int scalingFactor = 1;
-    // scale data if it exceeds maximum height
-    if (height > maxHeight) {
-        scalingFactor = (height + maxHeight) / maxHeight;
-        height /= scalingFactor;
-    }
-    body->appendFormat("\n%*s %3.2f %s", leftPadding + 11,
-            "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:");
-    body->appendFormat("\n%*s%d, %lld, %lld\n", leftPadding + 11,
+    static const int SIZE = 128;
+    char title[SIZE];
+    snprintf(title, sizeof(title), "\n%s %3.2f %s\n%s%d, %lld, %lld\n",
+            "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:",
             "Thread, hash, starting timestamp: ", author,
-            static_cast<long long int>(hash), static_cast<long long int>(startingTs));
-    // write histogram label line with bucket values
-    body->appendFormat("\n%s", " ");
-    body->appendFormat("%*s", leftPadding, " ");
-    for (auto const &x : buckets) {
-        const int colWidth = numberWidth(x.first, leftPadding);
-        body->appendFormat("%*d", colWidth, x.second);
-    }
-    // write histogram ascii art
-    body->appendFormat("\n%s", " ");
-    for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
-        const int value = 1 << row;
-        body->appendFormat("%.*s", leftPadding, spaces.c_str());
-        for (auto const &x : buckets) {
-            const int colWidth = numberWidth(x.first, leftPadding);
-            body->appendFormat("%.*s%s", colWidth - 1,
-                               spaces.c_str(), x.second < value ? " " : "|");
-        }
-        body->appendFormat("\n%s", " ");
-    }
-    // print x-axis
-    const int columns = static_cast<int>(buckets.size());
-    body->appendFormat("%*c", leftPadding, ' ');
-    body->appendFormat("%.*s", (columns + 1) * bucketWidth, underscores.c_str());
-    body->appendFormat("\n%s", " ");
+            static_cast<long long>(hash), static_cast<long long>(startingTs));
+    static const char * const kLabel = "ms";
 
-    // write footer with bucket labels
-    body->appendFormat("%*s", leftPadding, " ");
-    for (auto const &x : buckets) {
-        const int colWidth = numberWidth(x.first, leftPadding);
-        body->appendFormat("%*.*f", colWidth, 1, x.first);
-    }
-    body->appendFormat("%.*s%s", bucketWidth, spaces.c_str(), "ms\n");
+    body->appendFormat("%s",
+            audio_utils_plot_histogram(buckets, title, kLabel, maxHeight).c_str());
 
     // Now report glitches
     body->appendFormat("\ntime elapsed between glitches and glitch timestamps:\n");
diff --git a/media/libnblog/include/media/nblog/PerformanceAnalysis.h b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
index ddfe9d6..56e0ea6 100644
--- a/media/libnblog/include/media/nblog/PerformanceAnalysis.h
+++ b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
@@ -25,6 +25,8 @@
 
 namespace android {
 
+class String8;
+
 namespace ReportPerformance {
 
 class PerformanceAnalysis;
diff --git a/media/libnblog/include/media/nblog/ReportPerformance.h b/media/libnblog/include/media/nblog/ReportPerformance.h
index ec0842f..1b11197 100644
--- a/media/libnblog/include/media/nblog/ReportPerformance.h
+++ b/media/libnblog/include/media/nblog/ReportPerformance.h
@@ -23,9 +23,6 @@
 
 namespace android {
 
-// The String8 class is used by reportPerformance function
-class String8;
-
 namespace ReportPerformance {
 
 constexpr int kMsPerSec = 1000;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 7f39d10..3526047 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5556,6 +5556,11 @@
             break;
         }
 
+        case kWhatCheckIfStuck: {
+            ALOGV("No-op by default");
+            break;
+        }
+
         default:
             return false;
     }
@@ -7873,6 +7878,18 @@
             break;
         }
 
+        case kWhatCheckIfStuck:
+        {
+            int32_t generation = 0;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation == mCodec->mStateGeneration) {
+                mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+            }
+
+            handled = true;
+            break;
+        }
+
         default:
             handled = BaseState::onMessageReceived(msg);
             break;
@@ -7884,6 +7901,11 @@
 void ACodec::OutputPortSettingsChangedState::stateEntered() {
     ALOGV("[%s] Now handling output port settings change",
          mCodec->mComponentName.c_str());
+
+    // If we haven't transitioned after 3 seconds, we're probably stuck.
+    sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+    msg->setInt32("generation", mCodec->mStateGeneration);
+    msg->post(3000000);
 }
 
 bool ACodec::OutputPortSettingsChangedState::onOMXFrameRendered(
@@ -8146,6 +8168,11 @@
     ALOGV("[%s] Now Flushing", mCodec->mComponentName.c_str());
 
     mFlushComplete[kPortIndexInput] = mFlushComplete[kPortIndexOutput] = false;
+
+    // If we haven't transitioned after 3 seconds, we're probably stuck.
+    sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+    msg->setInt32("generation", mCodec->mStateGeneration);
+    msg->post(3000000);
 }
 
 bool ACodec::FlushingState::onMessageReceived(const sp<AMessage> &msg) {
@@ -8160,6 +8187,7 @@
                 msg->setInt32("generation", mCodec->mStateGeneration);
                 msg->post(3000000);
             }
+            handled = true;
             break;
         }
 
@@ -8180,6 +8208,18 @@
             break;
         }
 
+        case kWhatCheckIfStuck:
+        {
+            int32_t generation = 0;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation == mCodec->mStateGeneration) {
+                mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+            }
+
+            handled = true;
+            break;
+        }
+
         default:
             handled = BaseState::onMessageReceived(msg);
             break;
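
An informative note on the two stateEntered() hunks above (not part of the patch): they arm a generation-guarded watchdog.

    // Entering OutputPortSettingsChangedState or FlushingState posts
    // kWhatCheckIfStuck with a 3-second delay, carrying the mStateGeneration
    // captured at posting time. The handler calls
    // signalError(OMX_ErrorUndefined, TIMED_OUT) only if that generation still
    // matches when the message fires. Assuming mStateGeneration advances when
    // the codec leaves the state (the same guard used by the pre-existing
    // generation-tagged message visible in the FlushingState hunk), a watchdog
    // that outlives a normal transition is harmless: states that don't handle
    // kWhatCheckIfStuck fall through to the new default case added in the first
    // hunk, which just logs "No-op by default".
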
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 710ae68..266a240 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -129,6 +129,7 @@
         secureHandle = static_cast<native_handle_t *>(secureData->getDestinationPointer());
     }
     ssize_t result = -1;
+    ssize_t codecDataOffset = 0;
     if (mCrypto != NULL) {
         ICrypto::DestinationBuffer destination;
         if (secure) {
@@ -180,9 +181,16 @@
 
         Status status = Status::OK;
         hidl_string detailedError;
+        ScramblingControl sctrl = ScramblingControl::UNSCRAMBLED;
+
+        if (key != NULL) {
+            sctrl = (ScramblingControl)key[0];
+            // Adjust for the PES offset
+            codecDataOffset = key[2] | (key[3] << 8);
+        }
 
         auto returnVoid = mDescrambler->descramble(
-                key != NULL ? (ScramblingControl)key[0] : ScramblingControl::UNSCRAMBLED,
+                sctrl,
                 hidlSubSamples,
                 srcBuffer,
                 0,
@@ -202,6 +210,11 @@
             return UNKNOWN_ERROR;
         }
 
+        if (result < codecDataOffset) {
+            ALOGD("invalid codec data offset: %zd, result %zd", codecDataOffset, result);
+            return BAD_VALUE;
+        }
+
         ALOGV("descramble succeeded, %zd bytes", result);
 
         if (dstBuffer.type == BufferType::SHARED_MEMORY) {
@@ -210,7 +223,7 @@
         }
     }
 
-    it->mCodecBuffer->setRange(0, result);
+    it->mCodecBuffer->setRange(codecDataOffset, result - codecDataOffset);
 
     // Copy metadata from client to codec buffer.
     it->mCodecBuffer->meta()->clear();
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index db37021..41f5db0 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -217,6 +217,7 @@
       mNumFramesReceived(0),
       mLastFrameTimestampUs(0),
       mStarted(false),
+      mEos(false),
       mNumFramesEncoded(0),
       mTimeBetweenFrameCaptureUs(0),
       mFirstFrameTimeUs(0),
@@ -880,6 +881,7 @@
     {
         Mutex::Autolock autoLock(mLock);
         mStarted = false;
+        mEos = false;
         mStopSystemTimeUs = -1;
         mFrameAvailableCondition.signal();
 
@@ -1075,7 +1077,7 @@
 
     {
         Mutex::Autolock autoLock(mLock);
-        while (mStarted && mFramesReceived.empty()) {
+        while (mStarted && !mEos && mFramesReceived.empty()) {
             if (NO_ERROR !=
                 mFrameAvailableCondition.waitRelative(mLock,
                     mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
@@ -1091,6 +1093,9 @@
         if (!mStarted) {
             return OK;
         }
+        if (mFramesReceived.empty()) {
+            return ERROR_END_OF_STREAM;
+        }
         frame = *mFramesReceived.begin();
         mFramesReceived.erase(mFramesReceived.begin());
 
@@ -1129,6 +1134,8 @@
     if (mStopSystemTimeUs != -1 && timestampUs >= mStopSystemTimeUs) {
         ALOGV("Drop Camera frame at %lld  stop time: %lld us",
                 (long long)timestampUs, (long long)mStopSystemTimeUs);
+        mEos = true;
+        mFrameAvailableCondition.signal();
         return true;
     }
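
A condensed view of the new end-of-stream handshake (informative only; the producer-side function is not named in the hunks above):

    // Producer side: once a frame's timestamp reaches mStopSystemTimeUs, the
    // frame is dropped, mEos is set, and mFrameAvailableCondition is signalled
    // so a blocked reader wakes up instead of waiting for the frame timeout.
    //
    // Consumer side (the read loop above):
    //     while (mStarted && !mEos && mFramesReceived.empty()) { wait(); }
    //     if (!mStarted)               return OK;                    // stopped
    //     if (mFramesReceived.empty()) return ERROR_END_OF_STREAM;   // EOS, queue drained
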
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 1610bd0..f91c543 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -860,7 +860,15 @@
 }
 
 //static
-sp<CodecBase> MediaCodec::GetCodecBase(const AString &name) {
+sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, const char *owner) {
+    if (owner) {
+        if (strncmp(owner, "default", 8) == 0) {
+            return new ACodec;
+        } else if (strncmp(owner, "codec2", 7) == 0) {
+            return CreateCCodec();
+        }
+    }
+
     if (name.startsWithIgnoreCase("c2.")) {
         return CreateCCodec();
     } else if (name.startsWithIgnoreCase("omx.")) {
@@ -884,11 +892,6 @@
     // we need to invest in an extra looper to free the main event
     // queue.
 
-    mCodec = GetCodecBase(name);
-    if (mCodec == NULL) {
-        return NAME_NOT_FOUND;
-    }
-
     mCodecInfo.clear();
 
     bool secureCodec = false;
@@ -922,6 +925,11 @@
         return NAME_NOT_FOUND;
     }
 
+    mCodec = GetCodecBase(name, mCodecInfo->getOwnerName());
+    if (mCodec == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
     if (mIsVideo) {
         // video codec needs dedicated looper
         if (mCodecLooper == NULL) {
@@ -1935,7 +1943,9 @@
                         mAnalyticsItem->setCString(kCodecCodec, mComponentName.c_str());
                     }
 
-                    if (mComponentName.startsWith("OMX.google.")) {
+                    const char *owner = mCodecInfo->getOwnerName();
+                    if (mComponentName.startsWith("OMX.google.")
+                            && (owner == nullptr || strncmp(owner, "default", 8) == 0)) {
                         mFlags |= kFlagUsesSoftwareRenderer;
                     } else {
                         mFlags &= ~kFlagUsesSoftwareRenderer;
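
A summary of the new component dispatch (informative only; the body of the pre-existing "omx." branch is not shown above):

    // GetCodecBase(name, owner) now prefers the owner reported by MediaCodecInfo:
    //     owner "default"  -> new ACodec()      (OMX-backed component)
    //     owner "codec2"   -> CreateCCodec()    (Codec 2.0 component)
    //     no owner         -> fall back to the component-name prefix checks
    //                         ("c2.", "omx."), as before.
    // Because the owner is only known after the codec-list lookup, init() now
    // resolves mCodecInfo first and constructs mCodec afterwards. Likewise, the
    // kFlagUsesSoftwareRenderer heuristic only treats "OMX.google.*" components
    // as software renderers when the owner is absent or "default".
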
diff --git a/media/libstagefright/StagefrightPluginLoader.cpp b/media/libstagefright/StagefrightPluginLoader.cpp
index 519e870..dd5903a 100644
--- a/media/libstagefright/StagefrightPluginLoader.cpp
+++ b/media/libstagefright/StagefrightPluginLoader.cpp
@@ -46,7 +46,7 @@
     }
     mCreateInputSurface = (CodecBase::CreateInputSurfaceFunc)dlsym(
             mLibHandle, "CreateInputSurface");
-    if (mCreateBuilder == nullptr) {
+    if (mCreateInputSurface == nullptr) {
         ALOGD("Failed to find symbol: CreateInputSurface (%s)", dlerror());
     }
 }
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index cf5e91e..ada37a6 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1577,6 +1577,8 @@
     { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
     { MEDIA_MIMETYPE_AUDIO_OPUS,        AUDIO_FORMAT_OPUS},
     { MEDIA_MIMETYPE_AUDIO_AC3,         AUDIO_FORMAT_AC3},
+    { MEDIA_MIMETYPE_AUDIO_EAC3,        AUDIO_FORMAT_E_AC3},
+    { MEDIA_MIMETYPE_AUDIO_AC4,         AUDIO_FORMAT_AC4},
     { MEDIA_MIMETYPE_AUDIO_FLAC,        AUDIO_FORMAT_FLAC},
     { 0, AUDIO_FORMAT_INVALID }
 };
@@ -1867,4 +1869,3 @@
 }
 
 }  // namespace android
-
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 6819bba..9020fc1 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -475,7 +475,16 @@
                 nextVsyncTime += mVsyncPeriod;
                 if (vsyncsForLastFrame < ULONG_MAX)
                     ++vsyncsForLastFrame;
+            } else if (mTimeCorrection < -correctionLimit * 2
+                    || mTimeCorrection > correctionLimit * 2) {
+                ALOGW("correction beyond limit: %lld vs %lld (vsyncs for last frame: %zu, min: %zu)"
+                        " restarting. render=%lld",
+                        (long long)mTimeCorrection, (long long)correctionLimit,
+                        vsyncsForLastFrame, minVsyncsPerFrame, (long long)origRenderTime);
+                restart();
+                return origRenderTime;
             }
+
             ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
         }
         mLastVsyncTime = nextVsyncTime;
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index 813004b..942f850 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -431,7 +431,7 @@
             }
 
             if (mInputBufferCount == 0) {
-                CHECK(mHeader == NULL);
+                delete mHeader;
                 mHeader = new OpusHeader();
                 memset(mHeader, 0, sizeof(*mHeader));
                 if (!ParseOpusHeader(data, size, mHeader)) {
@@ -452,6 +452,9 @@
                 }
 
                 int status = OPUS_INVALID_STATE;
+                if (mDecoder != NULL) {
+                    opus_multistream_decoder_destroy(mDecoder);
+                }
                 mDecoder = opus_multistream_decoder_create(kRate,
                                                            mHeader->channels,
                                                            mHeader->num_streams,
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 1695c75..a32cf08 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -50,6 +50,7 @@
 const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
 const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
 const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
 const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
 
 const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 25be89f..b165bcb 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -52,6 +52,7 @@
 extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
 extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
 extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
 extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
 
 extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 52791b9..8ab33f7 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -234,7 +234,11 @@
         if (mSelectedIndex >= 0 && i == (size_t)mSelectedIndex) {
             const Media &item = mMediaItems.itemAt(i);
 
-            *uri = item.makeURL(baseURL);
+            if (item.mURI.empty()) {
+                *uri = "";
+            } else {
+                *uri = item.makeURL(baseURL);
+            }
             return true;
         }
     }
@@ -465,7 +469,7 @@
         }
 
         if ((*uri).empty()) {
-            *uri = mItems.itemAt(index).mURI;
+            *uri = mItems.itemAt(index).makeURL(mBaseURI.c_str());
         }
     }
 
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 97d15a7..1137cf1 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -137,6 +137,7 @@
         kWhatOMXDied                 = 'OMXd',
         kWhatReleaseCodecInstance    = 'relC',
         kWhatForceStateTransition    = 'fstt',
+        kWhatCheckIfStuck            = 'Cstk',
     };
 
     enum {
diff --git a/media/libstagefright/include/media/stagefright/CameraSource.h b/media/libstagefright/include/media/stagefright/CameraSource.h
index 475976b..3037b72 100644
--- a/media/libstagefright/include/media/stagefright/CameraSource.h
+++ b/media/libstagefright/include/media/stagefright/CameraSource.h
@@ -204,6 +204,7 @@
     int32_t mNumFramesReceived;
     int64_t mLastFrameTimestampUs;
     bool mStarted;
+    bool mEos;
     int32_t mNumFramesEncoded;
 
     // Time between capture of two frames.
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index ad02004..7f6aae6 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -377,7 +377,7 @@
 
     MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid);
 
-    static sp<CodecBase> GetCodecBase(const AString &name);
+    static sp<CodecBase> GetCodecBase(const AString &name, const char *owner = nullptr);
 
     static status_t PostAndAwaitResponse(
             const sp<AMessage> &msg, sp<AMessage> *response);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5cc5093..32635d1 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -119,6 +119,7 @@
 private:
     struct StreamInfo {
         unsigned mType;
+        unsigned mTypeExt;
         unsigned mPID;
         int32_t mCASystemId;
     };
@@ -145,10 +146,12 @@
     Stream(Program *program,
            unsigned elementaryPID,
            unsigned streamType,
+           unsigned streamTypeExt,
            unsigned PCR_PID,
            int32_t CA_system_ID);
 
     unsigned type() const { return mStreamType; }
+    unsigned typeExt() const { return mStreamTypeExt; }
     unsigned pid() const { return mElementaryPID; }
     void setPID(unsigned pid) { mElementaryPID = pid; }
 
@@ -194,6 +197,7 @@
     Program *mProgram;
     unsigned mElementaryPID;
     unsigned mStreamType;
+    unsigned mStreamTypeExt;
     unsigned mPCR_PID;
     int32_t mExpectedContinuityCounter;
 
@@ -447,7 +451,7 @@
         if (descriptor_length > infoLength) {
             break;
         }
-        if (descriptor_tag == 9 && descriptor_length >= 4) {
+        if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
             found = true;
             caDescriptor->mSystemID = br->getBits(16);
             caDescriptor->mPID = br->getBits(16) & 0x1fff;
@@ -513,37 +517,65 @@
     // infoBytesRemaining is the number of bytes that make up the
     // variable length section of ES_infos. It does not include the
     // final CRC.
-    size_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
+    int32_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
 
     while (infoBytesRemaining >= 5) {
-
-        unsigned streamType = br->getBits(8);
-        ALOGV("    stream_type = 0x%02x", streamType);
-
+        StreamInfo info;
+        info.mType = br->getBits(8);
+        ALOGV("    stream_type = 0x%02x", info.mType);
         MY_LOGV("    reserved = %u", br->getBits(3));
 
-        unsigned elementaryPID = br->getBits(13);
-        ALOGV("    elementary_PID = 0x%04x", elementaryPID);
+        info.mPID = br->getBits(13);
+        ALOGV("    elementary_PID = 0x%04x", info.mPID);
 
         MY_LOGV("    reserved = %u", br->getBits(4));
 
         unsigned ES_info_length = br->getBits(12);
         ALOGV("    ES_info_length = %u", ES_info_length);
+        infoBytesRemaining -= 5 + ES_info_length;
 
         CADescriptor streamCA;
-        bool hasStreamCA = findCADescriptor(br, ES_info_length, &streamCA);
+        info.mTypeExt = EXT_DESCRIPTOR_DVB_RESERVED_MAX;
+        bool hasStreamCA = false;
+        while (ES_info_length > 2 && infoBytesRemaining >= 0) {
+            unsigned descriptor_tag = br->getBits(8);
+            ALOGV("      tag = 0x%02x", descriptor_tag);
+
+            unsigned descriptor_length = br->getBits(8);
+            ALOGV("      len = %u", descriptor_length);
+
+            ES_info_length -= 2;
+            if (descriptor_length > ES_info_length) {
+                return ERROR_MALFORMED;
+            }
+            if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
+                hasStreamCA = true;
+                streamCA.mSystemID = br->getBits(16);
+                streamCA.mPID = br->getBits(16) & 0x1fff;
+                ES_info_length -= 4;
+                streamCA.mPrivateData.assign(br->data(), br->data() + descriptor_length - 4);
+            } else if (info.mType == STREAMTYPE_PES_PRIVATE_DATA &&
+                       descriptor_tag == DESCRIPTOR_DVB_EXTENSION && descriptor_length >= 1) {
+                unsigned descTagExt = br->getBits(8);
+                ALOGV("      tag_ext = 0x%02x", descTagExt);
+                if (descTagExt == EXT_DESCRIPTOR_DVB_AC4) {
+                    info.mTypeExt = EXT_DESCRIPTOR_DVB_AC4;
+                }
+                ES_info_length -= descriptor_length;
+                descriptor_length--;
+                br->skipBits(descriptor_length * 8);
+            } else {
+                ES_info_length -= descriptor_length;
+                br->skipBits(descriptor_length * 8);
+            }
+        }
         if (hasStreamCA && !mParser->mCasManager->addStream(
-                mProgramNumber, elementaryPID, streamCA)) {
+                mProgramNumber, info.mPID, streamCA)) {
             return ERROR_MALFORMED;
         }
-        StreamInfo info;
-        info.mType = streamType;
-        info.mPID = elementaryPID;
         info.mCASystemId = hasProgramCA ? programCA.mSystemID :
                            hasStreamCA ? streamCA.mSystemID  : -1;
         infos.push(info);
-
-        infoBytesRemaining -= 5 + ES_info_length;
     }
 
     if (infoBytesRemaining != 0) {
@@ -602,7 +634,7 @@
 
         if (index < 0) {
             sp<Stream> stream = new Stream(
-                    this, info.mPID, info.mType, PCR_PID, info.mCASystemId);
+                    this, info.mPID, info.mType, info.mTypeExt, PCR_PID, info.mCASystemId);
 
             if (mSampleAesKeyItem != NULL) {
                 stream->signalNewSampleAesKey(mSampleAesKeyItem);
@@ -720,11 +752,13 @@
         Program *program,
         unsigned elementaryPID,
         unsigned streamType,
+        unsigned streamTypeExt,
         unsigned PCR_PID,
         int32_t CA_system_ID)
     : mProgram(program),
       mElementaryPID(elementaryPID),
       mStreamType(streamType),
+      mStreamTypeExt(streamTypeExt),
       mPCR_PID(PCR_PID),
       mExpectedContinuityCounter(-1),
       mPayloadStarted(false),
@@ -781,6 +815,16 @@
             mode = ElementaryStreamQueue::AC3;
             break;
 
+        case STREAMTYPE_EAC3:
+            mode = ElementaryStreamQueue::EAC3;
+            break;
+
+        case STREAMTYPE_PES_PRIVATE_DATA:
+            if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4) {
+                mode = ElementaryStreamQueue::AC4;
+            }
+            break;
+
         case STREAMTYPE_METADATA:
             mode = ElementaryStreamQueue::METADATA;
             break;
@@ -986,9 +1030,12 @@
         case STREAMTYPE_MPEG2_AUDIO_ADTS:
         case STREAMTYPE_LPCM_AC3:
         case STREAMTYPE_AC3:
+        case STREAMTYPE_EAC3:
         case STREAMTYPE_AAC_ENCRYPTED:
         case STREAMTYPE_AC3_ENCRYPTED:
             return true;
+        case STREAMTYPE_PES_PRIVATE_DATA:
+            return mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4;
 
         default:
             return false;
@@ -1395,7 +1442,7 @@
     // Perform the 1st pass descrambling if needed
     if (descrambleBytes > 0) {
         memcpy(mDescrambledBuffer->data(), mBuffer->data(), descrambleBytes);
-        mDescrambledBuffer->setRange(0, descrambleBytes);
+        mDescrambledBuffer->setRange(0, mBuffer->size());
 
         hidl_vec<SubSample> subSamples;
         subSamples.resize(descrambleSubSamples);
@@ -1412,10 +1459,9 @@
             }
         }
 
-        uint64_t srcOffset = 0, dstOffset = 0;
-        // If scrambled at PES-level, PES header should be skipped
+        // If scrambled at PES-level, PES header is in the clear
         if (pesScramblingControl != 0) {
-            srcOffset = dstOffset = pesOffset;
+            subSamples[0].numBytesOfClearData = pesOffset;
             subSamples[0].numBytesOfEncryptedData -= pesOffset;
         }
 
@@ -1431,9 +1477,9 @@
                 (ScramblingControl) sctrl,
                 subSamples,
                 mDescramblerSrcBuffer,
-                srcOffset,
+                0 /*srcOffset*/,
                 dstBuffer,
-                dstOffset,
+                0 /*dstOffset*/,
                 [&status, &bytesWritten, &detailedError] (
                         Status _status, uint32_t _bytesWritten,
                         const hidl_string& _detailedError) {
@@ -1450,9 +1496,15 @@
 
         ALOGV("[stream %d] descramble succeeded, %d bytes",
                 mElementaryPID, bytesWritten);
-        memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
+
+        // Set descrambleBytes to the returned result.
+        // Note that this might be smaller than the total length of input data.
+        // (e.g. when we're descrambling the PES header portion of a secure stream,
+        // the plugin might cut it off right after the PES header.)
+        descrambleBytes = bytesWritten;
     }
 
+    sp<ABuffer> buffer;
     if (mQueue->isScrambled()) {
         // Queue subSample info for scrambled queue
         sp<ABuffer> clearSizesBuffer = new ABuffer(mSubSamples.size() * 4);
@@ -1464,8 +1516,7 @@
         for (auto it = mSubSamples.begin();
                 it != mSubSamples.end(); it++, i++) {
             if ((it->transport_scrambling_mode == 0
-                    && pesScramblingControl == 0)
-                    || i < descrambleSubSamples) {
+                    && pesScramblingControl == 0)) {
                 clearSizePtr[i] = it->subSampleSize;
                 encSizePtr[i] = 0;
             } else {
@@ -1474,14 +1525,26 @@
             }
             isSync |= it->random_access_indicator;
         }
+
+        // If scrambled at PES-level, PES header is in the clear
+        if (pesScramblingControl != 0) {
+            clearSizePtr[0] = pesOffset;
+            encSizePtr[0] -= pesOffset;
+        }
         // Pass the original TS subsample size now. The PES header adjust
         // will be applied when the scrambled AU is dequeued.
         mQueue->appendScrambledData(
                 mBuffer->data(), mBuffer->size(), sctrl,
                 isSync, clearSizesBuffer, encSizesBuffer);
+
+        buffer = mDescrambledBuffer;
+    } else {
+        memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
+
+        buffer = mBuffer;
     }
 
-    ABitReader br(mBuffer->data(), mBuffer->size());
+    ABitReader br(buffer->data(), buffer->size());
     status_t err = parsePES(&br, event);
 
     if (err != OK) {
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 45ca06b..a31dc46 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -142,6 +142,7 @@
         STREAMTYPE_MPEG2_VIDEO          = 0x02,
         STREAMTYPE_MPEG1_AUDIO          = 0x03,
         STREAMTYPE_MPEG2_AUDIO          = 0x04,
+        STREAMTYPE_PES_PRIVATE_DATA     = 0x06,
         STREAMTYPE_MPEG2_AUDIO_ADTS     = 0x0f,
         STREAMTYPE_MPEG4_VIDEO          = 0x10,
         STREAMTYPE_METADATA             = 0x15,
@@ -153,6 +154,7 @@
         // Stream type 0x83 is non-standard,
         // it could be LPCM or TrueHD AC3
         STREAMTYPE_LPCM_AC3             = 0x83,
+        STREAMTYPE_EAC3                 = 0x87,
 
         //Sample Encrypted types
         STREAMTYPE_H264_ENCRYPTED       = 0xDB,
@@ -160,6 +162,20 @@
         STREAMTYPE_AC3_ENCRYPTED        = 0xC1,
     };
 
+    enum {
+        // From ISO/IEC 13818-1: 2007 (E), Table 2-29
+        DESCRIPTOR_CA                   = 0x09,
+
+        // DVB BlueBook A038 Table 12
+        DESCRIPTOR_DVB_EXTENSION        = 0x7F,
+    };
+
+    // DVB BlueBook A038 Table 109
+    enum {
+        EXT_DESCRIPTOR_DVB_AC4              = 0x15,
+        EXT_DESCRIPTOR_DVB_RESERVED_MAX     = 0x7F,
+    };
+
 protected:
     virtual ~ATSParser();
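
A summary of how the new constants are used by the PMT parsing changes above (informative only):

    // stream_type 0x87 (STREAMTYPE_EAC3)                   -> ElementaryStreamQueue::EAC3
    // stream_type 0x06 (STREAMTYPE_PES_PRIVATE_DATA)
    //     + descriptor_tag 0x7F (DESCRIPTOR_DVB_EXTENSION)
    //     + tag extension 0x15 (EXT_DESCRIPTOR_DVB_AC4)    -> ElementaryStreamQueue::AC4
    //
    // Any other PES-private-data stream keeps mTypeExt at
    // EXT_DESCRIPTOR_DVB_RESERVED_MAX and is not treated as audio by the
    // stream-type switch above.
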
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index ece0692..9e154a3 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -226,6 +226,7 @@
         int32_t cryptoMode;
         if (buffer->meta()->findInt32("cryptoMode", &cryptoMode)) {
             int32_t cryptoKey;
+            int32_t pesOffset;
             sp<ABuffer> clearBytesBuffer, encBytesBuffer;
 
             CHECK(buffer->meta()->findInt32("cryptoKey", &cryptoKey));
@@ -233,6 +234,8 @@
                     && clearBytesBuffer != NULL);
             CHECK(buffer->meta()->findBuffer("encBytes", &encBytesBuffer)
                     && encBytesBuffer != NULL);
+            CHECK(buffer->meta()->findInt32("pesOffset", &pesOffset)
+                    && (pesOffset >= 0) && (pesOffset < 65536));
 
             bufmeta.setInt32(kKeyCryptoMode, cryptoMode);
 
@@ -240,6 +243,11 @@
             bufmeta.setData(kKeyCryptoIV, 0, array, 16);
 
             array[0] = (uint8_t) (cryptoKey & 0xff);
+            // array[1] contains PES header flag, which we don't use.
+            // array[2~3] contain the PES offset.
+            array[2] = (uint8_t) (pesOffset & 0xff);
+            array[3] = (uint8_t) ((pesOffset >> 8) & 0xff);
+
             bufmeta.setData(kKeyCryptoKey, 0, array, 16);
 
             bufmeta.setData(kKeyPlainSizes, 0,
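
How the PES offset travels from the TS parser to the codec (condensed from this change, informative only):

    // AnotherPacketSource (above) packs it into the 16-byte kKeyCryptoKey blob:
    //     array[0] = scrambling control (unchanged)
    //     array[2] = pesOffset & 0xff;          // low byte
    //     array[3] = (pesOffset >> 8) & 0xff;   // high byte (pesOffset < 65536)
    //
    // ACodecBufferChannel (earlier in this change) recovers it after
    // descrambling and trims the output so the decoder never sees the PES header:
    //     codecDataOffset = key[2] | (key[3] << 8);
    //     it->mCodecBuffer->setRange(codecDataOffset, result - codecDataOffset);
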
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 0fa9fcb..90005c3 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -86,6 +86,21 @@
     mCasSessionId = sessionId;
 }
 
+static int32_t readVariableBits(ABitReader &bits, int32_t nbits) {
+    int32_t value = 0;
+    int32_t more_bits = 1;
+
+    while (more_bits) {
+        value += bits.getBits(nbits);
+        more_bits = bits.getBits(1);
+        if (!more_bits)
+            break;
+        value++;
+        value <<= nbits;
+    }
+    return value;
+}
+
 // Parse AC3 header assuming the current ptr is start position of syncframe,
 // update metadata only applicable, and return the payload size
 static unsigned parseAC3SyncFrame(
@@ -195,8 +210,153 @@
     return payloadSize;
 }
 
-static bool IsSeeminglyValidAC3Header(const uint8_t *ptr, size_t size) {
-    return parseAC3SyncFrame(ptr, size, NULL) > 0;
+// Parse EAC3 header assuming the current ptr is start position of syncframe,
+// update metadata only applicable, and return the payload size
+// ATSC A/52:2012 E2.3.1
+static unsigned parseEAC3SyncFrame(
+    const uint8_t *ptr, size_t size, sp<MetaData> *metaData) {
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+    static const unsigned samplingRateTable[] = {48000, 44100, 32000};
+    static const unsigned samplingRateTable2[] = {24000, 22050, 16000};
+
+    ABitReader bits(ptr, size);
+    if (bits.numBitsLeft() < 16) {
+        ALOGE("Not enough bits left for further parsing");
+        return 0;
+    }
+    if (bits.getBits(16) != 0x0B77) {
+        ALOGE("No valid sync word in EAC3 header");
+        return 0;
+    }
+
+    // we parse up to bsid so there needs to be at least that many bits
+    if (bits.numBitsLeft() < 2 + 3 + 11 + 2 + 2 + 3 + 1 + 5) {
+        ALOGE("Not enough bits left for further parsing");
+        return 0;
+    }
+
+    unsigned strmtyp = bits.getBits(2);
+    if (strmtyp == 3) {
+        ALOGE("Incorrect strmtyp in EAC3 header");
+        return 0;
+    }
+
+    unsigned substreamid = bits.getBits(3);
+    // only the first independent stream is supported
+    if ((strmtyp == 0 || strmtyp == 2) && substreamid != 0)
+        return 0;
+
+    unsigned frmsiz = bits.getBits(11);
+    unsigned fscod = bits.getBits(2);
+
+    unsigned samplingRate = 0;
+    if (fscod == 0x3) {
+        unsigned fscod2 = bits.getBits(2);
+        if (fscod2 == 3) {
+            ALOGW("Incorrect fscod2 in EAC3 header");
+            return 0;
+        }
+        samplingRate = samplingRateTable2[fscod2];
+    } else {
+        samplingRate = samplingRateTable[fscod];
+        unsigned numblkscod __unused = bits.getBits(2);
+    }
+
+    unsigned acmod = bits.getBits(3);
+    unsigned lfeon = bits.getBits(1);
+    unsigned bsid = bits.getBits(5);
+    if (bsid < 11 || bsid > 16) {
+        ALOGW("Incorrect bsid in EAC3 header. Could be AC-3 or some unknown EAC3 format");
+        return 0;
+    }
+
+    // we currently only support the first independent stream
+    if (metaData != NULL && (strmtyp == 0 || strmtyp == 2)) {
+        unsigned channelCount = channelCountTable[acmod] + lfeon;
+        ALOGV("EAC3 channelCount = %d", channelCount);
+        ALOGV("EAC3 samplingRate = %d", samplingRate);
+        (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EAC3);
+        (*metaData)->setInt32(kKeyChannelCount, channelCount);
+        (*metaData)->setInt32(kKeySampleRate, samplingRate);
+        (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+    }
+
+    unsigned payloadSize = frmsiz + 1;
+    payloadSize <<= 1;  // convert from 16-bit words to bytes
+
+    return payloadSize;
+}
+
+// Parse AC4 header assuming the current ptr is start position of syncframe
+// and update frameSize and metadata.
+static status_t parseAC4SyncFrame(
+        const uint8_t *ptr, size_t size, unsigned &frameSize, sp<MetaData> *metaData) {
+    // ETSI TS 103 190-2 V1.1.1 (2015-09), Annex C
+    // The sync_word can be either 0xAC40 or 0xAC41.
+    static const int kSyncWordAC40 = 0xAC40;
+    static const int kSyncWordAC41 = 0xAC41;
+
+    size_t headerSize = 0;
+    ABitReader bits(ptr, size);
+    int32_t syncWord = bits.getBits(16);
+    if ((syncWord != kSyncWordAC40) && (syncWord != kSyncWordAC41)) {
+        ALOGE("Invalid syncword in AC4 header");
+        return ERROR_MALFORMED;
+    }
+    headerSize += 2;
+
+    frameSize = bits.getBits(16);
+    headerSize += 2;
+    if (frameSize == 0xFFFF) {
+        frameSize = bits.getBits(24);
+        headerSize += 3;
+    }
+
+    if (frameSize == 0) {
+        ALOGE("Invalid frame size in AC4 header");
+        return ERROR_MALFORMED;
+    }
+    frameSize += headerSize;
+    // If the sync_word is 0xAC41, a crc_word is also transmitted.
+    if (syncWord == kSyncWordAC41) {
+        frameSize += 2; // crc_word
+    }
+    ALOGV("AC4 frameSize = %u", frameSize);
+
+    // ETSI TS 103 190-2 V1.1.1 6.2.1.1
+    uint32_t bitstreamVersion = bits.getBits(2);
+    if (bitstreamVersion == 3) {
+        bitstreamVersion += readVariableBits(bits, 2);
+    }
+
+    bits.skipBits(10); // Sequence Counter
+
+    uint32_t bWaitFrames = bits.getBits(1);
+    if (bWaitFrames) {
+        uint32_t waitFrames = bits.getBits(3);
+        if (waitFrames > 0) {
+            bits.skipBits(2); // br_code;
+        }
+    }
+
+    // ETSI TS 103 190 V1.1.1 Table 82
+    bool fsIndex = bits.getBits(1);
+    uint32_t samplingRate = fsIndex ? 48000 : 44100;
+
+    if (metaData != NULL) {
+        ALOGV("dequeueAccessUnitAC4 Setting mFormat");
+        (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+        (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+        // [FIXME] AC4 channel count is defined per presentation. Provide a default channel count
+        // as stereo for the entire stream.
+        (*metaData)->setInt32(kKeyChannelCount, 2);
+        (*metaData)->setInt32(kKeySampleRate, samplingRate);
+    }
+    return OK;
+}
+
+static status_t IsSeeminglyValidAC4Header(const uint8_t *ptr, size_t size, unsigned &frameSize) {
+    return parseAC4SyncFrame(ptr, size, frameSize, NULL);
 }
 
 static bool IsSeeminglyValidADTSHeader(
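
Worked examples of the frame-size math in the two parsers added above (informative only):

    // E-AC-3 (parseEAC3SyncFrame): frmsiz is an 11-bit count of 16-bit words
    // minus one, so payloadSize = (frmsiz + 1) * 2 bytes.
    //     e.g. frmsiz = 383  ->  768-byte syncframe.
    //
    // AC-4 (parseAC4SyncFrame): a 16-bit frame_size of 0xFFFF escapes to a
    // 24-bit size; the returned frameSize adds the 4- or 7-byte header and,
    // when the sync word is 0xAC41, a trailing 2-byte crc_word.
    //     e.g. sync word 0xAC41, frame_size = 0x0200 -> 512 + 4 + 2 = 518 bytes.
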
@@ -390,12 +550,19 @@
             }
 
             case AC3:
+            case EAC3:
             {
                 uint8_t *ptr = (uint8_t *)data;
 
                 ssize_t startOffset = -1;
                 for (size_t i = 0; i < size; ++i) {
-                    if (IsSeeminglyValidAC3Header(&ptr[i], size - i)) {
+                    unsigned payloadSize = 0;
+                    if (mMode == AC3) {
+                        payloadSize = parseAC3SyncFrame(&ptr[i], size - i, NULL);
+                    } else if (mMode == EAC3) {
+                        payloadSize = parseEAC3SyncFrame(&ptr[i], size - i, NULL);
+                    }
+                    if (payloadSize > 0) {
                         startOffset = i;
                         break;
                     }
@@ -406,7 +573,7 @@
                 }
 
                 if (startOffset > 0) {
-                    ALOGI("found something resembling an AC3 syncword at "
+                    ALOGI("found something resembling an (E)AC3 syncword at "
                           "offset %zd",
                           startOffset);
                 }
@@ -416,6 +583,43 @@
                 break;
             }
 
+            case AC4:
+            {
+                uint8_t *ptr = (uint8_t *)data;
+                unsigned frameSize = 0;
+                ssize_t startOffset = -1;
+
+                // A valid AC4 stream should have a minimum of 7 bytes in its buffer.
+                // (Sync header 4 bytes + AC4 toc 3 bytes)
+                if (size < 7) {
+                    return ERROR_MALFORMED;
+                }
+                for (size_t i = 0; i < size; ++i) {
+                    if (IsSeeminglyValidAC4Header(&ptr[i], size - i, frameSize) == OK) {
+                        startOffset = i;
+                        break;
+                    }
+                }
+
+                if (startOffset < 0) {
+                    return ERROR_MALFORMED;
+                }
+
+                if (startOffset > 0) {
+                    ALOGI("found something resembling an AC4 syncword at "
+                          "offset %zd",
+                          startOffset);
+                }
+                if (frameSize != size - startOffset) {
+                    ALOGV("AC4 frame size is %u bytes, while the buffer size is %zd bytes.",
+                          frameSize, size - startOffset);
+                }
+
+                data = &ptr[startOffset];
+                size -= startOffset;
+                break;
+            }
+
             case MPEG_AUDIO:
             {
                 uint8_t *ptr = (uint8_t *)data;
@@ -568,25 +772,9 @@
         return NULL;
     }
 
-    // skip the PES header, and copy the rest into scrambled access unit
+    // copy into scrambled access unit
     sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
-            mScrambledBuffer->data() + pesOffset,
-            scrambledLength - pesOffset);
-
-    // fix up first sample size after skipping the PES header
-    if (pesOffset > 0) {
-        int32_t &firstClearSize = *(int32_t*)clearSizes->data();
-        int32_t &firstEncSize = *(int32_t*)encSizes->data();
-        // Cut away the PES header
-        if (firstClearSize >= pesOffset) {
-            // This is for TS-level scrambling, we descrambled the first
-            // (or it was clear to begin with)
-            firstClearSize -= pesOffset;
-        } else if (firstEncSize >= pesOffset) {
-            // This can only be PES-level scrambling
-            firstEncSize -= pesOffset;
-        }
-    }
+            mScrambledBuffer->data(), scrambledLength);
 
     scrambledAccessUnit->meta()->setInt64("timeUs", timeUs);
     if (isSync) {
@@ -600,6 +788,7 @@
     scrambledAccessUnit->meta()->setInt32("cryptoKey", keyId);
     scrambledAccessUnit->meta()->setBuffer("clearBytes", clearSizes);
     scrambledAccessUnit->meta()->setBuffer("encBytes", encSizes);
+    scrambledAccessUnit->meta()->setInt32("pesOffset", pesOffset);
 
     memmove(mScrambledBuffer->data(),
             mScrambledBuffer->data() + scrambledLength,
@@ -648,7 +837,10 @@
         case AAC:
             return dequeueAccessUnitAAC();
         case AC3:
-            return dequeueAccessUnitAC3();
+        case EAC3:
+            return dequeueAccessUnitEAC3();
+        case AC4:
+            return dequeueAccessUnitAC4();
         case MPEG_VIDEO:
             return dequeueAccessUnitMPEGVideo();
         case MPEG4_VIDEO:
@@ -666,34 +858,38 @@
     }
 }
 
-sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC3() {
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitEAC3() {
     unsigned syncStartPos = 0;  // in bytes
     unsigned payloadSize = 0;
     sp<MetaData> format = new MetaData;
 
-    ALOGV("dequeueAccessUnit_AC3[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+    ALOGV("dequeueAccessUnitEAC3[%d]: mBuffer %p(%zu)", mAUIndex,
+            mBuffer->data(), mBuffer->size());
 
     while (true) {
         if (syncStartPos + 2 >= mBuffer->size()) {
             return NULL;
         }
 
-        payloadSize = parseAC3SyncFrame(
-                mBuffer->data() + syncStartPos,
-                mBuffer->size() - syncStartPos,
-                &format);
+        uint8_t *ptr = mBuffer->data() + syncStartPos;
+        size_t size = mBuffer->size() - syncStartPos;
+        if (mMode == AC3) {
+            payloadSize = parseAC3SyncFrame(ptr, size, &format);
+        } else if (mMode == EAC3) {
+            payloadSize = parseEAC3SyncFrame(ptr, size, &format);
+        }
         if (payloadSize > 0) {
             break;
         }
 
-        ALOGV("dequeueAccessUnit_AC3[%d]: syncStartPos %u payloadSize %u",
+        ALOGV("dequeueAccessUnitEAC3[%d]: syncStartPos %u payloadSize %u",
                 mAUIndex, syncStartPos, payloadSize);
 
         ++syncStartPos;
     }
 
     if (mBuffer->size() < syncStartPos + payloadSize) {
-        ALOGV("Not enough buffer size for AC3");
+        ALOGV("Not enough buffer size for E/AC3");
         return NULL;
     }
 
@@ -701,7 +897,6 @@
         mFormat = format;
     }
 
-
     int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
     if (timeUs < 0ll) {
         ALOGE("negative timeUs");
@@ -710,7 +905,12 @@
 
     // Not decrypting if key info not available (e.g., scanner/extractor parsing ts files)
     if (mSampleDecryptor != NULL) {
-        mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+        if (mMode == AC3) {
+            mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+        } else if (mMode == EAC3) {
+            ALOGE("EAC3 AU is encrypted and decryption is not supported");
+            return NULL;
+        }
     }
     mAUIndex++;
 
@@ -730,6 +930,69 @@
     return accessUnit;
 }
 
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC4() {
+    unsigned syncStartPos = 0;
+    unsigned payloadSize = 0;
+    sp<MetaData> format = new MetaData;
+    ALOGV("dequeueAccessUnit_AC4[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+
+    // A valid AC4 stream should have a minimum of 7 bytes in its buffer.
+    // (Sync header 4 bytes + AC4 toc 3 bytes)
+    if (mBuffer->size() < 7) {
+        return NULL;
+    }
+
+    while (true) {
+        if (syncStartPos + 2 >= mBuffer->size()) {
+            return NULL;
+        }
+
+        status_t status = parseAC4SyncFrame(
+                    mBuffer->data() + syncStartPos,
+                    mBuffer->size() - syncStartPos,
+                    payloadSize,
+                    &format);
+        if (status == OK) {
+            break;
+        }
+
+        ALOGV("dequeueAccessUnit_AC4[%d]: syncStartPos %u payloadSize %u",
+                mAUIndex, syncStartPos, payloadSize);
+
+        ++syncStartPos;
+    }
+
+    if (mBuffer->size() < syncStartPos + payloadSize) {
+        ALOGV("Not enough buffer size for AC4");
+        return NULL;
+    }
+
+    if (mFormat == NULL) {
+        mFormat = format;
+    }
+
+    int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+    if (timeUs < 0ll) {
+        ALOGE("negative timeUs");
+        return NULL;
+    }
+    mAUIndex++;
+
+    sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+    memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+    accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
+
+    memmove(
+            mBuffer->data(),
+            mBuffer->data() + syncStartPos + payloadSize,
+            mBuffer->size() - syncStartPos - payloadSize);
+
+    mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+    return accessUnit;
+}
+
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() {
     if (mBuffer->size() < 4) {
         return NULL;
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index ffcb502..8c1d112 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -38,6 +38,8 @@
         H264,
         AAC,
         AC3,
+        EAC3,
+        AC4,
         MPEG_AUDIO,
         MPEG_VIDEO,
         MPEG4_VIDEO,
@@ -115,7 +117,8 @@
 
     sp<ABuffer> dequeueAccessUnitH264();
     sp<ABuffer> dequeueAccessUnitAAC();
-    sp<ABuffer> dequeueAccessUnitAC3();
+    sp<ABuffer> dequeueAccessUnitEAC3();
+    sp<ABuffer> dequeueAccessUnitAC4();
     sp<ABuffer> dequeueAccessUnitMPEGAudio();
     sp<ABuffer> dequeueAccessUnitMPEGVideo();
     sp<ABuffer> dequeueAccessUnitMPEG4Video();
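For context on the new AC4 path: IsSeeminglyValidAC4Header() is used above to locate a frame boundary, but its body falls outside this excerpt. The following is only a sketch of the kind of check such a helper performs, assuming the ETSI TS 103 190 raw-frame framing (sync words 0xAC40/0xAC41, a 16-bit frame_size with 0xFFFF escaping to a 24-bit value, and a trailing CRC word for the 0xAC41 sync word) and reusing the status codes from ESQueue.cpp; it is not the implementation shipped by this patch.

    // Sketch of an AC-4 sync-header check mirroring the call signature used
    // above; constants and layout are assumptions, not patch code.
    static status_t SketchValidAC4Header(
            const uint8_t *ptr, size_t size, unsigned &frameSize) {
        if (size < 7) {                          // sync header (4) + minimum TOC (3)
            return ERROR_MALFORMED;
        }
        unsigned syncWord = (ptr[0] << 8) | ptr[1];
        if (syncWord != 0xAC40 && syncWord != 0xAC41) {
            return ERROR_MALFORMED;              // not an AC-4 sync word
        }
        unsigned headerSize = 4;
        frameSize = (ptr[2] << 8) | ptr[3];
        if (frameSize == 0xFFFF) {               // escaped 24-bit frame size follows
            frameSize = (ptr[4] << 16) | (ptr[5] << 8) | ptr[6];
            headerSize += 3;
        }
        if (syncWord == 0xAC41) {
            frameSize += 2;                      // the frame carries a CRC word
        }
        frameSize += headerSize;                 // report the full frame length
        return OK;
    }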
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 7d2c2dd..32113c2 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -1085,7 +1085,8 @@
         }
 
         case OMXBuffer::kBufferTypeANWBuffer: {
-            if (mPortMode[portIndex] != IOMX::kPortModePresetANWBuffer) {
+            if (mPortMode[portIndex] != IOMX::kPortModePresetANWBuffer
+                    && mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer) {
                 break;
             }
             return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
@@ -1609,12 +1610,15 @@
     }
     BufferMeta *buffer_meta = static_cast<BufferMeta *>(header->pAppPrivate);
 
+    // Invalidate buffers in the client side first before calling OMX_FreeBuffer.
+    // If not, pending events in the client side might access the buffers after free.
+    invalidateBufferID(buffer);
+
     OMX_ERRORTYPE err = OMX_FreeBuffer(mHandle, portIndex, header);
     CLOG_IF_ERROR(freeBuffer, err, "%s:%u %#x", portString(portIndex), portIndex, buffer);
 
     delete buffer_meta;
     buffer_meta = NULL;
-    invalidateBufferID(buffer);
 
     return StatusFromOMXError(err);
 }
diff --git a/media/mtp/PosixAsyncIO.cpp b/media/mtp/PosixAsyncIO.cpp
index e67c568..72c07cc 100644
--- a/media/mtp/PosixAsyncIO.cpp
+++ b/media/mtp/PosixAsyncIO.cpp
@@ -15,42 +15,109 @@
  */
 
 #include <android-base/logging.h>
-#include <condition_variable>
 #include <memory>
-#include <mutex>
+#include <pthread.h>
 #include <queue>
+#include <thread>
 #include <unistd.h>
 
 #include "PosixAsyncIO.h"
 
 namespace {
 
-void read_func(struct aiocb *aiocbp) {
-    aiocbp->ret = TEMP_FAILURE_RETRY(pread(aiocbp->aio_fildes,
-                aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
-    if (aiocbp->ret == -1) aiocbp->error = errno;
+std::thread gWorkerThread;
+std::deque<struct aiocb*> gWorkQueue;
+bool gSuspended = true;
+int gAiocbRefcount = 0;
+std::mutex gLock;
+std::condition_variable gWait;
+
+void work_func(void *) {
+    pthread_setname_np(pthread_self(), "AsyncIO work");
+    while (true) {
+        struct aiocb *aiocbp;
+        {
+            std::unique_lock<std::mutex> lk(gLock);
+            gWait.wait(lk, []{return gWorkQueue.size() > 0 || gSuspended;});
+            if (gSuspended)
+                return;
+            aiocbp = gWorkQueue.back();
+            gWorkQueue.pop_back();
+        }
+        CHECK(aiocbp->queued);
+        int ret;
+        if (aiocbp->read) {
+            ret = TEMP_FAILURE_RETRY(pread(aiocbp->aio_fildes,
+                    aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
+        } else {
+            ret = TEMP_FAILURE_RETRY(pwrite(aiocbp->aio_fildes,
+                    aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
+        }
+        {
+            std::unique_lock<std::mutex> lk(aiocbp->lock);
+            aiocbp->ret = ret;
+            if (aiocbp->ret == -1) {
+                aiocbp->error = errno;
+            }
+            aiocbp->queued = false;
+        }
+        aiocbp->cv.notify_all();
+    }
 }
 
-void write_func(struct aiocb *aiocbp) {
-    aiocbp->ret = TEMP_FAILURE_RETRY(pwrite(aiocbp->aio_fildes,
-                aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
-    if (aiocbp->ret == -1) aiocbp->error = errno;
+int aio_add(struct aiocb *aiocbp) {
+    CHECK(!aiocbp->queued);
+    aiocbp->queued = true;
+    {
+        std::unique_lock<std::mutex> lk(gLock);
+        gWorkQueue.push_front(aiocbp);
+    }
+    gWait.notify_one();
+    return 0;
 }
 
 } // end anonymous namespace
 
+aiocb::aiocb() {
+    this->ret = 0;
+    this->queued = false;
+    {
+        std::unique_lock<std::mutex> lk(gLock);
+        if (gAiocbRefcount == 0) {
+            CHECK(gWorkQueue.size() == 0);
+            CHECK(gSuspended);
+            gSuspended = false;
+            gWorkerThread = std::thread(work_func, nullptr);
+        }
+        gAiocbRefcount++;
+    }
+}
+
 aiocb::~aiocb() {
-    CHECK(!thread.joinable());
+    CHECK(!this->queued);
+    {
+        std::unique_lock<std::mutex> lk(gLock);
+        CHECK(!gSuspended);
+        if (gAiocbRefcount == 1) {
+            CHECK(gWorkQueue.size() == 0);
+            gSuspended = true;
+            lk.unlock();
+            gWait.notify_one();
+            gWorkerThread.join();
+            lk.lock();
+        }
+        gAiocbRefcount--;
+    }
 }
 
 int aio_read(struct aiocb *aiocbp) {
-    aiocbp->thread = std::thread(read_func, aiocbp);
-    return 0;
+    aiocbp->read = true;
+    return aio_add(aiocbp);
 }
 
 int aio_write(struct aiocb *aiocbp) {
-    aiocbp->thread = std::thread(write_func, aiocbp);
-    return 0;
+    aiocbp->read = false;
+    return aio_add(aiocbp);
 }
 
 int aio_error(const struct aiocb *aiocbp) {
@@ -64,7 +131,10 @@
 int aio_suspend(struct aiocb *aiocbp[], int n,
         const struct timespec *) {
     for (int i = 0; i < n; i++) {
-        aiocbp[i]->thread.join();
+        {
+            std::unique_lock<std::mutex> lk(aiocbp[i]->lock);
+            aiocbp[i]->cv.wait(lk, [aiocbp, i]{return !aiocbp[i]->queued;});
+        }
     }
     return 0;
 }
diff --git a/media/mtp/PosixAsyncIO.h b/media/mtp/PosixAsyncIO.h
index 590aaef..2bb5735 100644
--- a/media/mtp/PosixAsyncIO.h
+++ b/media/mtp/PosixAsyncIO.h
@@ -17,10 +17,11 @@
 #ifndef _POSIXASYNCIO_H
 #define _POSIXASYNCIO_H
 
+#include <condition_variable>
+#include <mutex>
 #include <sys/cdefs.h>
 #include <sys/types.h>
 #include <time.h>
-#include <thread>
 #include <unistd.h>
 
 /**
@@ -35,10 +36,15 @@
     size_t aio_nbytes;
 
     // Used internally
-    std::thread thread;
+    bool read;
+    bool queued;
     ssize_t ret;
     int error;
 
+    std::mutex lock;
+    std::condition_variable cv;
+
+    aiocb();
     ~aiocb();
 };
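Taken together, the PosixAsyncIO rewrite replaces the per-request thread with a single shared worker: the first aiocb constructed starts the thread, aio_read()/aio_write() enqueue the request, and aio_suspend() blocks on the per-request condition variable until the worker clears the queued flag. A minimal usage sketch under those semantics (the file descriptor and buffer are caller-supplied placeholders):

    #include "PosixAsyncIO.h"

    // Sketch: issue one asynchronous pread() through the shared worker thread
    // and wait for it, using only the fields and calls declared above.
    ssize_t readChunkAsync(int fd, void *buf, size_t count, off_t offset) {
        struct aiocb request;                  // constructor registers with the worker
        request.aio_fildes = fd;
        request.aio_buf = buf;
        request.aio_nbytes = count;
        request.aio_offset = offset;

        if (aio_read(&request) != 0) {         // queues the request for the worker
            return -1;
        }

        struct aiocb *list[1] = { &request };
        aio_suspend(list, 1, nullptr);         // blocks until the worker finishes it

        // request.ret holds the pread() result stored by the worker; when it is
        // -1, the captured errno is available through aio_error(&request).
        return request.ret;
    }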
 
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 6d10f1c..5597488 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -309,6 +309,7 @@
     }
     String8 defaultUrl;
     DrmPlugin::KeyRequestType keyRequestType;
+    mObj->mKeyRequest.clear();
     status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
             mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
             &keyRequestType);
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 38e12e3..f936118 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -36,13 +36,12 @@
 #ifndef _NDK_IMAGE_H
 #define _NDK_IMAGE_H
 
+#include <stdint.h>
 #include <sys/cdefs.h>
 
 #include "NdkMediaError.h"
 
-#if __ANDROID_API__ >= 26
 #include <android/hardware_buffer.h>
-#endif /* __ANDROID_API__ >= 26 */
 
 __BEGIN_DECLS
 
@@ -516,6 +515,8 @@
     int32_t bottom;
 } AImageCropRect;
 
+#if __ANDROID_API__ >= 24
+
 /**
  * Return the image back to the system and delete the AImage object from memory.
  *
@@ -712,6 +713,10 @@
         const AImage* image, int planeIdx,
         /*out*/uint8_t** data, /*out*/int* dataLength) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 26
+
 /**
  * Return the image back to the system and delete the AImage object from memory asynchronously.
  *
@@ -756,6 +761,8 @@
  */
 media_status_t AImage_getHardwareBuffer(const AImage* image, /*out*/AHardwareBuffer** buffer) __INTRODUCED_IN(26);
 
+#endif /* __ANDROID_API__ >= 26 */
+
 __END_DECLS
 
 #endif //_NDK_IMAGE_H
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index eb1a44a..68de176 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -50,6 +50,8 @@
  */
 typedef struct AImageReader AImageReader;
 
+#if __ANDROID_API__ >= 24
+
 /**
  * Create a new reader for images of the desired size and format.
  *
@@ -296,6 +298,10 @@
 media_status_t AImageReader_setImageListener(
         AImageReader* reader, AImageReader_ImageListener* listener) __INTRODUCED_IN(24);
 
+#endif /* __ANDROID_API__ >= 24 */
+
+#if __ANDROID_API__ >= 26
+
 /**
  * AImageReader constructor similar to {@link AImageReader_new} that takes an additional parameter
  * for the consumer usage. All other parameters and the return values are identical to those passed
@@ -455,6 +461,8 @@
 media_status_t AImageReader_setBufferRemovedListener(
         AImageReader* reader, AImageReader_BufferRemovedListener* listener) __INTRODUCED_IN(26);
 
+#endif /* __ANDROID_API__ >= 26 */
+
 __END_DECLS
 
 #endif //_NDK_IMAGE_READER_H
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b329b39..9dc120d 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -121,6 +121,8 @@
       AMediaCodecOnAsyncError           onAsyncError;
 };
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Create codec by name. Use this if you know the exact codec you want to use.
  * When configuring, you will need to specify whether to use the codec as an
@@ -274,6 +276,8 @@
 media_status_t AMediaCodec_releaseOutputBufferAtTime(
         AMediaCodec *mData, size_t idx, int64_t timestampNs) __INTRODUCED_IN(21);
 
+#if __ANDROID_API__ >= 26
+
 /**
  * Creates a Surface that can be used as the input to encoder, in place of input buffers
  *
@@ -344,6 +348,10 @@
  */
 media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData) __INTRODUCED_IN(26);
 
+#endif /* __ANDROID_API__ >= 26 */
+
+#if __ANDROID_API__ >= 28
+
 /**
  * Get the component name. If the codec was created by createDecoderByType
  * or createEncoderByType, what component is chosen is not known beforehand.
@@ -405,6 +413,8 @@
  */
 bool AMediaCodecActionCode_isTransient(int32_t actionCode) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 typedef enum {
     AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
     AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
@@ -483,6 +493,8 @@
  */
 media_status_t AMediaCodecCryptoInfo_getEncryptedBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif //_NDK_MEDIA_CODEC_H
diff --git a/media/ndk/include/media/NdkMediaCrypto.h b/media/ndk/include/media/NdkMediaCrypto.h
index b673adc..bcdf9a0 100644
--- a/media/ndk/include/media/NdkMediaCrypto.h
+++ b/media/ndk/include/media/NdkMediaCrypto.h
@@ -47,6 +47,8 @@
 
 typedef uint8_t AMediaUUID[16];
 
+#if __ANDROID_API__ >= 21
+
 bool AMediaCrypto_isCryptoSchemeSupported(const AMediaUUID uuid) __INTRODUCED_IN(21);
 
 bool AMediaCrypto_requiresSecureDecoderComponent(const char *mime) __INTRODUCED_IN(21);
@@ -55,6 +57,8 @@
 
 void AMediaCrypto_delete(AMediaCrypto* crypto) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_CRYPTO_H
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 3a4373c..ea5ba0c 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -38,6 +38,8 @@
 struct AMediaDataSource;
 typedef struct AMediaDataSource AMediaDataSource;
 
+#if __ANDROID_API__ >= 28
+
 /*
  * AMediaDataSource's callbacks will be invoked on an implementation-defined thread
  * or thread pool. No guarantees are provided about which thread(s) will be used for
@@ -133,6 +135,8 @@
         AMediaDataSource*,
         AMediaDataSourceClose) __INTRODUCED_IN(28);
 
+#endif  /*__ANDROID_API__ >= 28 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_DATASOURCE_H
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 24c0d6d..0209681 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -87,6 +87,8 @@
 typedef void (*AMediaDrmEventListener)(AMediaDrm *, const AMediaDrmSessionId *sessionId,
         AMediaDrmEventType eventType, int extra, const uint8_t *data, size_t dataSize);
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Query if the given scheme identified by its UUID is supported on this device, and
  * whether the drm plugin is able to handle the media container format specified by mimeType.
@@ -459,6 +461,8 @@
         const char *macAlgorithm, uint8_t *keyId, const uint8_t *message, size_t messageSize,
         const uint8_t *signature, size_t signatureSize) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif //_NDK_MEDIA_DRM_H
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index 9f60891..6a1796f 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -49,6 +49,8 @@
 struct AMediaExtractor;
 typedef struct AMediaExtractor AMediaExtractor;
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Create new media extractor
  */
@@ -72,12 +74,16 @@
         const char *location) __INTRODUCED_IN(21);
         // TODO support headers
 
+#if __ANDROID_API__ >= 28
+
 /**
  * Set the custom data source implementation from which the extractor will read.
  */
 media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*,
         AMediaDataSource *src) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
 /**
  * Return the number of tracks in the previously specified media file
  */
@@ -173,6 +179,8 @@
     AMEDIAEXTRACTOR_SAMPLE_FLAG_ENCRYPTED = 2,
 };
 
+#if __ANDROID_API__ >= 28
+
 /**
  * Returns the format of the extractor. The caller must free the returned format
  * using AMediaFormat_delete(format).
@@ -219,6 +227,10 @@
 media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex,
         AMediaFormat *fmt) __INTRODUCED_IN(28);
 
+#endif /* __ANDROID_API__ >= 28 */
+
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_EXTRACTOR_H
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index dd39acf..5f7804d 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -46,6 +46,8 @@
 struct AMediaFormat;
 typedef struct AMediaFormat AMediaFormat;
 
+#if __ANDROID_API__ >= 21
+
 AMediaFormat *AMediaFormat_new() __INTRODUCED_IN(21);
 media_status_t AMediaFormat_delete(AMediaFormat*) __INTRODUCED_IN(21);
 
@@ -88,73 +90,76 @@
 /**
  * XXX should these be ints/enums that we look up in a table as needed?
  */
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
-extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
-extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
-extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
-extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
-extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
-extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
-extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
-extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
-extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
-extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
-extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
-extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
-extern const char* AMEDIAFORMAT_KEY_CSD;
-extern const char* AMEDIAFORMAT_KEY_CSD_0;
-extern const char* AMEDIAFORMAT_KEY_CSD_1;
-extern const char* AMEDIAFORMAT_KEY_CSD_2;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_DURATION;
-extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
-extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS;
-extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
-extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
-extern const char* AMEDIAFORMAT_KEY_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
-extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
-extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
-extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
-extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
-extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
-extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
-extern const char* AMEDIAFORMAT_KEY_LATENCY;
-extern const char* AMEDIAFORMAT_KEY_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
-extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_MIME;
-extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA;
-extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
-extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
-extern const char* AMEDIAFORMAT_KEY_PRIORITY;
-extern const char* AMEDIAFORMAT_KEY_PROFILE;
-extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
-extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
-extern const char* AMEDIAFORMAT_KEY_ROTATION;
-extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_SEI;
-extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_STRIDE;
-extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID;
-extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
-extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_TIME_US;
-extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
-extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_BIT_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_0 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_1 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_2 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DURATION __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FRAME_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_HEIGHT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_IS_ADTS __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_LANGUAGE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_LATENCY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_LEVEL __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MIME __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PRIORITY __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PROFILE __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_ROTATION __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_SEI __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_STRIDE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TIME_US __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_WIDTH __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
+#if __ANDROID_API__ >= 28
 bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out) __INTRODUCED_IN(28);
 bool AMediaFormat_getRect(AMediaFormat*, const char *name,
         int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) __INTRODUCED_IN(28);
@@ -163,6 +168,7 @@
 void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value) __INTRODUCED_IN(28);
 void AMediaFormat_setRect(AMediaFormat*, const char* name,
         int32_t left, int32_t top, int32_t right, int32_t bottom) __INTRODUCED_IN(28);
+#endif /* __ANDROID_API__ >= 28 */
 
 __END_DECLS
 
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 75c70ed..7393867 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -53,6 +53,8 @@
     AMEDIAMUXER_OUTPUT_FORMAT_WEBM   = 1,
 } OutputFormat;
 
+#if __ANDROID_API__ >= 21
+
 /**
  * Create new media muxer
  */
@@ -121,6 +123,8 @@
         size_t trackIdx, const uint8_t *data,
         const AMediaCodecBufferInfo *info) __INTRODUCED_IN(21);
 
+#endif /* __ANDROID_API__ >= 21 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_MUXER_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index fb56694..d828d6a 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -32,55 +32,66 @@
     AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
-    AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+    AMEDIAFORMAT_KEY_AAC_PROFILE; # var introduced=21
     AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
     AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
     AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
-    AMEDIAFORMAT_KEY_BIT_RATE; # var
+    AMEDIAFORMAT_KEY_BIT_RATE; # var introduced=21
     AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
-    AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
-    AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
-    AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+    AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var introduced=21
+    AMEDIAFORMAT_KEY_CHANNEL_MASK; # var introduced=21
+    AMEDIAFORMAT_KEY_COLOR_FORMAT; # var introduced=21
     AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
     AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
     AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_0; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_1; # var introduced=28
+    AMEDIAFORMAT_KEY_CSD_2; # var introduced=28
     AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
-    AMEDIAFORMAT_KEY_DURATION; # var
-    AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
-    AMEDIAFORMAT_KEY_FRAME_RATE; # var
+    AMEDIAFORMAT_KEY_DISPLAY_HEIGHT; # var introduced=28
+    AMEDIAFORMAT_KEY_DISPLAY_WIDTH; # var introduced=28
+    AMEDIAFORMAT_KEY_DURATION; # var introduced=21
+    AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var introduced=21
+    AMEDIAFORMAT_KEY_FRAME_RATE; # var introduced=21
     AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
     AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
     AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
-    AMEDIAFORMAT_KEY_HEIGHT; # var
+    AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
     AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
-    AMEDIAFORMAT_KEY_IS_ADTS; # var
-    AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
-    AMEDIAFORMAT_KEY_IS_DEFAULT; # var
-    AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
-    AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
-    AMEDIAFORMAT_KEY_LANGUAGE; # var
+    AMEDIAFORMAT_KEY_IS_ADTS; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_DEFAULT; # var introduced=21
+    AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var introduced=21
+    AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
+    AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
     AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
     AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
-    AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
-    AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
-    AMEDIAFORMAT_KEY_MAX_WIDTH; # var
-    AMEDIAFORMAT_KEY_MIME; # var
+    AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
+    AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
+    AMEDIAFORMAT_KEY_MAX_WIDTH; # var introduced=21
+    AMEDIAFORMAT_KEY_MIME; # var introduced=21
+    AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
     AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
     AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
     AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
     AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
-    AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
-    AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+    AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
+    AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
     AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
-    AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+    AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
+    AMEDIAFORMAT_KEY_SEI; # var introduced=28
     AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
-    AMEDIAFORMAT_KEY_STRIDE; # var
+    AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
+    AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID; # var introduced=28
     AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
+    AMEDIAFORMAT_KEY_TIME_US; # var introduced=28
+    AMEDIAFORMAT_KEY_TRACK_INDEX; # var introduced=28
     AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
-    AMEDIAFORMAT_KEY_WIDTH; # var
+    AMEDIAFORMAT_KEY_WIDTH; # var introduced=21
     AMediaCodecActionCode_isRecoverable; # introduced=28
     AMediaCodecActionCode_isTransient; # introduced=28
     AMediaCodecCryptoInfo_delete;
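The "introduced=" annotations above pair with the __INTRODUCED_IN() markers and the #if __ANDROID_API__ >= N guards added to the NDK headers: a client built with a lower __ANDROID_API__ simply never sees the newer declarations. A minimal sketch of client code written against that guard (the key/getter pairing is illustrative, not prescribed by this change):

    #include <media/NdkMediaFormat.h>

    // Sketch: only touch API-28 symbols when the build targets API 28 or later;
    // below that level the guarded declarations are compiled out entirely.
    static bool tryGetLatency(AMediaFormat *format, int32_t *outLatency) {
    #if __ANDROID_API__ >= 28
        return AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_LATENCY, outLatency);
    #else
        (void)format;
        (void)outLatency;
        return false;                          // symbol not available before API 28
    #endif
    }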
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index de8e46a..f5b3f92 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -22,6 +22,7 @@
         "ProcessInfo.cpp",
         "SchedulingPolicyService.cpp",
         "ServiceUtilities.cpp",
+        "TimeCheck.cpp",
     ],
     shared_libs: [
         "libbinder",
@@ -31,12 +32,20 @@
         "libmemunreachable",
     ],
 
+    logtags: ["EventLogTags.logtags"],
+
     cflags: [
         "-Wall",
         "-Wextra",
         "-Werror",
     ],
 
+    product_variables: {
+        product_is_iot: {
+            cflags: ["-DTARGET_ANDROID_THINGS"],
+        },
+    },
+
     local_include_dirs: ["include"],
     export_include_dirs: ["include"],
 }
diff --git a/media/utils/EventLogTags.logtags b/media/utils/EventLogTags.logtags
new file mode 100644
index 0000000..67f0ea8
--- /dev/null
+++ b/media/utils/EventLogTags.logtags
@@ -0,0 +1,41 @@
+# The entries in this file map a sparse set of log tag numbers to tag names.
+# This is installed on the device, in /system/etc, and parsed by logcat.
+#
+# Tag numbers are decimal integers, from 0 to 2^31.  (Let's leave the
+# negative values alone for now.)
+#
+# Tag names are one or more ASCII letters and numbers or underscores, i.e.
+# "[A-Z][a-z][0-9]_".  Do not include spaces or punctuation (the former
+# impacts log readability, the latter makes regex searches more annoying).
+#
+# Tag numbers and names are separated by whitespace.  Blank lines and lines
+# starting with '#' are ignored.
+#
+# Optionally, a description of the tag's value(s) may follow the tag name.
+# Descriptions are in the format
+#    (<name>|data type[|data unit])
+# Multiple values are separated by commas.
+#
+# The data type is a number from the following values:
+# 1: int
+# 2: long
+# 3: string
+# 4: list
+#
+# The data unit is a number taken from the following list:
+# 1: Number of objects
+# 2: Number of bytes
+# 3: Number of milliseconds
+# 4: Number of allocations
+# 5: Id
+# 6: Percent
+# Default value for data of type int/long is 2 (bytes).
+#
+# See system/core/logcat/event.logtags for the master copy of the tags.
+
+# 61000 - 61199 reserved for audioserver
+
+61000 audioserver_binder_timeout (command|3)
+
+# NOTE - the range 1000000-2000000 is reserved for partners and others who
+# want to define their own log tags without conflicting with the core platform.
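The single tag defined above, 61000 with one string value ("(command|3)"), is the event that the TimeCheck change further down writes when a binder call times out; the tag number is mirrored by LOGTAG_AUDIO_BINDER_TIMEOUT in the new mediautils/EventLog.h. A minimal sketch of logging against it, assuming the liblog LOG_EVENT_STRING macro that TimeCheck.cpp uses:

    #include <utils/Log.h>
    #include <mediautils/EventLog.h>           // LOGTAG_AUDIO_BINDER_TIMEOUT = 61000

    // Sketch: write one audioserver_binder_timeout event carrying the name of
    // the binder command that timed out.
    void logBinderTimeout(const char *command) {
        LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, command);
    }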
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 0d50be0..1c54aec 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -158,6 +158,27 @@
     return ok;
 }
 
+bool modifyDefaultAudioEffectsAllowed() {
+    static const String16 sModifyDefaultAudioEffectsAllowed(
+            "android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkCallingPermission(sModifyDefaultAudioEffectsAllowed);
+
+#ifdef TARGET_ANDROID_THINGS
+    if (!ok) {
+        // Use a secondary permission on Android Things to allow a more lenient level of protection.
+        static const String16 sModifyDefaultAudioEffectsAndroidThingsAllowed(
+                "com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+        ok = PermissionCache::checkCallingPermission(
+                sModifyDefaultAudioEffectsAndroidThingsAllowed);
+    }
+    if (!ok) ALOGE("com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+#else
+    if (!ok) ALOGE("android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+#endif
+    return ok;
+}
+
 bool dumpAllowed() {
     static const String16 sDump("android.permission.DUMP");
     // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
diff --git a/media/libmedia/TimeCheck.cpp b/media/utils/TimeCheck.cpp
similarity index 91%
rename from media/libmedia/TimeCheck.cpp
rename to media/utils/TimeCheck.cpp
index dab5d4f..59cf4ef 100644
--- a/media/libmedia/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -15,7 +15,9 @@
  */
 
 
+#include <utils/Log.h>
-#include <media/TimeCheck.h>
+#include <mediautils/TimeCheck.h>
+#include <mediautils/EventLog.h>
 
 namespace android {
 
@@ -81,7 +83,10 @@
             status = mCond.waitRelative(mMutex, waitTimeNs);
         }
     }
-    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "TimeCheck timeout for %s", tag);
+    if (status != NO_ERROR) {
+        LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+        LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
+    }
     return true;
 }
 
diff --git a/media/utils/include/mediautils/EventLog.h b/media/utils/include/mediautils/EventLog.h
new file mode 100644
index 0000000..553d3bd
--- /dev/null
+++ b/media/utils/include/mediautils/EventLog.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_AUDIO_EVENT_LOG_H_
+#define ANDROID_AUDIO_EVENT_LOG_H_
+
+namespace android {
+
+// keep values in sync with frameworks/av/media/utils/EventLogTags.logtags
+enum {
+  LOGTAG_AUDIO_BINDER_TIMEOUT = 61000,
+};
+
+}  // namespace android
+
+#endif  // ANDROID_AUDIO_EVENT_LOG_H_
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 0911744..98f54c2 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -68,6 +68,7 @@
 bool captureHotwordAllowed(pid_t pid, uid_t uid);
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
+bool modifyDefaultAudioEffectsAllowed();
 bool dumpAllowed();
 bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
 status_t checkIMemory(const sp<IMemory>& iMemory);
diff --git a/media/libmedia/include/media/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
similarity index 100%
rename from media/libmedia/include/media/TimeCheck.h
rename to media/utils/include/mediautils/TimeCheck.h
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 53a4ce9..38483c3 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -20,9 +20,11 @@
 //#define LOG_NDEBUG 0
 
 #include "Configuration.h"
+#include <algorithm>    // std::any_of
 #include <dirent.h>
 #include <math.h>
 #include <signal.h>
+#include <string>
 #include <sys/time.h>
 #include <sys/resource.h>
 
@@ -306,7 +308,7 @@
         *sessionId = actualSessionId;
     } else {
         if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
-            AudioSystem::releaseOutput(io, streamType, actualSessionId);
+            AudioSystem::releaseOutput(portId);
         } else {
             AudioSystem::releaseInput(portId);
         }
@@ -434,6 +436,15 @@
     if (!dumpAllowed()) {
         dumpPermissionDenial(fd, args);
     } else {
+        // XXX This is sort of hacky for now.
+        const bool formatJson = std::any_of(args.begin(), args.end(),
+                [](const String16 &arg) { return arg == String16("--json"); });
+        if (formatJson) {
+            // XXX consider buffering if the string happens to be too long.
+            dprintf(fd, "%s", getJsonString().c_str());
+            return NO_ERROR;
+        }
+
         // get state of hardware lock
         bool hardwareLocked = dumpTryLock(mHardwareLock);
         if (!hardwareLocked) {
@@ -443,7 +454,7 @@
             mHardwareLock.unlock();
         }
 
-        bool locked = dumpTryLock(mLock);
+        const bool locked = dumpTryLock(mLock);
 
         // failed to lock - AudioFlinger is probably deadlocked
         if (!locked) {
@@ -545,6 +556,36 @@
     return NO_ERROR;
 }
 
+std::string AudioFlinger::getJsonString()
+{
+    std::string jsonStr = "{\n";
+    const bool locked = dumpTryLock(mLock);
+
+    // failed to lock - AudioFlinger is probably deadlocked
+    if (!locked) {
+        jsonStr += "  \"deadlock_message\": \"";
+        jsonStr += kDeadlockedString;
+        jsonStr += "\",\n";
+    }
+    // FIXME risky to access data structures without a lock held?
+
+    jsonStr += "  \"Playback_Threads\": [\n";
+    // dump playback threads
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        if (i != 0) {
+            jsonStr += ",\n";
+        }
+        jsonStr += mPlaybackThreads.valueAt(i)->getJsonString();
+    }
+    jsonStr += "\n  ]\n}\n";
+
+    if (locked) {
+        mLock.unlock();
+    }
+
+    return jsonStr;
+}
+
 sp<AudioFlinger::Client> AudioFlinger::registerPid(pid_t pid)
 {
     Mutex::Autolock _cl(mClientLock);
@@ -777,7 +818,7 @@
 
 Exit:
     if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
-        AudioSystem::releaseOutput(output.outputId, streamType, sessionId);
+        AudioSystem::releaseOutput(portId);
     }
     *status = lStatus;
     return trackHandle;
@@ -1144,6 +1185,23 @@
     }
 }
 
+// forwardParametersToDownstreamPatches_l() must be called with AudioFlinger::mLock held
+void AudioFlinger::forwardParametersToDownstreamPatches_l(
+        audio_io_handle_t upStream, const String8& keyValuePairs,
+        std::function<bool(const sp<PlaybackThread>&)> useThread)
+{
+    std::vector<PatchPanel::SoftwarePatch> swPatches;
+    if (mPatchPanel.getDownstreamSoftwarePatches(upStream, &swPatches) != OK) return;
+    ALOGV_IF(!swPatches.empty(), "%s found %zu downstream patches for stream ID %d",
+            __func__, swPatches.size(), upStream);
+    for (const auto& swPatch : swPatches) {
+        sp<PlaybackThread> downStream = checkPlaybackThread_l(swPatch.getPlaybackThreadHandle());
+        if (downStream != NULL && (useThread == nullptr || useThread(downStream))) {
+            downStream->setParameters(keyValuePairs);
+        }
+    }
+}
+
 // Filter reserved keys from setParameters() before forwarding to audio HAL or acting upon.
 // Some keys are used for audio routing and audio path configuration and should be reserved for use
 // by audio policy and audio flinger for functional, privacy and security reasons.
@@ -1259,7 +1317,9 @@
         }
     }
     if (thread != 0) {
-        return thread->setParameters(filteredKeyValuePairs);
+        status_t result = thread->setParameters(filteredKeyValuePairs);
+        forwardParametersToDownstreamPatches_l(thread->id(), filteredKeyValuePairs);
+        return result;
     }
     return BAD_VALUE;
 }
@@ -1817,6 +1877,10 @@
 
         mHardwareStatus = AUDIO_HW_IDLE;
     }
+    if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_MSD) == 0) {
+        // An MSD module is inserted before hardware modules in order to mix encoded streams.
+        flags = static_cast<AudioHwDevice::Flags>(flags | AudioHwDevice::AHWD_IS_INSERT);
+    }
 
     audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
     mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
@@ -1960,7 +2024,10 @@
         if (sessions & ThreadBase::TRACK_SESSION) {
             AudioParameter param = AudioParameter();
             param.addInt(String8(AudioParameter::keyStreamHwAvSync), value);
-            thread->setParameters(param.toString());
+            String8 keyValuePairs = param.toString();
+            thread->setParameters(keyValuePairs);
+            forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+                    [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
             break;
         }
     }
@@ -2006,7 +2073,10 @@
         ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
         AudioParameter param = AudioParameter();
         param.addInt(String8(AudioParameter::keyStreamHwAvSync), syncId);
-        thread->setParameters(param.toString());
+        String8 keyValuePairs = param.toString();
+        thread->setParameters(keyValuePairs);
+        forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+                [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
     }
 }
 
@@ -2096,6 +2166,7 @@
                       *output, thread.get());
             }
             mPlaybackThreads.add(*output, thread);
+            mPatchPanel.notifyStreamOpened(outHwDev, *output);
             return thread;
         }
     }
@@ -2231,6 +2302,7 @@
         const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
         ioDesc->mIoHandle = output;
         ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+        mPatchPanel.notifyStreamClosed(output);
     }
     // The thread entity (active unit of execution) is no longer running here,
     // but the ThreadBase container still exists.
@@ -2877,16 +2949,74 @@
 }
 
 status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid,
-        effect_descriptor_t *descriptor) const
+                                           const effect_uuid_t *pTypeUuid,
+                                           uint32_t preferredTypeFlag,
+                                           effect_descriptor_t *descriptor) const
 {
+    if (pUuid == NULL || pTypeUuid == NULL || descriptor == NULL) {
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock _l(mLock);
-    if (mEffectsFactoryHal.get()) {
-        return mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
-    } else {
+
+    if (!mEffectsFactoryHal.get()) {
         return -ENODEV;
     }
-}
 
+    status_t status = NO_ERROR;
+    if (!EffectsFactoryHalInterface::isNullUuid(pUuid)) {
+        // If uuid is specified, request effect descriptor from that.
+        status = mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
+    } else if (!EffectsFactoryHalInterface::isNullUuid(pTypeUuid)) {
+        // If uuid is not specified, look for an available implementation
+        // of the required type instead.
+
+        // Use a temporary descriptor to avoid modifying |descriptor| in the failure case.
+        effect_descriptor_t desc;
+        desc.flags = 0; // prevent compiler warning
+
+        uint32_t numEffects = 0;
+        status = mEffectsFactoryHal->queryNumberEffects(&numEffects);
+        if (status < 0) {
+            ALOGW("getEffectDescriptor() error %d from FactoryHal queryNumberEffects", status);
+            return status;
+        }
+
+        bool found = false;
+        for (uint32_t i = 0; i < numEffects; i++) {
+            status = mEffectsFactoryHal->getDescriptor(i, &desc);
+            if (status < 0) {
+                ALOGW("getEffectDescriptor() error %d from FactoryHal getDescriptor", status);
+                continue;
+            }
+            if (memcmp(&desc.type, pTypeUuid, sizeof(effect_uuid_t)) == 0) {
+                // If matching type found save effect descriptor.
+                found = true;
+                *descriptor = desc;
+
+                // If there's no preferred flag or this descriptor matches the preferred
+                // flag, success! If this descriptor doesn't match the preferred
+                // flag, continue enumeration in case a better matching version of this
+                // effect type is available. Note that this means if no effect with a
+                // correct flag is found, the descriptor returned will correspond to the
+                // last effect that at least had a matching type uuid (if any).
+                if (preferredTypeFlag == EFFECT_FLAG_TYPE_MASK ||
+                    (desc.flags & EFFECT_FLAG_TYPE_MASK) == preferredTypeFlag) {
+                    break;
+                }
+            }
+        }
+
+        if (!found) {
+            status = NAME_NOT_FOUND;
+            ALOGW("getEffectDescriptor(): Effect not found by type.");
+        }
+    } else {
+        status = BAD_VALUE;
+        ALOGE("getEffectDescriptor(): Either uuid or type uuid must be non-null UUIDs.");
+    }
+    return status;
+}
 
 sp<IEffect> AudioFlinger::createEffect(
         effect_descriptor_t *pDesc,
@@ -2940,60 +3070,15 @@
     }
 
     {
-        if (!EffectsFactoryHalInterface::isNullUuid(&pDesc->uuid)) {
-            // if uuid is specified, request effect descriptor
-            lStatus = mEffectsFactoryHal->getDescriptor(&pDesc->uuid, &desc);
-            if (lStatus < 0) {
-                ALOGW("createEffect() error %d from EffectGetDescriptor", lStatus);
-                goto Exit;
-            }
-        } else {
-            // if uuid is not specified, look for an available implementation
-            // of the required type in effect factory
-            if (EffectsFactoryHalInterface::isNullUuid(&pDesc->type)) {
-                ALOGW("createEffect() no effect type");
-                lStatus = BAD_VALUE;
-                goto Exit;
-            }
-            uint32_t numEffects = 0;
-            effect_descriptor_t d;
-            d.flags = 0; // prevent compiler warning
-            bool found = false;
-
-            lStatus = mEffectsFactoryHal->queryNumberEffects(&numEffects);
-            if (lStatus < 0) {
-                ALOGW("createEffect() error %d from EffectQueryNumberEffects", lStatus);
-                goto Exit;
-            }
-            for (uint32_t i = 0; i < numEffects; i++) {
-                lStatus = mEffectsFactoryHal->getDescriptor(i, &desc);
-                if (lStatus < 0) {
-                    ALOGW("createEffect() error %d from EffectQueryEffect", lStatus);
-                    continue;
-                }
-                if (memcmp(&desc.type, &pDesc->type, sizeof(effect_uuid_t)) == 0) {
-                    // If matching type found save effect descriptor. If the session is
-                    // 0 and the effect is not auxiliary, continue enumeration in case
-                    // an auxiliary version of this effect type is available
-                    found = true;
-                    d = desc;
-                    if (sessionId != AUDIO_SESSION_OUTPUT_MIX ||
-                            (desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
-                        break;
-                    }
-                }
-            }
-            if (!found) {
-                lStatus = BAD_VALUE;
-                ALOGW("createEffect() effect not found");
-                goto Exit;
-            }
-            // For same effect type, chose auxiliary version over insert version if
-            // connect to output mix (Compliance to OpenSL ES)
-            if (sessionId == AUDIO_SESSION_OUTPUT_MIX &&
-                    (d.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) {
-                desc = d;
-            }
+        // Get the full effect descriptor from the uuid/type.
+        // If the session is the output mix, prefer an auxiliary effect,
+        // otherwise no preference.
+        uint32_t preferredType = (sessionId == AUDIO_SESSION_OUTPUT_MIX ?
+                                  EFFECT_FLAG_TYPE_AUXILIARY : EFFECT_FLAG_TYPE_MASK);
+        lStatus = getEffectDescriptor(&pDesc->uuid, &pDesc->type, preferredType, &desc);
+        if (lStatus < 0) {
+            ALOGW("createEffect() error %d from getEffectDescriptor", lStatus);
+            goto Exit;
         }
 
         // Do not allow auxiliary effects on a session different from 0 (output mix)
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 0276cad..9b9a15d 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -23,6 +23,8 @@
 #include <mutex>
 #include <deque>
 #include <map>
+#include <memory>
+#include <string>
 #include <vector>
 #include <stdint.h>
 #include <sys/types.h>
@@ -62,6 +64,7 @@
 #include <media/LinearMap.h>
 #include <media/VolumeShaper.h>
 
+#include <audio_utils/clock.h>
 #include <audio_utils/SimpleLog.h>
 #include <audio_utils/TimestampVerifier.h>
 
@@ -111,6 +114,7 @@
     static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
 
     virtual     status_t    dump(int fd, const Vector<String16>& args);
+                std::string getJsonString();
 
     // IAudioFlinger interface, in binder opcode order
     virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
@@ -204,6 +208,8 @@
     virtual status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor) const;
 
     virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
+                                         const effect_uuid_t *pTypeUuid,
+                                         uint32_t preferredTypeFlag,
                                          effect_descriptor_t *descriptor) const;
 
     virtual sp<IEffect> createEffect(
@@ -677,6 +683,9 @@
                 bool            updateOrphanEffectChains(const sp<EffectModule>& effect);
 
                 void broacastParametersToRecordThreads_l(const String8& keyValuePairs);
+                void forwardParametersToDownstreamPatches_l(
+                        audio_io_handle_t upStream, const String8& keyValuePairs,
+                        std::function<bool(const sp<PlaybackThread>&)> useThread = nullptr);
 
     // AudioStreamIn is immutable, so their fields are const.
     // For emphasis, we could also make all pointers to them be "const *",
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index eb826c6..d4299b0 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -35,6 +35,9 @@
     enum Flags {
         AHWD_CAN_SET_MASTER_VOLUME  = 0x1,
         AHWD_CAN_SET_MASTER_MUTE    = 0x2,
+        // Means that this isn't a terminal module, and software patches
+        // are used to transport audio data further.
+        AHWD_IS_INSERT              = 0x4,
     };
 
     AudioHwDevice(audio_module_handle_t handle,
@@ -55,6 +58,10 @@
         return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
     }
 
+    bool isInsert() const {
+        return (0 != (mFlags & AHWD_IS_INSERT));
+    }
+
     audio_module_handle_t handle() const { return mHandle; }
     const char *moduleName() const { return mModuleName; }
     sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
index ede8e3f..34cd821 100644
--- a/services/audioflinger/Configuration.h
+++ b/services/audioflinger/Configuration.h
@@ -27,7 +27,7 @@
 //#define AUDIO_WATCHDOG
 
 // uncomment to display CPU load adjusted for CPU frequency
-//#define CPU_FREQUENCY_STATISTICS
+//#define CPU_FREQUENCY_STATISTICS
 
 // uncomment to enable fast threads to take performance samples for later statistical analysis
 #define FAST_THREAD_STATISTICS
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index d063772..dd84bf2 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -20,6 +20,7 @@
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 
 #include "Configuration.h"
+#include <audio_utils/format.h>
 #include <linux/futex.h>
 #include <sys/syscall.h>
 #include <media/AudioBufferProvider.h>
@@ -161,7 +162,21 @@
     const FastCaptureState * const current = (const FastCaptureState *) mCurrent;
     FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) mDumpState;
     const FastCaptureState::Command command = mCommand;
-    const size_t frameCount = current->mFrameCount;
+    size_t frameCount = current->mFrameCount;
+    AudioBufferProvider* fastPatchRecordBufferProvider = current->mFastPatchRecordBufferProvider;
+    AudioBufferProvider::Buffer patchBuffer;
+
+    if (fastPatchRecordBufferProvider != 0) {
+        patchBuffer.frameCount = ~0;
+        status_t status = fastPatchRecordBufferProvider->getNextBuffer(&patchBuffer);
+        if (status != NO_ERROR) {
+            frameCount = 0;
+        } else if (patchBuffer.frameCount < frameCount) {
+            // TODO: Make sure that it doesn't cause any issues if we just get a small available
+            // buffer from the buffer provider.
+            frameCount = patchBuffer.frameCount;
+        }
+    }
 
     if ((command & FastCaptureState::READ) /*&& isWarm*/) {
         ALOG_ASSERT(mInputSource != NULL);
@@ -176,6 +191,7 @@
             mTotalNativeFramesRead += framesRead;
             dumpState->mFramesRead = mTotalNativeFramesRead;
             mReadBufferState = framesRead;
+            patchBuffer.frameCount = framesRead;
         } else {
             dumpState->mReadErrors++;
             mReadBufferState = 0;
@@ -193,11 +209,18 @@
         }
         if (mReadBufferState > 0) {
             ssize_t framesWritten = mPipeSink->write(mReadBuffer, mReadBufferState);
-            // FIXME This supports at most one fast capture client.
-            //       To handle multiple clients this could be converted to an array,
-            //       or with a lot more work the control block could be shared by all clients.
             audio_track_cblk_t* cblk = current->mCblk;
-            if (cblk != NULL && framesWritten > 0) {
+            if (fastPatchRecordBufferProvider != 0) {
+                // This indicates the fast track is a patch record; update the cblk by
+                // calling releaseBuffer().
+                memcpy_by_audio_format(patchBuffer.raw, current->mFastPatchRecordFormat,
+                        mReadBuffer, mFormat.mFormat, framesWritten * mFormat.mChannelCount);
+                patchBuffer.frameCount = framesWritten;
+                fastPatchRecordBufferProvider->releaseBuffer(&patchBuffer);
+            } else if (cblk != NULL && framesWritten > 0) {
+                // FIXME This supports at most one fast capture client.
+                //       To handle multiple clients this could be converted to an array,
+                //       or with a lot more work the control block could be shared by all clients.
                 int32_t rear = cblk->u.mStreaming.mRear;
                 android_atomic_release_store(framesWritten + rear, &cblk->u.mStreaming.mRear);
                 cblk->mServer += framesWritten;
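The fast capture loop above now also feeds a patch record that supplies its own
buffer through an AudioBufferProvider. A hedged sketch of the provider handshake
it follows, with hypothetical names (provider, sinkFormat, halBuffer, halFormat,
channelCount, framesRead) standing in for the members used in the hunks above:

    AudioBufferProvider::Buffer buf;
    buf.frameCount = ~0;                       // request as many frames as are available
    if (provider->getNextBuffer(&buf) == NO_ERROR) {
        // convert the frames just read from the HAL into the patch record's buffer
        memcpy_by_audio_format(buf.raw, sinkFormat,
                halBuffer, halFormat, framesRead * channelCount);
        buf.frameCount = framesRead;
        provider->releaseBuffer(&buf);         // advances the patch record's control block
    }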
diff --git a/services/audioflinger/FastCaptureState.h b/services/audioflinger/FastCaptureState.h
index 9bca2d4..d287232 100644
--- a/services/audioflinger/FastCaptureState.h
+++ b/services/audioflinger/FastCaptureState.h
@@ -18,6 +18,7 @@
 #define ANDROID_AUDIO_FAST_CAPTURE_STATE_H
 
 #include <media/nbaio/NBAIO.h>
+#include <media/AudioBufferProvider.h>
 #include "FastThreadState.h"
 #include <private/media/AudioTrackShared.h>
 
@@ -37,6 +38,10 @@
     size_t          mFrameCount;        // number of frames per fast capture buffer
     audio_track_cblk_t* mCblk;          // control block for the single fast client, or NULL
 
+    audio_format_t  mFastPatchRecordFormat = AUDIO_FORMAT_INVALID;
+    AudioBufferProvider* mFastPatchRecordBufferProvider = nullptr;   // a reference to a patch
+                                                                     // record in fast mode
+
     // Extends FastThreadState::Command
     static const Command
         // The following commands also process configuration changes, and can be "or"ed:
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index ad35264..a42d6b3 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -32,7 +32,7 @@
 #include <utils/Trace.h>
 #include <system/audio.h>
 #ifdef FAST_THREAD_STATISTICS
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
 #ifdef CPU_FREQUENCY_STATISTICS
 #include <cpustats/ThreadCpuUsage.h>
 #endif
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index d60643c..e26dca1 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -19,11 +19,12 @@
 
 #include "Configuration.h"
 #ifdef FAST_THREAD_STATISTICS
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
 #ifdef CPU_FREQUENCY_STATISTICS
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 #endif
+#include <string>
 #include <utils/Debug.h>
 #include <utils/Log.h>
 #include "FastMixerDumpState.h"
@@ -76,14 +77,14 @@
     dprintf(fd, "  FastMixer Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
 #ifdef FAST_THREAD_STATISTICS
     // find the interval of valid samples
-    uint32_t bounds = mBounds;
-    uint32_t newestOpen = bounds & 0xFFFF;
+    const uint32_t bounds = mBounds;
+    const uint32_t newestOpen = bounds & 0xFFFF;
     uint32_t oldestClosed = bounds >> 16;
 
     //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
     uint32_t n;
     __builtin_sub_overflow(newestOpen, oldestClosed, &n);
-    n = n & 0xFFFF;
+    n &= 0xFFFF;
 
     if (n > mSamplingN) {
         ALOGE("too many samples %u", n);
@@ -91,9 +92,9 @@
     }
     // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
     // and adjusted CPU load in MHz normalized for CPU clock frequency
-    CentralTendencyStatistics wall, loadNs;
+    audio_utils::Statistics<double> wall, loadNs;
 #ifdef CPU_FREQUENCY_STATISTICS
-    CentralTendencyStatistics kHz, loadMHz;
+    audio_utils::Statistics<double> kHz, loadMHz;
     uint32_t previousCpukHz = 0;
 #endif
     // Assuming a normal distribution for cycle times, three standard deviations on either side of
@@ -108,18 +109,18 @@
         if (tail != NULL) {
             tail[j] = wallNs;
         }
-        wall.sample(wallNs);
+        wall.add(wallNs);
         uint32_t sampleLoadNs = mLoadNs[i];
-        loadNs.sample(sampleLoadNs);
+        loadNs.add(sampleLoadNs);
 #ifdef CPU_FREQUENCY_STATISTICS
         uint32_t sampleCpukHz = mCpukHz[i];
         // skip bad kHz samples
         if ((sampleCpukHz & ~0xF) != 0) {
-            kHz.sample(sampleCpukHz >> 4);
+            kHz.add(sampleCpukHz >> 4);
             if (sampleCpukHz == previousCpukHz) {
                 double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
                 double adjMHz = megacycles / mixPeriodSec;  // _not_ wallNs * 1e9
-                loadMHz.sample(adjMHz);
+                loadMHz.add(adjMHz);
             }
         }
         previousCpukHz = sampleCpukHz;
@@ -127,42 +128,42 @@
     }
     if (n) {
         dprintf(fd, "  Simple moving statistics over last %.1f seconds:\n",
-                    wall.n() * mixPeriodSec);
+                    wall.getN() * mixPeriodSec);
         dprintf(fd, "    wall clock time in ms per mix cycle:\n"
                     "      mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
-                    wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
-                    wall.stddev()*1e-6);
+                    wall.getMean()*1e-6, wall.getMin()*1e-6, wall.getMax()*1e-6,
+                    wall.getStdDev()*1e-6);
         dprintf(fd, "    raw CPU load in us per mix cycle:\n"
                     "      mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
-                    loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
-                    loadNs.stddev()*1e-3);
+                    loadNs.getMean()*1e-3, loadNs.getMin()*1e-3, loadNs.getMax()*1e-3,
+                    loadNs.getStdDev()*1e-3);
     } else {
         dprintf(fd, "  No FastMixer statistics available currently\n");
     }
 #ifdef CPU_FREQUENCY_STATISTICS
     dprintf(fd, "  CPU clock frequency in MHz:\n"
                 "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
-                kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
+                kHz.getMean()*1e-3, kHz.getMin()*1e-3, kHz.getMax()*1e-3, kHz.getStdDev()*1e-3);
     dprintf(fd, "  adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
                 "    mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
-                loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
+                loadMHz.getMean(), loadMHz.getMin(), loadMHz.getMax(), loadMHz.getStdDev());
 #endif
     if (tail != NULL) {
         qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
         // assume same number of tail samples on each side, left and right
         uint32_t count = n / kTailDenominator;
-        CentralTendencyStatistics left, right;
+        audio_utils::Statistics<double> left, right;
         for (uint32_t i = 0; i < count; ++i) {
-            left.sample(tail[i]);
-            right.sample(tail[n - (i + 1)]);
+            left.add(tail[i]);
+            right.add(tail[n - (i + 1)]);
         }
         dprintf(fd, "  Distribution of mix cycle times in ms for the tails "
                     "(> ~3 stddev outliers):\n"
                     "    left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
                     "    right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
-                    left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
-                    right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
-                    right.stddev()*1e-6);
+                    left.getMean()*1e-6, left.getMin()*1e-6, left.getMax()*1e-6, left.getStdDev()*1e-6,
+                    right.getMean()*1e-6, right.getMin()*1e-6, right.getMax()*1e-6,
+                    right.getStdDev()*1e-6);
         delete[] tail;
     }
 #endif
@@ -204,4 +205,56 @@
     }
 }
 
+// TODO get rid of extraneous lines and use better key names.
+// TODO may go back to using a library to do the json formatting.
+std::string FastMixerDumpState::getJsonString() const
+{
+    if (mCommand == FastMixerState::INITIAL) {
+        return "    {\n      \"status\": \"uninitialized\"\n    }";
+    }
+    std::string jsonStr = "    {\n";
+#ifdef FAST_THREAD_STATISTICS
+    // find the interval of valid samples
+    const uint32_t bounds = mBounds;
+    const uint32_t newestOpen = bounds & 0xFFFF;
+    uint32_t oldestClosed = bounds >> 16;
+
+    //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+    uint32_t n;
+    __builtin_sub_overflow(newestOpen, oldestClosed, &n);
+    n &= 0xFFFF;
+
+    if (n > mSamplingN) {
+        ALOGE("too many samples %u", n);
+        n = mSamplingN;
+    }
+    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
+    // and adjusted CPU load in MHz normalized for CPU clock frequency
+    std::string jsonWallStr = "      \"wall_clock_time\":[";
+    std::string jsonLoadNsStr = "      \"raw_cpu_load\":[";
+    // loop over all the samples
+    for (uint32_t j = 0; j < n; ++j) {
+        size_t i = oldestClosed++ & (mSamplingN - 1);
+        uint32_t wallNs = mMonotonicNs[i];
+        if (j != 0) {
+            jsonWallStr += ',';
+            jsonLoadNsStr += ',';
+        }
+        /* jsonObject["wall"].append(wallNs); */
+        jsonWallStr += std::to_string(wallNs);
+        uint32_t sampleLoadNs = mLoadNs[i];
+        jsonLoadNsStr += std::to_string(sampleLoadNs);
+    }
+    jsonWallStr += ']';
+    jsonLoadNsStr += ']';
+    if (n) {
+        jsonStr += jsonWallStr + ",\n" + jsonLoadNsStr + "\n";
+    } else {
+        //dprintf(fd, "  No FastMixer statistics available currently\n");
+    }
+#endif
+    jsonStr += "    }";
+    return jsonStr;
+}
+
 }   // android
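For reference, a hedged example of the string getJsonString() returns when
FAST_THREAD_STATISTICS is enabled and samples are present (the values below are
made up):

    {
      "wall_clock_time":[1333000,1327000,1341000],
      "raw_cpu_load":[412000,398000,420000]
    }

With no valid samples only the enclosing braces are emitted, and before the fast
mixer is initialized the "uninitialized" status object shown above is returned
instead.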
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 9b91cbc..81c4175 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -18,6 +18,7 @@
 #define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
 
 #include <stdint.h>
+#include <string>
 #include <audio_utils/TimestampVerifier.h>
 #include "Configuration.h"
 #include "FastThreadDumpState.h"
@@ -65,7 +66,8 @@
     FastMixerDumpState();
     /*virtual*/ ~FastMixerDumpState();
 
-    void dump(int fd) const;    // should only be called on a stable copy, not the original
+    void dump(int fd) const;             // should only be called on a stable copy, not the original
+    std::string getJsonString() const;   // should only be called on a stable copy, not the original
 
     double   mLatencyMs = 0.;   // measured latency, default of 0 if no valid timestamp read.
     uint32_t mWriteSequence;    // incremented before and after each write()
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 42a5a90..ada8572 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -83,6 +83,16 @@
     return mPatchPanel.listAudioPatches(num_patches, patches);
 }
 
+status_t AudioFlinger::PatchPanel::SoftwarePatch::getLatencyMs_l(double *latencyMs) const
+{
+    const auto& iter = mPatchPanel.mPatches.find(mPatchHandle);
+    if (iter != mPatchPanel.mPatches.end()) {
+        return iter->second.getLatencyMs(latencyMs);
+    } else {
+        return BAD_VALUE;
+    }
+}
+
 /* List connected audio ports and their attributes */
 status_t AudioFlinger::PatchPanel::listAudioPorts(unsigned int *num_ports __unused,
                                 struct audio_port *ports __unused)
@@ -110,9 +120,7 @@
     status_t status = NO_ERROR;
     audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
 
-    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            (patch->num_sinks == 0 && patch->num_sources != 2) ||
-            patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+    if (!audio_patch_is_valid(patch) || (patch->num_sinks == 0 && patch->num_sources != 2)) {
         return BAD_VALUE;
     }
     // limit number of sources to 1 for now or 2 sources for special cross hw module case.
@@ -161,21 +169,21 @@
                 }
             }
             mPatches.erase(iter);
+            removeSoftwarePatchFromInsertedModules(*handle);
         }
     }
 
     Patch newPatch{*patch};
+    audio_module_handle_t insertedModule = AUDIO_MODULE_HANDLE_NONE;
 
     switch (patch->sources[0].type) {
         case AUDIO_PORT_TYPE_DEVICE: {
             audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
-            ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(srcModule);
-            if (index < 0) {
-                ALOGW("%s() bad src hw module %d", __func__, srcModule);
+            AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(srcModule);
+            if (!audioHwDevice) {
                 status = BAD_VALUE;
                 goto exit;
             }
-            AudioHwDevice *audioHwDevice = mAudioFlinger.mAudioHwDevs.valueAt(index);
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 // support only one sink if connection to a mix or across HW modules
                 if ((patch->sinks[i].type == AUDIO_PORT_TYPE_MIX ||
@@ -227,9 +235,19 @@
                     audio_devices_t device = patch->sinks[0].ext.device.type;
                     String8 address = String8(patch->sinks[0].ext.device.address);
                     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-                    audio_output_flags_t flags =
-                            patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
-                            patch->sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
+                    audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+                        config.sample_rate = patch->sinks[0].sample_rate;
+                    }
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+                        config.channel_mask = patch->sinks[0].channel_mask;
+                    }
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+                        config.format = patch->sinks[0].format;
+                    }
+                    if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+                        flags = patch->sinks[0].flags.output;
+                    }
                     sp<ThreadBase> thread = mAudioFlinger.openOutput_l(
                                                             patch->sinks[0].ext.device.hw_module,
                                                             &output,
@@ -287,6 +305,9 @@
                 if (status != NO_ERROR) {
                     goto exit;
                 }
+                if (audioHwDevice->isInsert()) {
+                    insertedModule = audioHwDevice->handle();
+                }
             } else {
                 if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                     sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(
@@ -366,6 +387,9 @@
         *handle = (audio_patch_handle_t) mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
         newPatch.mHalHandle = halHandle;
         mPatches.insert(std::make_pair(*handle, std::move(newPatch)));
+        if (insertedModule != AUDIO_MODULE_HANDLE_NONE) {
+            addSoftwarePatchToInsertedModules(insertedModule, *handle);
+        }
         ALOGV("%s() added new patch handle %d halHandle %d", __func__, *handle, halHandle);
     } else {
         newPatch.clearConnections(this);
@@ -407,14 +431,14 @@
     // use a pseudo LCM between input and output framecount
     size_t playbackFrameCount = mPlayback.thread()->frameCount();
     int playbackShift = __builtin_ctz(playbackFrameCount);
-    size_t recordFramecount = mRecord.thread()->frameCount();
-    int shift = __builtin_ctz(recordFramecount);
+    size_t recordFrameCount = mRecord.thread()->frameCount();
+    int shift = __builtin_ctz(recordFrameCount);
     if (playbackShift < shift) {
         shift = playbackShift;
     }
-    size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
-    ALOGV("%s() playframeCount %zu recordFramecount %zu frameCount %zu",
-            __func__, playbackFrameCount, recordFramecount, frameCount);
+    size_t frameCount = (playbackFrameCount * recordFrameCount) >> shift;
+    ALOGV("%s() playframeCount %zu recordFrameCount %zu frameCount %zu",
+            __func__, playbackFrameCount, recordFrameCount, frameCount);
 
     // create a special record track to capture from record thread
     uint32_t channelCount = mPlayback.thread()->channelCount();
@@ -431,6 +455,17 @@
     }
     audio_input_flags_t inputFlags = mAudioPatch.sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
             mAudioPatch.sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
+    if (sampleRate == mRecord.thread()->sampleRate() &&
+            inChannelMask == mRecord.thread()->channelMask() &&
+            mRecord.thread()->fastTrackAvailable() &&
+            mRecord.thread()->hasFastCapture()) {
+        // Create a fast track if the record thread has fast capture to get better performance.
+        // Only enable fast mode when there is no resample needed.
+        inputFlags = (audio_input_flags_t) (inputFlags | AUDIO_INPUT_FLAG_FAST);
+    } else {
+        // Fast mode is not available in this case.
+        inputFlags = (audio_input_flags_t) (inputFlags & ~AUDIO_INPUT_FLAG_FAST);
+    }
     sp<RecordThread::PatchRecord> tempRecordTrack = new (std::nothrow) RecordThread::PatchRecord(
                                              mRecord.thread().get(),
                                              sampleRate,
@@ -447,12 +482,21 @@
 
     audio_output_flags_t outputFlags = mAudioPatch.sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
             mAudioPatch.sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
+    audio_stream_type_t streamType = AUDIO_STREAM_PATCH;
+    if (mAudioPatch.num_sources == 2 && mAudioPatch.sources[1].type == AUDIO_PORT_TYPE_MIX) {
+        // "reuse one existing output mix" case
+        streamType = mAudioPatch.sources[1].ext.mix.usecase.stream;
+    }
+    if (mPlayback.thread()->hasFastMixer()) {
+        // Create a fast track if the playback thread has fast mixer to get better performance.
+        outputFlags = (audio_output_flags_t) (outputFlags | AUDIO_OUTPUT_FLAG_FAST);
+    }
 
     // create a special playback track to render to playback thread.
     // this track is given the same buffer as the PatchRecord buffer
     sp<PlaybackThread::PatchTrack> tempPatchTrack = new (std::nothrow) PlaybackThread::PatchTrack(
                                            mPlayback.thread().get(),
-                                           mAudioPatch.sources[1].ext.mix.usecase.stream,
+                                           streamType,
                                            sampleRate,
                                            outChannelMask,
                                            format,
@@ -513,21 +557,17 @@
     return OK;
 }
 
-String8 AudioFlinger::PatchPanel::Patch::dump(audio_patch_handle_t myHandle)
+String8 AudioFlinger::PatchPanel::Patch::dump(audio_patch_handle_t myHandle) const
 {
-    String8 result;
-
     // TODO: Consider table dump form for patches, just like tracks.
-    result.appendFormat("Patch %d: thread %p => thread %p",
-            myHandle, mRecord.thread().get(), mPlayback.thread().get());
+    String8 result = String8::format("Patch %d: thread %p => thread %p",
+            myHandle, mRecord.const_thread().get(), mPlayback.const_thread().get());
 
     // add latency if it exists
     double latencyMs;
     if (getLatencyMs(&latencyMs) == OK) {
         result.appendFormat("  latency: %.2lf", latencyMs);
     }
-
-    result.append("\n");
     return result;
 }
 
@@ -598,6 +638,7 @@
     }
 
     mPatches.erase(iter);
+    removeSoftwarePatchFromInsertedModules(handle);
     return status;
 }
 
@@ -609,35 +650,114 @@
     return NO_ERROR;
 }
 
-sp<DeviceHalInterface> AudioFlinger::PatchPanel::findHwDeviceByModule(audio_module_handle_t module)
+status_t AudioFlinger::PatchPanel::getDownstreamSoftwarePatches(
+        audio_io_handle_t stream,
+        std::vector<AudioFlinger::PatchPanel::SoftwarePatch> *patches) const
+{
+    for (const auto& module : mInsertedModules) {
+        if (module.second.streams.count(stream)) {
+            for (const auto& patchHandle : module.second.sw_patches) {
+                const auto& patch_iter = mPatches.find(patchHandle);
+                if (patch_iter != mPatches.end()) {
+                    const Patch &patch = patch_iter->second;
+                    patches->emplace_back(*this, patchHandle,
+                            patch.mPlayback.const_thread()->id(),
+                            patch.mRecord.const_thread()->id());
+                } else {
+                    ALOGE("Stale patch handle in the cache: %d", patchHandle);
+                }
+            }
+            return OK;
+        }
+    }
+    // The stream is not associated with any of the inserted modules.
+    return BAD_VALUE;
+}
+
+void AudioFlinger::PatchPanel::notifyStreamOpened(
+        AudioHwDevice *audioHwDevice, audio_io_handle_t stream)
+{
+    if (audioHwDevice->isInsert()) {
+        mInsertedModules[audioHwDevice->handle()].streams.insert(stream);
+    }
+}
+
+void AudioFlinger::PatchPanel::notifyStreamClosed(audio_io_handle_t stream)
+{
+    for (auto& module : mInsertedModules) {
+        module.second.streams.erase(stream);
+    }
+}
+
+AudioHwDevice* AudioFlinger::PatchPanel::findAudioHwDeviceByModule(audio_module_handle_t module)
 {
     if (module == AUDIO_MODULE_HANDLE_NONE) return nullptr;
     ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(module);
     if (index < 0) {
+        ALOGW("%s() bad hw module %d", __func__, module);
         return nullptr;
     }
-    return mAudioFlinger.mAudioHwDevs.valueAt(index)->hwDevice();
+    return mAudioFlinger.mAudioHwDevs.valueAt(index);
 }
 
-void AudioFlinger::PatchPanel::dump(int fd)
+sp<DeviceHalInterface> AudioFlinger::PatchPanel::findHwDeviceByModule(audio_module_handle_t module)
 {
+    AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(module);
+    return audioHwDevice ? audioHwDevice->hwDevice() : nullptr;
+}
+
+void AudioFlinger::PatchPanel::addSoftwarePatchToInsertedModules(
+        audio_module_handle_t module, audio_patch_handle_t handle)
+{
+    mInsertedModules[module].sw_patches.insert(handle);
+}
+
+void AudioFlinger::PatchPanel::removeSoftwarePatchFromInsertedModules(
+        audio_patch_handle_t handle)
+{
+    for (auto& module : mInsertedModules) {
+        module.second.sw_patches.erase(handle);
+    }
+}
+
+void AudioFlinger::PatchPanel::dump(int fd) const
+{
+    String8 patchPanelDump;
+    const char *indent = "  ";
+
     // Only dump software patches.
     bool headerPrinted = false;
-    for (auto& iter : mPatches) {
+    for (const auto& iter : mPatches) {
         if (iter.second.isSoftware()) {
             if (!headerPrinted) {
-                String8 header("\nSoftware patches:\n");
-                write(fd, header.string(), header.size());
+                patchPanelDump += "\nSoftware patches:\n";
                 headerPrinted = true;
             }
-            String8 patchDump("  ");
-            patchDump.append(iter.second.dump(iter.first));
-            write(fd, patchDump.string(), patchDump.size());
+            patchPanelDump.appendFormat("%s%s\n", indent, iter.second.dump(iter.first).string());
         }
     }
-    if (headerPrinted) {
-        String8 trailing("\n");
-        write(fd, trailing.string(), trailing.size());
+
+    headerPrinted = false;
+    for (const auto& module : mInsertedModules) {
+        if (!module.second.streams.empty() || !module.second.sw_patches.empty()) {
+            if (!headerPrinted) {
+                patchPanelDump += "\nTracked inserted modules:\n";
+                headerPrinted = true;
+            }
+            String8 moduleDump = String8::format("Module %d: I/O handles: ", module.first);
+            for (const auto& stream : module.second.streams) {
+                moduleDump.appendFormat("%d ", stream);
+            }
+            moduleDump.append("; SW Patches: ");
+            for (const auto& patch : module.second.sw_patches) {
+                moduleDump.appendFormat("%d ", patch);
+            }
+            patchPanelDump.appendFormat("%s%s\n", indent, moduleDump.string());
+        }
+    }
+
+    if (!patchPanelDump.isEmpty()) {
+        write(fd, patchPanelDump.string(), patchPanelDump.size());
     }
 }
 
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 5d6bf00..269a398 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -19,9 +19,31 @@
     #error This header file should only be included from AudioFlinger.h
 #endif
 
+
 // PatchPanel is concealed within AudioFlinger, their lifetimes are the same.
 class PatchPanel {
 public:
+    class SoftwarePatch {
+      public:
+        SoftwarePatch(const PatchPanel &patchPanel, audio_patch_handle_t patchHandle,
+                audio_io_handle_t playbackThreadHandle, audio_io_handle_t recordThreadHandle)
+                : mPatchPanel(patchPanel), mPatchHandle(patchHandle),
+                  mPlaybackThreadHandle(playbackThreadHandle),
+                  mRecordThreadHandle(recordThreadHandle) {}
+        SoftwarePatch(const SoftwarePatch&) = default;
+        SoftwarePatch& operator=(const SoftwarePatch&) = default;
+
+        // Must be called under AudioFlinger::mLock
+        status_t getLatencyMs_l(double *latencyMs) const;
+        audio_io_handle_t getPlaybackThreadHandle() const { return mPlaybackThreadHandle; };
+        audio_io_handle_t getRecordThreadHandle() const { return mRecordThreadHandle; };
+      private:
+        const PatchPanel &mPatchPanel;
+        const audio_patch_handle_t mPatchHandle;
+        const audio_io_handle_t mPlaybackThreadHandle;
+        const audio_io_handle_t mRecordThreadHandle;
+    };
+
     explicit PatchPanel(AudioFlinger* audioFlinger) : mAudioFlinger(*audioFlinger) {}
 
     /* List connected audio ports and their attributes */
@@ -42,7 +64,16 @@
     status_t listAudioPatches(unsigned int *num_patches,
                                       struct audio_patch *patches);
 
-    void dump(int fd);
+    // Retrieves all currently established software patches for a stream
+    // opened on an intermediate module.
+    status_t getDownstreamSoftwarePatches(audio_io_handle_t stream,
+            std::vector<SoftwarePatch> *patches) const;
+
+    // Notifies patch panel about all opened and closed streams.
+    void notifyStreamOpened(AudioHwDevice *audioHwDevice, audio_io_handle_t stream);
+    void notifyStreamClosed(audio_io_handle_t stream);
+
+    void dump(int fd) const;
 
 private:
     template<typename ThreadType, typename TrackType>
@@ -65,6 +96,7 @@
         audio_patch_handle_t handle() const { return mHandle; }
         sp<ThreadType> thread() { return mThread; }
         sp<TrackType> track() { return mTrack; }
+        sp<const ThreadType> const_thread() const { return mThread; }
         sp<const TrackType> const_track() const { return mTrack; }
 
         void closeConnections(PatchPanel *panel) {
@@ -122,7 +154,7 @@
         // returns the latency of the patch (from record to playback).
         status_t getLatencyMs(double *latencyMs) const;
 
-        String8 dump(audio_patch_handle_t myHandle);
+        String8 dump(audio_patch_handle_t myHandle) const;
 
         // Note that audio_patch::id is only unique within a HAL module
         struct audio_patch              mAudioPatch;
@@ -138,8 +170,38 @@
         Endpoint<RecordThread, RecordThread::PatchRecord> mRecord;
     };
 
+    AudioHwDevice* findAudioHwDeviceByModule(audio_module_handle_t module);
     sp<DeviceHalInterface> findHwDeviceByModule(audio_module_handle_t module);
+    void addSoftwarePatchToInsertedModules(
+            audio_module_handle_t module, audio_patch_handle_t handle);
+    void removeSoftwarePatchFromInsertedModules(audio_patch_handle_t handle);
 
     AudioFlinger &mAudioFlinger;
     std::map<audio_patch_handle_t, Patch> mPatches;
+
+    // This map allows going from a thread to "downstream" software patches
+    // when a processing module is inserted in between. Example:
+    //
+    //  from map value.streams                               map key
+    //  [Mixer thread] --> [Virtual output device] --> [Processing module] ---\
+    //      [Hardware module] <-- [Physical output device] <-- [S/W Patch] <--/
+    //                                                 from map value.sw_patches
+    //
+    // This allows the mixer thread to look up the threads of the software patch
+    // for propagating timing info, parameters, etc.
+    //
+    // The current assumptions are:
+    //   1) The processing module acts as a mixer with several outputs which
+    //      represent differently downmixed and / or encoded versions of the same
+    //      mixed stream. There is no 1:1 correspondence between the input streams
+    //      and the software patches, but rather an N:N correspondence between
+    //      a group of streams and a group of patches.
+    //   2) There are only a couple of inserted processing modules in the system,
+    //      so when looking for a stream or patch handle we can iterate over
+    //      all modules.
+    struct ModuleConnections {
+        std::set<audio_io_handle_t> streams;
+        std::set<audio_patch_handle_t> sw_patches;
+    };
+    std::map<audio_module_handle_t, ModuleConnections> mInsertedModules;
 };
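A minimal sketch of how this bookkeeping is meant to be consumed, for example by
AudioFlinger::forwardParametersToDownstreamPatches_l() declared earlier in this
change (the body below is an assumption, not taken from the patch):

    std::vector<PatchPanel::SoftwarePatch> swPatches;
    if (mPatchPanel.getDownstreamSoftwarePatches(upStream, &swPatches) == OK) {
        for (const auto& swPatch : swPatches) {
            sp<PlaybackThread> thread =
                    checkPlaybackThread_l(swPatch.getPlaybackThreadHandle());
            if (thread != nullptr && (useThread == nullptr || useThread(thread))) {
                thread->setParameters(keyValuePairs);   // propagate downstream
            }
        }
    }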
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 70af5c6..3ef8ce2 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -23,6 +23,8 @@
 #include "Configuration.h"
 #include <math.h>
 #include <fcntl.h>
+#include <memory>
+#include <string>
 #include <linux/futex.h>
 #include <sys/stat.h>
 #include <sys/syscall.h>
@@ -71,7 +73,7 @@
 #endif
 
 #ifdef DEBUG_CPU_USAGE
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 
@@ -333,9 +335,9 @@
 #ifdef DEBUG_CPU_USAGE
 private:
     ThreadCpuUsage mCpuUsage;           // instantaneous thread CPU usage in wall clock ns
-    CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
+    audio_utils::Statistics<double> mWcStats; // statistics on thread CPU usage in wall clock ns
 
-    CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
+    audio_utils::Statistics<double> mHzStats; // statistics on thread CPU usage in cycles
 
     int mCpuNum;                        // thread's current CPU number
     int mCpukHz;                        // frequency of thread's current CPU in kHz
@@ -361,7 +363,7 @@
 
     // record sample for wall clock statistics
     if (valid) {
-        mWcStats.sample(wcNs);
+        mWcStats.add(wcNs);
     }
 
     // get the current CPU number
@@ -380,26 +382,26 @@
 
     // if no change in CPU number or frequency, then record sample for cycle statistics
     if (valid && mCpukHz > 0) {
-        double cycles = wcNs * cpukHz * 0.000001;
-        mHzStats.sample(cycles);
+        const double cycles = wcNs * cpukHz * 0.000001;
+        mHzStats.add(cycles);
     }
 
-    unsigned n = mWcStats.n();
+    const unsigned n = mWcStats.getN();
     // mCpuUsage.elapsed() is expensive, so don't call it every loop
     if ((n & 127) == 1) {
-        long long elapsed = mCpuUsage.elapsed();
+        const long long elapsed = mCpuUsage.elapsed();
         if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
-            double perLoop = elapsed / (double) n;
-            double perLoop100 = perLoop * 0.01;
-            double perLoop1k = perLoop * 0.001;
-            double mean = mWcStats.mean();
-            double stddev = mWcStats.stddev();
-            double minimum = mWcStats.minimum();
-            double maximum = mWcStats.maximum();
-            double meanCycles = mHzStats.mean();
-            double stddevCycles = mHzStats.stddev();
-            double minCycles = mHzStats.minimum();
-            double maxCycles = mHzStats.maximum();
+            const double perLoop = elapsed / (double) n;
+            const double perLoop100 = perLoop * 0.01;
+            const double perLoop1k = perLoop * 0.001;
+            const double mean = mWcStats.getMean();
+            const double stddev = mWcStats.getStdDev();
+            const double minimum = mWcStats.getMin();
+            const double maximum = mWcStats.getMax();
+            const double meanCycles = mHzStats.getMean();
+            const double stddevCycles = mHzStats.getStdDev();
+            const double minCycles = mHzStats.getMin();
+            const double maxCycles = mHzStats.getMax();
             mCpuUsage.resetElapsed();
             mWcStats.reset();
             mHzStats.reset();
@@ -857,7 +859,8 @@
     if (mType == RECORD
             || mType == MIXER
             || mType == DUPLICATING
-            || (mType == DIRECT && audio_is_linear_pcm(mHALFormat))) {
+            || mType == DIRECT
+            || mType == OFFLOAD) {
         dprintf(fd, "  Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
     }
 
@@ -1760,6 +1763,11 @@
     mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
 }
 
+std::string AudioFlinger::PlaybackThread::getJsonString() const
+{
+    return "{}";
+}
+
 void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
@@ -2323,15 +2331,13 @@
         if (track->isExternalTrack()) {
             TrackBase::track_state state = track->mState;
             mLock.unlock();
-            status = AudioSystem::startOutput(mId, track->streamType(),
-                                              track->sessionId());
+            status = AudioSystem::startOutput(track->portId());
             mLock.lock();
             // abort track was stopped/paused while we released the lock
             if (state != track->mState) {
                 if (status == NO_ERROR) {
                     mLock.unlock();
-                    AudioSystem::stopOutput(mId, track->streamType(),
-                                            track->sessionId());
+                    AudioSystem::stopOutput(track->portId());
                     mLock.lock();
                 }
                 return INVALID_OPERATION;
@@ -2482,6 +2488,11 @@
     Mutex::Autolock _l(mLock);
     // reject out of sequence requests
     if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
+        // Register discontinuity when HW drain is completed because that can cause
+        // the timestamp frame position to reset to 0 for direct and offload threads.
+        // (Out of sequence requests are ignored, since the discontinuity would be handled
+        // elsewhere, e.g. in flush).
+        mTimestampVerifier.discontinuity();
         mDrainSequence &= ~1;
         mWaitWorkCV.signal();
     }
@@ -2806,15 +2817,13 @@
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             if (track->isExternalTrack()) {
-                AudioSystem::stopOutput(mId, track->streamType(),
-                                        track->sessionId());
+                AudioSystem::stopOutput(track->portId());
 #ifdef ADD_BATTERY_DATA
                 // to track the speaker usage
                 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
 #endif
                 if (track->isTerminated()) {
-                    AudioSystem::releaseOutput(mId, track->streamType(),
-                                               track->sessionId());
+                    AudioSystem::releaseOutput(track->portId());
                 }
             }
         }
@@ -3190,6 +3199,15 @@
 
     checkSilentMode_l();
 
+    // DIRECT and OFFLOAD threads should reset frame count to zero on stop/flush
+    // TODO: add confirmation checks:
+    // 1) DIRECT threads and linear PCM format really resets to 0?
+    // 2) Is frame count really valid if not linear pcm?
+    // 3) Are all 64 bits of position returned, not just lowest 32 bits?
+    if (mType == OFFLOAD || mType == DIRECT) {
+        mTimestampVerifier.setDiscontinuityMode(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
+    }
+
     while (!exitPending())
     {
         // Log merge requests are performed during AudioFlinger binder transactions, but
@@ -3216,7 +3234,8 @@
             // Collect timestamp statistics for the Playback Thread types that support it.
             if (mType == MIXER
                     || mType == DUPLICATING
-                    || (mType == DIRECT && audio_is_linear_pcm(mHALFormat))) { // no indentation
+                    || mType == DIRECT
+                    || mType == OFFLOAD) { // no indentation
             // Gather the framesReleased counters for all active tracks,
             // and associate with the sink frames written out.  We need
             // this to convert the sink timestamp to the track timestamp.
@@ -3335,7 +3354,7 @@
 
                 continue;
             }
-            if ((!mActiveTracks.size() && systemTime() > mStandbyTimeNs) ||
+            if ((mActiveTracks.isEmpty() && systemTime() > mStandbyTimeNs) ||
                                    isSuspended()) {
                 // put audio hardware into standby after short delay
                 if (shouldStandby_l()) {
@@ -3349,7 +3368,7 @@
                     mStandby = true;
                 }
 
-                if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
+                if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
                     // we're about to wait, flush the binder command buffer
                     IPCThreadState::self()->flushCommands();
 
@@ -5098,9 +5117,8 @@
         // while we are dumping it.  It may be inconsistent, but it won't mutate!
         // This is a large object so we place it on the heap.
         // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
-        const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+        const std::unique_ptr<FastMixerDumpState> copy(new FastMixerDumpState(mFastMixerDumpState));
         copy->dump(fd);
-        delete copy;
 
 #ifdef STATE_QUEUE_DUMP
         // Similar for state queue
@@ -5123,6 +5141,16 @@
     }
 }
 
+std::string AudioFlinger::MixerThread::getJsonString() const
+{
+    // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+    // while we are dumping it.  It may be inconsistent, but it won't mutate!
+    // This is a large object so we place it on the heap.
+    // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+    return std::unique_ptr<FastMixerDumpState>(new FastMixerDumpState(mFastMixerDumpState))
+        ->getJsonString();
+}
+
 uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
 {
     return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2;
@@ -5622,6 +5650,7 @@
     mOutput->flush();
     mHwPaused = false;
     mFlushPending = false;
+    mTimestampVerifier.discontinuity(); // DIRECT and OFFLOADED flush resets frame count.
 }
 
 int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
@@ -5956,6 +5985,14 @@
                     track->presentationComplete(framesWritten, audioHALFrames);
                     track->reset();
                     tracksToRemove->add(track);
+                    // DIRECT and OFFLOADED stop resets frame counts.
+                    if (!mUseAsyncWrite) {
+                        // If we don't get explicit drain notification we must
+                        // register discontinuity regardless of whether this is
+                        // the previous (!last) or the upcoming (last) track
+                        // to avoid skipping the discontinuity.
+                        mTimestampVerifier.discontinuity();
+                    }
                 }
             } else {
                 // No buffers for this track. Give it a few chances to
@@ -6612,7 +6649,7 @@
             }
 
             // sleep if there are no active tracks to process
-            if (activeTracks.size() == 0) {
+            if (activeTracks.isEmpty()) {
                 if (sleepUs == 0) {
                     sleepUs = kRecordThreadSleepUs;
                 }
@@ -6663,6 +6700,14 @@
                 }
                 didModify = true;
             }
+            AudioBufferProvider* abp = (fastTrack != 0 && fastTrack->isPatchTrack()) ?
+                    reinterpret_cast<AudioBufferProvider*>(fastTrack.get()) : nullptr;
+            if (state->mFastPatchRecordBufferProvider != abp) {
+                state->mFastPatchRecordBufferProvider = abp;
+                state->mFastPatchRecordFormat = fastTrack == 0 ?
+                        AUDIO_FORMAT_INVALID : fastTrack->format();
+                didModify = true;
+            }
             sq->end(didModify);
             if (didModify) {
                 sq->push(block);
@@ -6688,8 +6733,7 @@
 
         // If an NBAIO source is present, use it to read the normal capture's data
         if (mPipeSource != 0) {
-            size_t framesToRead = mBufferSize / mFrameSize;
-            framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
+            size_t framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
 
             // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
             // to the full buffer point (clearing the overflow condition).  Upon OVERRUN error,
@@ -7040,6 +7084,12 @@
         goto Exit;
     }
 
+    if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
+        ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
     if (*pSampleRate == 0) {
         *pSampleRate = mSampleRate;
     }
@@ -7400,7 +7450,7 @@
     audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
     dprintf(fd, "  AudioStreamIn: %p flags %#x (%s)\n",
             input, flags, inputFlagsToString(flags).c_str());
-    if (mActiveTracks.size() == 0) {
+    if (mActiveTracks.isEmpty()) {
         dprintf(fd, "  No active record clients\n");
     }
 
@@ -7423,9 +7473,8 @@
     // while we are dumping it.  It may be inconsistent, but it won't mutate!
     // This is a large object so we place it on the heap.
     // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
-    const FastCaptureDumpState *copy = new FastCaptureDumpState(mFastCaptureDumpState);
+    std::unique_ptr<FastCaptureDumpState> copy(new FastCaptureDumpState(mFastCaptureDumpState));
     copy->dump(fd);
-    delete copy;
 }
 
 void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -7754,10 +7803,15 @@
 {
     status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
-    mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
-    LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d", mChannelCount, FCC_8);
     mFormat = mHALFormat;
-    LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+    mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+    if (audio_is_linear_pcm(mFormat)) {
+        LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d",
+                mChannelCount, FCC_8);
+    } else {
+        // Can have more than FCC_8 channels in encoded streams.
+        ALOGI("HAL format %#x is not linear pcm", mFormat);
+    }
     result = mInput->stream->getFrameSize(&mFrameSize);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
     result = mInput->stream->getBufferSize(&mBufferSize);
@@ -7862,7 +7916,7 @@
 status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
 {
     // only one chain per input thread
-    if (mEffectChains.size() != 0) {
+    if (!mEffectChains.isEmpty()) {
         ALOGW("addEffectChain_l() already one chain %p on thread %p", chain.get(), this);
         return INVALID_OPERATION;
     }
@@ -8075,7 +8129,7 @@
     }
     // This will decrement references and may cause the destruction of this thread.
     if (isOutput()) {
-        AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+        AudioSystem::releaseOutput(mPortId);
     } else {
         AudioSystem::releaseInput(mPortId);
     }
@@ -8189,7 +8243,7 @@
 
     bool silenced = false;
     if (isOutput()) {
-        ret = AudioSystem::startOutput(mId, streamType(), mSessionId);
+        ret = AudioSystem::startOutput(portId);
     } else {
         ret = AudioSystem::startInput(portId, &silenced);
     }
@@ -8198,10 +8252,10 @@
     // abort if start is rejected by audio policy manager
     if (ret != NO_ERROR) {
         ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
-        if (mActiveTracks.size() != 0) {
+        if (!mActiveTracks.isEmpty()) {
             mLock.unlock();
             if (isOutput()) {
-                AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+                AudioSystem::releaseOutput(portId);
             } else {
                 AudioSystem::releaseInput(portId);
             }
@@ -8273,8 +8327,8 @@
 
     mLock.unlock();
     if (isOutput()) {
-        AudioSystem::stopOutput(mId, streamType(), track->sessionId());
-        AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
+        AudioSystem::stopOutput(track->portId());
+        AudioSystem::releaseOutput(track->portId());
     } else {
         AudioSystem::stopInput(track->portId());
         AudioSystem::releaseInput(track->portId());
@@ -8299,7 +8353,7 @@
     if (mHalStream == 0) {
         return NO_INIT;
     }
-    if (mActiveTracks.size() != 0) {
+    if (!mActiveTracks.isEmpty()) {
         return INVALID_OPERATION;
     }
     mHalStream->standby();
@@ -8737,7 +8791,7 @@
     dprintf(fd, "  Attributes: content type %d usage %d source %d\n",
             mAttr.content_type, mAttr.usage, mAttr.source);
     dprintf(fd, "  Session: %d port Id: %d\n", mSessionId, mPortId);
-    if (mActiveTracks.size() == 0) {
+    if (mActiveTracks.isEmpty()) {
         dprintf(fd, "  No active clients\n");
     }
 }
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 064e291..f0d625c 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -555,6 +555,9 @@
                     size_t          size() const {
                         return mActiveTracks.size();
                     }
+                    bool            isEmpty() const {
+                        return mActiveTracks.isEmpty();
+                    }
                     ssize_t         indexOf(const sp<T>& item) {
                         return mActiveTracks.indexOf(item);
                     }
@@ -658,6 +661,8 @@
     virtual             ~PlaybackThread();
 
                 void        dump(int fd, const Vector<String16>& args);
+                // returns a string of audio performance related data in JSON format.
+    virtual     std::string getJsonString() const;
 
     // Thread virtuals
     virtual     bool        threadLoop();
@@ -1103,6 +1108,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
     virtual     void        dumpInternals(int fd, const Vector<String16>& args);
+                std::string getJsonString() const override;
 
     virtual     bool        isTrackAllowed_l(
                                     audio_channel_mask_t channelMask, audio_format_t format,
@@ -1224,6 +1230,23 @@
     virtual     bool        hasFastMixer() const { return false; }
 
     virtual     int64_t     computeWaitTimeNs_l() const override;
+
+    status_t    threadloop_getHalTimestamp_l(ExtendedTimestamp *timestamp) const override {
+                    // For DIRECT and OFFLOAD threads, query the output sink directly.
+                    if (mOutput != nullptr) {
+                        uint64_t uposition64;
+                        struct timespec time;
+                        if (mOutput->getPresentationPosition(
+                                &uposition64, &time) == OK) {
+                            timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+                                    = (int64_t)uposition64;
+                            timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
+                                    = audio_utils_ns_from_timespec(&time);
+                            return NO_ERROR;
+                        }
+                    }
+                    return INVALID_OPERATION;
+                }
 };
 
 class OffloadThread : public DirectOutputThread {
@@ -1507,6 +1530,8 @@
 
             void        updateMetadata_l() override;
 
+            bool        fastTrackAvailable() const { return mFastTrackAvail; }
+
 private:
             // Enter standby if not already in standby, and set mStandby flag
             void    standbyIfNotAlreadyInStandby();
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 22e610e..8b9485f 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -486,7 +486,7 @@
             wasActive = playbackThread->destroyTrack_l(this);
         }
         if (isExternalTrack() && !wasActive) {
-            AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, mSessionId);
+            AudioSystem::releaseOutput(mPortId);
         }
     }
 }
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 4812b1f..d4c49d9 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -124,17 +124,11 @@
                                         audio_port_handle_t *selectedDeviceId,
                                         audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the output starts being used by corresponding stream.
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session) = 0;
+    virtual status_t startOutput(audio_port_handle_t portId) = 0;
     // indicates to the audio policy manager that the output stops being used by corresponding stream.
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session) = 0;
+    virtual status_t stopOutput(audio_port_handle_t portId) = 0;
     // releases the output.
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session) = 0;
+    virtual void releaseOutput(audio_port_handle_t portId) = 0;
 
     // request an input appropriate for record from the supplied device with supplied parameters.
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
@@ -147,16 +141,13 @@
                                      input_type_t *inputType,
                                      audio_port_handle_t *portId) = 0;
     // indicates to the audio policy manager that the input starts being used.
-    virtual status_t startInput(audio_io_handle_t input,
-                                audio_session_t session,
+    virtual status_t startInput(audio_port_handle_t portId,
                                 bool silenced,
                                 concurrency_type__mask_t *concurrency) = 0;
     // indicates to the audio policy manager that the input stops being used.
-    virtual status_t stopInput(audio_io_handle_t input,
-                               audio_session_t session) = 0;
+    virtual status_t stopInput(audio_port_handle_t portId) = 0;
     // releases the input.
-    virtual void releaseInput(audio_io_handle_t input,
-                              audio_session_t session) = 0;
+    virtual void releaseInput(audio_port_handle_t portId) = 0;
 
     //
     // volume control functions
@@ -235,9 +226,9 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle,
+                                      audio_port_handle_t *portId,
                                       uid_t uid) = 0;
-    virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
+    virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
@@ -324,11 +315,6 @@
     // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
     virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
 
-    // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
-    // over a telephony device during a phone call.
-    virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0;
-    virtual status_t stopTone() = 0;
-
     // set down link audio volume.
     virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
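
The hunks above replace the (output, stream, session) and (input, session) tuples in the playback and capture lifecycle methods with the single audio_port_handle_t that getOutputForAttr()/getInputForAttr() already return, and drop the startTone()/stopTone() hooks. A minimal caller-side sketch of the new contract follows; `apm`, `portId`, `silenced` and `concurrency` are illustrative assumptions, not code from this change:

    // portId is the per-client handle filled in through the last out-parameter of
    // getOutputForAttr() / getInputForAttr() (see the context lines above).
    apm->startOutput(portId);      // was startOutput(output, stream, session)
    // ... playback runs ...
    apm->stopOutput(portId);       // was stopOutput(output, stream, session)
    apm->releaseOutput(portId);    // was releaseOutput(output, stream, session)

    // Capture follows the same pattern:
    apm->startInput(portId, silenced, &concurrency);  // was startInput(input, session, ...)
    apm->stopInput(portId);                           // was stopInput(input, session)
    apm->releaseInput(portId);                        // was releaseInput(input, session)
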
 
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index b3611c4..9b8f095 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -18,10 +18,10 @@
     src/EffectDescriptor.cpp \
     src/SoundTriggerSession.cpp \
     src/SessionRoute.cpp \
-    src/AudioSourceDescriptor.cpp \
     src/VolumeCurve.cpp \
     src/TypeConverter.cpp \
-    src/AudioSession.cpp
+    src/AudioSession.cpp \
+    src/ClientDescriptor.cpp
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
index 9f3fc0c..555412e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
@@ -34,12 +34,4 @@
     virtual void setPatchHandle(audio_patch_handle_t handle) = 0;
 };
 
-class AudioIODescriptorUpdateListener
-{
-public:
-    virtual ~AudioIODescriptorUpdateListener() {};
-
-    virtual void onIODescriptorUpdate() const = 0;
-};
-
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 85f3b86..44662e5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -19,6 +19,7 @@
 #include "AudioIODescriptorInterface.h"
 #include "AudioPort.h"
 #include "AudioSession.h"
+#include "ClientDescriptor.h"
 #include <utils/Errors.h>
 #include <system/audio.h>
 #include <utils/SortedVector.h>
@@ -66,6 +67,8 @@
     AudioSessionCollection getAudioSessions(bool activeOnly) const;
     size_t getAudioSessionCount(bool activeOnly) const;
     audio_source_t getHighestPrioritySource(bool activeOnly) const;
+    void changeRefCount(audio_session_t session, int delta);
+
 
     // implementation of AudioIODescriptorInterface
     audio_config_base_t getConfig() const override;
@@ -79,14 +82,20 @@
                   audio_input_flags_t flags,
                   audio_io_handle_t *input);
     // Called when a stream is about to be started.
-    // Note: called after AudioSession::changeActiveCount(1)
+    // Note: called after changeRefCount(session, 1)
     status_t start();
     // Called after a stream is stopped
-    // Note: called after AudioSession::changeActiveCount(-1)
+    // Note: called after changeRefCount(session, -1)
     void stop();
     void close();
 
-private:
+    RecordClientMap& clients() { return mClients; }
+    RecordClientVector getClientsForSession(audio_session_t session);
+
+private:
+
+    void updateSessionRecordingConfiguration(int event, const sp<AudioSession>& audioSession);
+
     audio_patch_handle_t          mPatchHandle;
     audio_port_handle_t           mId;
     // audio sessions attached to this input
@@ -99,6 +108,9 @@
     // We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
     SortedVector<audio_session_t> mPreemptedSessions;
     AudioPolicyClientInterface *mClientInterface;
+    uint32_t mGlobalRefCount;  // non-session-specific ref count
+
+    RecordClientMap mClients;
 };
 
 class AudioInputCollection :
@@ -122,6 +134,8 @@
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
+    sp<AudioInputDescriptor> getInputForClient(audio_port_handle_t portId);
+
     status_t dump(int fd) const;
 };
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 57d1cfa..ff0201a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -17,15 +17,14 @@
 #pragma once
 
 #include <sys/types.h>
-
-#include "AudioPort.h"
-#include <RoutingStrategy.h>
 #include <utils/Errors.h>
 #include <utils/Timers.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
+#include <RoutingStrategy.h>
 #include "AudioIODescriptorInterface.h"
-#include "AudioSourceDescriptor.h"
+#include "AudioPort.h"
+#include "ClientDescriptor.h"
 
 namespace android {
 
@@ -79,7 +78,9 @@
     audio_patch_handle_t getPatchHandle() const override;
     void setPatchHandle(audio_patch_handle_t handle) override;
 
-    sp<AudioPort>       mPort;
+    TrackClientMap& clients() { return mClients; }
+
+    sp<AudioPort> mPort;
     audio_devices_t mDevice;                   // current device this output is routed to
     uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
     nsecs_t mStopTime[AUDIO_STREAM_CNT];
@@ -92,6 +93,7 @@
 protected:
     audio_patch_handle_t mPatchHandle;
     audio_port_handle_t mId;
+    TrackClientMap mClients;
 };
 
 // Audio output driven by a software mixer in audio flinger.
@@ -156,7 +158,7 @@
 class HwAudioOutputDescriptor: public AudioOutputDescriptor
 {
 public:
-    HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+    HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                             AudioPolicyClientInterface *clientInterface);
     virtual ~HwAudioOutputDescriptor() {}
 
@@ -173,7 +175,7 @@
                            const struct audio_port_config *srcConfig = NULL) const;
     virtual void toAudioPort(struct audio_port *port) const;
 
-    const sp<AudioSourceDescriptor> mSource;
+    const sp<SourceClientDescriptor> mSource;
 
 };
 
@@ -227,6 +229,8 @@
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
+    sp<SwAudioOutputDescriptor> getOutputForClient(audio_port_handle_t portId);
+
     status_t dump(int fd) const;
 };
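
The output descriptors now keep a TrackClientMap keyed by the client's portId, and SwAudioOutputCollection::getOutputForClient() resolves a portId back to the output that owns it. A hedged sketch of how this registry could be used; the locals (`attr`, `cfg`, `portId`, `uid`, `session`, `desc`, `mOutputs`) and the call sites are assumptions for illustration, not part of this change:

    audio_attributes_t attr = {};
    attr.usage = AUDIO_USAGE_MEDIA;
    audio_config_base_t cfg = { .sample_rate = 48000,
                                .channel_mask = AUDIO_CHANNEL_OUT_STEREO,
                                .format = AUDIO_FORMAT_PCM_16_BIT };
    sp<TrackClientDescriptor> client = new TrackClientDescriptor(
            portId, uid, session, attr, cfg,
            /*preferredDeviceId*/ AUDIO_PORT_HANDLE_NONE,
            AUDIO_STREAM_MUSIC, AUDIO_OUTPUT_FLAG_NONE);
    desc->clients().emplace(portId, client);   // registered when the output is selected

    // startOutput(portId) can later find both the output and the client from the id alone:
    sp<SwAudioOutputDescriptor> owner = mOutputs.getOutputForClient(portId);
    if (owner != 0) {
        owner->clients()[portId]->setActive(true);
    }
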
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
index 4226ff2..a1ee708 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
@@ -16,93 +16,84 @@
 
 #pragma once
 
-#include "policy.h"
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
+#include <vector>
+
 #include <system/audio.h>
-#include <cutils/config_utils.h>
+#include <utils/RefBase.h>
+#include <utils/SortedVector.h>
+#include <utils/String8.h>
+
+#include "policy.h"
 
 namespace android {
 
 typedef SortedVector<uint32_t> SampleRateVector;
-typedef SortedVector<audio_channel_mask_t> ChannelsVector;
 typedef Vector<audio_format_t> FormatVector;
 
 template <typename T>
-bool operator == (const SortedVector<T> &left, const SortedVector<T> &right);
+bool operator== (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+    if (left.size() != right.size()) {
+        return false;
+    }
+    for (size_t index = 0; index < right.size(); index++) {
+        if (left[index] != right[index]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+template <typename T>
+bool operator!= (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+    return !(left == right);
+}
+
+class ChannelsVector : public SortedVector<audio_channel_mask_t>
+{
+public:
+    ChannelsVector() = default;
+    ChannelsVector(const ChannelsVector&) = default;
+    ChannelsVector(const SortedVector<audio_channel_mask_t>& sv) :
+            SortedVector<audio_channel_mask_t>(sv) {}
+    ChannelsVector& operator=(const ChannelsVector&) = default;
+
+    // Applies audio_channel_mask_out_to_in to all elements and returns the result.
+    ChannelsVector asInMask() const;
+    // Applies audio_channel_mask_in_to_out to all elements and returns the result.
+    ChannelsVector asOutMask() const;
+};
 
 class AudioProfile : public virtual RefBase
 {
 public:
     static sp<AudioProfile> createFullDynamic();
 
-    AudioProfile(audio_format_t format,
-                 audio_channel_mask_t channelMasks,
-                 uint32_t samplingRate) :
-        mName(String8("")),
-        mFormat(format)
-    {
-        mChannelMasks.add(channelMasks);
-        mSamplingRates.add(samplingRate);
-    }
-
+    AudioProfile(audio_format_t format, audio_channel_mask_t channelMasks, uint32_t samplingRate);
     AudioProfile(audio_format_t format,
                  const ChannelsVector &channelMasks,
-                 const SampleRateVector &samplingRateCollection) :
-        mName(String8("")),
-        mFormat(format),
-        mChannelMasks(channelMasks),
-        mSamplingRates(samplingRateCollection)
-    {}
+                 const SampleRateVector &samplingRateCollection);
 
     audio_format_t getFormat() const { return mFormat; }
-
-    void setChannels(const ChannelsVector &channelMasks)
-    {
-        if (mIsDynamicChannels) {
-            mChannelMasks = channelMasks;
-        }
-    }
     const ChannelsVector &getChannels() const { return mChannelMasks; }
-
-    void setSampleRates(const SampleRateVector &sampleRates)
-    {
-        if (mIsDynamicRate) {
-            mSamplingRates = sampleRates;
-        }
-    }
     const SampleRateVector &getSampleRates() const { return mSamplingRates; }
+    void setChannels(const ChannelsVector &channelMasks);
+    void setSampleRates(const SampleRateVector &sampleRates);
 
+    void clear();
     bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
-
-    void clear()
-    {
-        if (mIsDynamicChannels) {
-            mChannelMasks.clear();
-        }
-        if (mIsDynamicRate) {
-            mSamplingRates.clear();
-        }
-    }
-
-    inline bool supportsChannels(audio_channel_mask_t channels) const
+    bool supportsChannels(audio_channel_mask_t channels) const
     {
         return mChannelMasks.indexOf(channels) >= 0;
     }
-    inline bool supportsRate(uint32_t rate) const
-    {
-        return mSamplingRates.indexOf(rate) >= 0;
-    }
+    bool supportsRate(uint32_t rate) const { return mSamplingRates.indexOf(rate) >= 0; }
 
     status_t checkExact(uint32_t rate, audio_channel_mask_t channels, audio_format_t format) const;
-
     status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
                                         audio_channel_mask_t &updatedChannelMask,
                                         audio_port_type_t portType,
                                         audio_port_role_t portRole) const;
-
     status_t checkCompatibleSamplingRate(uint32_t samplingRate,
                                          uint32_t &updatedSamplingRate) const;
 
@@ -138,213 +129,48 @@
 class AudioProfileVector : public Vector<sp<AudioProfile> >
 {
 public:
-    ssize_t add(const sp<AudioProfile> &profile)
-    {
-        ssize_t index = Vector::add(profile);
-        // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
-        // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
-        // [](const audio_format_t *format1, const audio_format_t *format2) {
-        //     return compareFormats(*format1, *format2);
-        // }
-        sort(compareFormats);
-        return index;
-    }
-
+    ssize_t add(const sp<AudioProfile> &profile);
     // This API is intended to be used by the policy manager once retrieving capabilities
     // for a profile with dynamic format, rate and channels attributes
-    ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd)
-    {
-        // Check valid profile to add:
-        if (!profileToAdd->hasValidFormat()) {
-            return -1;
-        }
-        if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
-            FormatVector formats;
-            formats.add(profileToAdd->getFormat());
-            setFormats(FormatVector(formats));
-            return 0;
-        }
-        if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
-            setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
-            return 0;
-        }
-        if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
-            setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
-            return 0;
-        }
-        // Go through the list of profile to avoid duplicates
-        for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
-            const sp<AudioProfile> &profile = itemAt(profileIndex);
-            if (profile->isValid() && profile == profileToAdd) {
-                // Nothing to do
-                return profileIndex;
-            }
-        }
-        profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
-        return add(profileToAdd);
-    }
-
-    sp<AudioProfile> getFirstValidProfile() const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->isValid()) {
-                return itemAt(i);
-            }
-        }
-        return 0;
-    }
-
-    bool hasValidProfile() const { return getFirstValidProfile() != 0; }
+    ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd);
 
     status_t checkExactProfile(uint32_t samplingRate, audio_channel_mask_t channelMask,
                                audio_format_t format) const;
-
     status_t checkCompatibleProfile(uint32_t &samplingRate, audio_channel_mask_t &channelMask,
                                     audio_format_t &format,
                                     audio_port_type_t portType,
                                     audio_port_role_t portRole) const;
+    void clearProfiles();
+    // Assuming that this profile vector contains input profiles,
+    // find the best matching config from 'outputProfiles', according to
+    // the given preferences for audio formats and channel masks.
+    // Note: std::vectors are used for the preference lists because the specialized
+    //       containers for formats and channels are sorted and would impose their
+    //       own ordering instead of the caller's preference order.
+    status_t findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
+            const std::vector<audio_format_t>& preferredFormats, // order: most pref -> least pref
+            const std::vector<audio_channel_mask_t>& preferredOutputChannels,
+            bool preferHigherSamplingRates,
+            audio_config_base *bestOutputConfig) const;
 
-    FormatVector getSupportedFormats() const
-    {
-        FormatVector supportedFormats;
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->hasValidFormat()) {
-                supportedFormats.add(itemAt(i)->getFormat());
-            }
-        }
-        return supportedFormats;
-    }
+    sp<AudioProfile> getFirstValidProfile() const;
+    sp<AudioProfile> getFirstValidProfileFor(audio_format_t format) const;
+    bool hasValidProfile() const { return getFirstValidProfile() != 0; }
 
-    bool hasDynamicProfile() const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->isDynamic()) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    bool hasDynamicFormat() const
-    {
-        return getProfileFor(gDynamicFormat) != 0;
-    }
-
-    bool hasDynamicChannelsFor(audio_format_t format) const
-    {
-       for (size_t i = 0; i < size(); i++) {
-           sp<AudioProfile> profile = itemAt(i);
-           if (profile->getFormat() == format && profile->isDynamicChannels()) {
-               return true;
-           }
-       }
-       return false;
-    }
-
-    bool hasDynamicRateFor(audio_format_t format) const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            sp<AudioProfile> profile = itemAt(i);
-            if (profile->getFormat() == format && profile->isDynamicRate()) {
-                return true;
-            }
-        }
-        return false;
-    }
+    FormatVector getSupportedFormats() const;
+    bool hasDynamicChannelsFor(audio_format_t format) const;
+    bool hasDynamicFormat() const { return getProfileFor(gDynamicFormat) != 0; }
+    bool hasDynamicProfile() const;
+    bool hasDynamicRateFor(audio_format_t format) const;
 
     // One audio profile will be added for each format supported by Audio HAL
-    void setFormats(const FormatVector &formats)
-    {
-        // Only allow to change the format of dynamic profile
-        sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
-        if (dynamicFormatProfile == 0) {
-            return;
-        }
-        for (size_t i = 0; i < formats.size(); i++) {
-            sp<AudioProfile> profile = new AudioProfile(formats[i],
-                                                        dynamicFormatProfile->getChannels(),
-                                                        dynamicFormatProfile->getSampleRates());
-            profile->setDynamicFormat(true);
-            profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
-            profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
-            add(profile);
-        }
-    }
+    void setFormats(const FormatVector &formats);
 
-    void clearProfiles()
-    {
-        for (size_t i = size(); i != 0; ) {
-            sp<AudioProfile> profile = itemAt(--i);
-            if (profile->isDynamicFormat() && profile->hasValidFormat()) {
-                removeAt(i);
-                continue;
-            }
-            profile->clear();
-        }
-    }
-
-    void dump(int fd, int spaces) const
-    {
-        const size_t SIZE = 256;
-        char buffer[SIZE];
-
-        snprintf(buffer, SIZE, "%*s- Profiles:\n", spaces, "");
-        write(fd, buffer, strlen(buffer));
-        for (size_t i = 0; i < size(); i++) {
-            snprintf(buffer, SIZE, "%*sProfile %zu:", spaces + 4, "", i);
-            write(fd, buffer, strlen(buffer));
-            itemAt(i)->dump(fd, spaces + 8);
-        }
-    }
+    void dump(int fd, int spaces) const;
 
 private:
-    void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format)
-    {
-        for (size_t i = 0; i < size(); i++) {
-            sp<AudioProfile> profile = itemAt(i);
-            if (profile->getFormat() == format && profile->isDynamicRate()) {
-                if (profile->hasValidRates()) {
-                    // Need to create a new profile with same format
-                    sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
-                                                                     sampleRates);
-                    profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
-                    add(profileToAdd);
-                } else {
-                    profile->setSampleRates(sampleRates);
-                }
-                return;
-            }
-        }
-    }
-
-    void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
-    {
-        for (size_t i = 0; i < size(); i++) {
-            sp<AudioProfile> profile = itemAt(i);
-            if (profile->getFormat() == format && profile->isDynamicChannels()) {
-                if (profile->hasValidChannels()) {
-                    // Need to create a new profile with same format
-                    sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
-                                                                     profile->getSampleRates());
-                    profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
-                    add(profileToAdd);
-                } else {
-                    profile->setChannels(channelMasks);
-                }
-                return;
-            }
-        }
-    }
-
-    sp<AudioProfile> getProfileFor(audio_format_t format) const
-    {
-        for (size_t i = 0; i < size(); i++) {
-            if (itemAt(i)->getFormat() == format) {
-                return itemAt(i);
-            }
-        }
-        return 0;
-    }
+    sp<AudioProfile> getProfileFor(audio_format_t format) const;
+    void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format);
+    void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format);
 
     static int compareFormats(const sp<AudioProfile> *profile1, const sp<AudioProfile> *profile2);
 };
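
A short illustration of the helpers declared above (values are placeholders only): the templated operator== compares SortedVector contents element-wise, and ChannelsVector converts between input and output channel-mask representations:

    SampleRateVector a, b;
    a.add(44100); a.add(48000);
    b.add(48000); b.add(44100);            // SortedVector keeps both in ascending order
    bool same = (a == b);                  // true: element-wise comparison of sorted contents

    ChannelsVector outMasks;
    outMasks.add(AUDIO_CHANNEL_OUT_STEREO);
    ChannelsVector inMasks = outMasks.asInMask();  // now contains AUDIO_CHANNEL_IN_STEREO
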
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index 53e6ec9..1636d3a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -29,7 +29,7 @@
 
 class AudioPolicyClientInterface;
 
-class AudioSession : public RefBase, public AudioIODescriptorUpdateListener
+class AudioSession : public RefBase
 {
 public:
     AudioSession(audio_session_t session,
@@ -39,9 +39,7 @@
                  audio_channel_mask_t channelMask,
                  audio_input_flags_t flags,
                  uid_t uid,
-                 bool isSoundTrigger,
-                 AudioMix* policyMix,
-                 AudioPolicyClientInterface *clientInterface);
+                 bool isSoundTrigger);
 
     status_t dump(int fd, int spaces, int index) const;
 
@@ -50,6 +48,8 @@
     audio_format_t format() const { return mConfig.format; }
     uint32_t sampleRate() const { return mConfig.sample_rate; }
     audio_channel_mask_t channelMask() const { return mConfig.channel_mask; }
+    audio_config_base config() const { return mConfig; }
+    record_client_info_t recordClientInfo() const { return mRecordClientInfo; }
     audio_input_flags_t flags() const { return mFlags; }
     uid_t uid() const { return mRecordClientInfo.uid; }
     void setUid(uid_t uid) { mRecordClientInfo.uid = uid; }
@@ -63,10 +63,6 @@
     uint32_t changeOpenCount(int delta);
     uint32_t changeActiveCount(int delta);
 
-    void setInfoProvider(AudioIODescriptorInterface *provider);
-    // implementation of AudioIODescriptorUpdateListener
-    virtual void onIODescriptorUpdate() const;
-
 private:
     record_client_info_t mRecordClientInfo;
     const struct audio_config_base mConfig;
@@ -75,19 +71,14 @@
     bool mSilenced;
     uint32_t  mOpenCount;
     uint32_t  mActiveCount;
-    AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
-    AudioPolicyClientInterface* mClientInterface;
-    const AudioIODescriptorInterface* mInfoProvider;
 };
 
 class AudioSessionCollection :
-    public DefaultKeyedVector<audio_session_t, sp<AudioSession> >,
-    public AudioIODescriptorUpdateListener
+    public DefaultKeyedVector<audio_session_t, sp<AudioSession> >
 {
 public:
     status_t addSession(audio_session_t session,
-                             const sp<AudioSession>& audioSession,
-                             AudioIODescriptorInterface *provider);
+                             const sp<AudioSession>& audioSession);
 
     status_t removeSession(audio_session_t session);
 
@@ -99,9 +90,6 @@
     bool isSourceActive(audio_source_t source) const;
     audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
-    // implementation of AudioIODescriptorUpdateListener
-    virtual void onIODescriptorUpdate() const;
-
     status_t dump(int fd, int spaces) const;
 };
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
deleted file mode 100644
index 0d90f42..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <system/audio.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/RefBase.h>
-#include <RoutingStrategy.h>
-#include <AudioPatch.h>
-
-namespace android {
-
-class SwAudioOutputDescriptor;
-class HwAudioOutputDescriptor;
-class DeviceDescriptor;
-
-class AudioSourceDescriptor: public RefBase
-{
-public:
-    AudioSourceDescriptor(const sp<DeviceDescriptor> device, const audio_attributes_t *attributes,
-                          uid_t uid) :
-        mDevice(device), mAttributes(*attributes), mUid(uid) {}
-    virtual ~AudioSourceDescriptor() {}
-
-    audio_patch_handle_t getHandle() const { return mPatchDesc->mHandle; }
-
-    status_t    dump(int fd);
-
-    const sp<DeviceDescriptor> mDevice;
-    const audio_attributes_t mAttributes;
-    uid_t mUid;
-    sp<AudioPatch> mPatchDesc;
-    wp<SwAudioOutputDescriptor> mSwOutput;
-    wp<HwAudioOutputDescriptor> mHwOutput;
-};
-
-class AudioSourceCollection :
-        public DefaultKeyedVector< audio_patch_handle_t, sp<AudioSourceDescriptor> >
-{
-public:
-    status_t dump(int fd) const;
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
new file mode 100644
index 0000000..9efe57f
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+#include <map>
+#include <unistd.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include "AudioPatch.h"
+
+namespace android {
+
+class DeviceDescriptor;
+class HwAudioOutputDescriptor;
+class SwAudioOutputDescriptor;
+
+class ClientDescriptor: public RefBase
+{
+public:
+    ClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+                   audio_attributes_t attributes, audio_config_base_t config,
+                   audio_port_handle_t preferredDeviceId) :
+        mPortId(portId), mUid(uid), mSessionId(sessionId), mAttributes(attributes),
+        mConfig(config), mPreferredDeviceId(preferredDeviceId), mActive(false) {}
+    ~ClientDescriptor() override = default;
+
+    status_t dump(int fd, int spaces, int index);
+    virtual status_t dump(String8& dst, int spaces, int index);
+
+    audio_port_handle_t portId() const { return mPortId; }
+    uid_t uid() const { return mUid; }
+    audio_session_t session() const { return mSessionId; }
+    audio_attributes_t attributes() const { return mAttributes; }
+    audio_config_base_t config() const { return mConfig; }
+    audio_port_handle_t preferredDeviceId() const { return mPreferredDeviceId; }
+    void setActive(bool active) { mActive = active; }
+    bool active() const { return mActive; }
+
+private:
+    const audio_port_handle_t mPortId;            // unique Id for this client
+    const uid_t mUid;                             // client UID
+    const audio_session_t mSessionId;             // audio session ID
+    const audio_attributes_t mAttributes;         // usage...
+    const audio_config_base_t mConfig;
+    const audio_port_handle_t mPreferredDeviceId; // selected device port ID
+    bool mActive;
+
+protected:
+    // FIXME: used until other descriptor classes have a dump-to-String8 method
+    int mDumpFd;
+};
+
+class TrackClientDescriptor: public ClientDescriptor
+{
+public:
+    TrackClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+                   audio_attributes_t attributes, audio_config_base_t config,
+                   audio_port_handle_t preferredDeviceId,
+                   audio_stream_type_t stream, audio_output_flags_t flags) :
+        ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
+        mStream(stream), mFlags(flags) {}
+    ~TrackClientDescriptor() override = default;
+
+    using ClientDescriptor::dump;
+    status_t dump(String8& dst, int spaces, int index) override;
+
+    audio_output_flags_t flags() const { return mFlags; }
+    audio_stream_type_t stream() const { return mStream; }
+
+private:
+    const audio_stream_type_t mStream;
+    const audio_output_flags_t mFlags;
+};
+
+class RecordClientDescriptor: public ClientDescriptor
+{
+public:
+    RecordClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+                        audio_attributes_t attributes, audio_config_base_t config,
+                        audio_port_handle_t preferredDeviceId,
+                        audio_source_t source, audio_input_flags_t flags) :
+        ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
+        mSource(source), mFlags(flags) {}
+    ~RecordClientDescriptor() override = default;
+
+    using ClientDescriptor::dump;
+    status_t dump(String8& dst, int spaces, int index) override;
+
+    audio_source_t source() const { return mSource; }
+    audio_input_flags_t flags() const { return mFlags; }
+
+private:
+    const audio_source_t mSource;
+    const audio_input_flags_t mFlags;
+};
+
+class SourceClientDescriptor: public TrackClientDescriptor
+{
+public:
+    SourceClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
+                           const sp<AudioPatch>& patchDesc, const sp<DeviceDescriptor>& srcDevice,
+                           audio_stream_type_t stream);
+    ~SourceClientDescriptor() override = default;
+
+    sp<AudioPatch> patchDesc() const { return mPatchDesc; }
+    sp<DeviceDescriptor> srcDevice() const { return mSrcDevice; }
+    wp<SwAudioOutputDescriptor> swOutput() const { return mSwOutput; }
+    void setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput);
+    wp<HwAudioOutputDescriptor> hwOutput() const { return mHwOutput; }
+    void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
+
+    using ClientDescriptor::dump;
+    status_t dump(String8& dst, int spaces, int index) override;
+
+private:
+    const sp<AudioPatch> mPatchDesc;
+    const sp<DeviceDescriptor> mSrcDevice;
+    wp<SwAudioOutputDescriptor> mSwOutput;
+    wp<HwAudioOutputDescriptor> mHwOutput;
+};
+
+class SourceClientCollection :
+    public DefaultKeyedVector< audio_port_handle_t, sp<SourceClientDescriptor> >
+{
+public:
+    status_t dump(int fd) const;
+};
+
+typedef std::vector< sp<TrackClientDescriptor> > TrackClientVector;
+typedef std::map< audio_port_handle_t, sp<TrackClientDescriptor> > TrackClientMap;
+typedef std::vector< sp<RecordClientDescriptor> > RecordClientVector;
+typedef std::map< audio_port_handle_t, sp<RecordClientDescriptor> > RecordClientMap;
+
+} // namespace android
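
The ClientDescriptor hierarchy records, per portId, what used to be tracked per stream or per session. A minimal sketch of creating a capture client and filtering a RecordClientMap by session, mirroring what AudioInputDescriptor::getClientsForSession() does later in this change; all literal values and local names are illustrative assumptions:

    audio_attributes_t attr = {};
    attr.source = AUDIO_SOURCE_MIC;
    audio_config_base_t cfg = { .sample_rate = 16000,
                                .channel_mask = AUDIO_CHANNEL_IN_MONO,
                                .format = AUDIO_FORMAT_PCM_16_BIT };
    sp<RecordClientDescriptor> rec = new RecordClientDescriptor(
            /*portId*/ 42, /*uid*/ 10051, /*sessionId*/ AUDIO_SESSION_NONE, attr, cfg,
            /*preferredDeviceId*/ AUDIO_PORT_HANDLE_NONE,
            AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_NONE);

    RecordClientMap clients;
    clients.emplace(rec->portId(), rec);

    RecordClientVector sameSession;
    for (const auto& c : clients) {
        if (c.second->session() == AUDIO_SESSION_NONE) {   // filter by session id
            sameSession.push_back(c.second);
        }
    }
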
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 2325e4f..c08e752 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -82,8 +82,8 @@
     DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
     sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
     sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
-
-    audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+    DeviceVector getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+    audio_devices_t getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const;
 
     status_t dump(int fd, const String8 &tag, int spaces = 0, bool verbose = true) const;
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index f0144db..2770e74 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -32,7 +32,7 @@
     : mIoHandle(0),
       mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
       mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
-      mClientInterface(clientInterface)
+      mClientInterface(clientInterface), mGlobalRefCount(0)
 {
     if (profile != NULL) {
         profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -164,7 +164,7 @@
 
 status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
                          const sp<AudioSession>& audioSession) {
-    return mSessions.addSession(session, audioSession, /*AudioIODescriptorInterface*/this);
+    return mSessions.addSession(session, audioSession);
 }
 
 status_t AudioInputDescriptor::removeAudioSession(audio_session_t session) {
@@ -179,7 +179,11 @@
 void AudioInputDescriptor::setPatchHandle(audio_patch_handle_t handle)
 {
     mPatchHandle = handle;
-    mSessions.onIODescriptorUpdate();
+    for (size_t i = 0; i < mSessions.size(); i++) {
+        if (mSessions[i]->activeCount() > 0) {
+            updateSessionRecordingConfiguration(RECORD_CONFIG_EVENT_START, mSessions[i]);
+        }
+    }
 }
 
 audio_config_base_t AudioInputDescriptor::getConfig() const
@@ -266,7 +270,7 @@
         LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
                             __FUNCTION__, mProfile->curOpenCount);
         // do not call stop() here as stop() is supposed to be called after
-        // AudioSession::changeActiveCount(-1) and we don't know how many sessions
+        // changeRefCount(session, -1) and we don't know how many sessions
         // are still active at this time
         if (isActive()) {
             mProfile->curActiveCount--;
@@ -276,6 +280,78 @@
     }
 }
 
+void AudioInputDescriptor::changeRefCount(audio_session_t session, int delta)
+{
+    sp<AudioSession> audioSession = mSessions.valueFor(session);
+    if (audioSession == 0) {
+        return;
+    }
+    // handle session-independent ref count
+    uint32_t oldGlobalRefCount = mGlobalRefCount;
+    if ((delta + (int)mGlobalRefCount) < 0) {
+        ALOGW("changeRefCount() invalid delta %d globalRefCount %d", delta, mGlobalRefCount);
+        delta = -((int)mGlobalRefCount);
+    }
+    mGlobalRefCount += delta;
+    if ((oldGlobalRefCount == 0) && (mGlobalRefCount > 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+                                                            MIX_STATE_MIXING);
+        }
+
+    } else if ((oldGlobalRefCount > 0) && (mGlobalRefCount == 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+                                                            MIX_STATE_IDLE);
+        }
+    }
+
+    uint32_t oldActiveCount = audioSession->activeCount();
+    if ((delta + (int)oldActiveCount) < 0) {
+        ALOGW("changeRefCount() invalid delta %d for sesion %d active count %d",
+              delta, session, oldActiveCount);
+        delta = -((int)oldActiveCount);
+    }
+
+    audioSession->changeActiveCount(delta);
+
+    int event = RECORD_CONFIG_EVENT_NONE;
+    if ((oldActiveCount == 0) && (audioSession->activeCount() > 0)) {
+        event = RECORD_CONFIG_EVENT_START;
+    } else if ((oldActiveCount > 0) && (audioSession->activeCount() == 0)) {
+        event = RECORD_CONFIG_EVENT_STOP;
+    }
+    if (event != RECORD_CONFIG_EVENT_NONE) {
+        updateSessionRecordingConfiguration(event, audioSession);
+    }
+
+}
+
+void AudioInputDescriptor::updateSessionRecordingConfiguration(
+    int event, const sp<AudioSession>& audioSession) {
+
+    const audio_config_base_t sessionConfig = audioSession->config();
+    const record_client_info_t recordClientInfo = audioSession->recordClientInfo();
+    const audio_config_base_t config = getConfig();
+    mClientInterface->onRecordingConfigurationUpdate(event,
+                                                     &recordClientInfo, &sessionConfig,
+                                                     &config, mPatchHandle);
+}
+
+RecordClientVector AudioInputDescriptor::getClientsForSession(
+    audio_session_t session)
+{
+    RecordClientVector clients;
+    for (const auto &client : mClients) {
+        if (client.second->session() == session) {
+            clients.push_back(client.second);
+        }
+    }
+    return clients;
+}
+
 status_t AudioInputDescriptor::dump(int fd)
 {
     const size_t SIZE = 256;
@@ -297,6 +373,13 @@
 
     mSessions.dump(fd, 1);
 
+    size_t index = 0;
+    result = " AudioRecord clients:\n";
+    for (const auto& client: mClients) {
+        client.second->dump(result, 2, index++);
+    }
+    result.append(" \n");
+    write(fd, result.string(), result.size());
     return NO_ERROR;
 }
 
@@ -359,6 +442,19 @@
     return devices;
 }
 
+sp<AudioInputDescriptor> AudioInputCollection::getInputForClient(audio_port_handle_t portId)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioInputDescriptor> inputDesc = valueAt(i);
+        for (const auto& client : inputDesc->clients()) {
+            if (client.second->portId() == portId) {
+                return inputDesc;
+            }
+        }
+    }
+    return 0;
+}
+
 status_t AudioInputCollection::dump(int fd) const
 {
     const size_t SIZE = 256;
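
changeRefCount() now carries the accounting that used to live in AudioSession::changeActiveCount(): it maintains the input-wide mGlobalRefCount for dynamic-policy mix activity callbacks and emits the per-session RECORD_CONFIG_EVENT_START/STOP updates. A hedged sketch of the intended call order (the header comments state that start()/stop() are called after the corresponding changeRefCount()); `mInputs`, `portId` and `session` are assumed caller-side variables:

    sp<AudioInputDescriptor> input = mInputs.getInputForClient(portId);
    if (input != 0) {
        input->changeRefCount(session, 1);    // may emit RECORD_CONFIG_EVENT_START
        input->start();
        // ... capture runs ...
        input->changeRefCount(session, -1);   // may emit RECORD_CONFIG_EVENT_STOP
        input->stop();
    }
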
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 3c69de5..39fce4d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -224,6 +224,13 @@
                  i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
         result.append(buffer);
     }
+
+    result.append(" AudioTrack clients:\n");
+    size_t index = 0;
+    for (const auto& client : mClients) {
+        client.second->dump(result, 2, index++);
+    }
+    result.append(" \n");
     write(fd, result.string(), result.size());
 
     return NO_ERROR;
@@ -551,9 +558,9 @@
 }
 
 // HwAudioOutputDescriptor implementation
-HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
-    : AudioOutputDescriptor(source->mDevice, clientInterface),
+    : AudioOutputDescriptor(source->srcDevice(), clientInterface),
       mSource(source)
 {
 }
@@ -569,7 +576,7 @@
     snprintf(buffer, SIZE, "Source:\n");
     result.append(buffer);
     write(fd, result.string(), result.size());
-    mSource->dump(fd);
+    mSource->dump(fd, 0, 0);
 
     return NO_ERROR;
 }
@@ -583,13 +590,13 @@
                                                  struct audio_port_config *dstConfig,
                                                  const struct audio_port_config *srcConfig) const
 {
-    mSource->mDevice->toAudioPortConfig(dstConfig, srcConfig);
+    mSource->srcDevice()->toAudioPortConfig(dstConfig, srcConfig);
 }
 
 void HwAudioOutputDescriptor::toAudioPort(
                                                     struct audio_port *port) const
 {
-    mSource->mDevice->toAudioPort(port);
+    mSource->srcDevice()->toAudioPort(port);
 }
 
 
@@ -731,6 +738,18 @@
     return devices;
 }
 
+sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputForClient(audio_port_handle_t portId)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
+        for (const auto& client : outputDesc->clients()) {
+            if (client.second->portId() == portId) {
+                return outputDesc;
+            }
+        }
+    }
+    return 0;
+}
 
 status_t SwAudioOutputCollection::dump(int fd) const
 {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 26af9b4..d04beec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -14,20 +14,53 @@
  * limitations under the License.
  */
 
+#include <algorithm>
+#include <set>
+#include <string>
+
 #define LOG_TAG "APM::AudioProfile"
 //#define LOG_NDEBUG 0
 
-#include "AudioProfile.h"
-#include "AudioPort.h"
-#include "HwModule.h"
-#include "AudioGain.h"
-#include <utils/SortedVector.h>
-#include "TypeConverter.h"
 #include <media/AudioResamplerPublic.h>
-#include <algorithm>
+#include <utils/Errors.h>
+
+#include "AudioGain.h"
+#include "AudioPort.h"
+#include "AudioProfile.h"
+#include "HwModule.h"
+#include "TypeConverter.h"
 
 namespace android {
 
+ChannelsVector ChannelsVector::asInMask() const
+{
+    ChannelsVector inMaskVector;
+    for (const auto& channel : *this) {
+        if (audio_channel_mask_out_to_in(channel) != AUDIO_CHANNEL_INVALID) {
+            inMaskVector.add(audio_channel_mask_out_to_in(channel));
+        }
+    }
+    return inMaskVector;
+}
+
+ChannelsVector ChannelsVector::asOutMask() const
+{
+    ChannelsVector outMaskVector;
+    for (const auto& channel : *this) {
+        if (audio_channel_mask_in_to_out(channel) != AUDIO_CHANNEL_INVALID) {
+            outMaskVector.add(audio_channel_mask_in_to_out(channel));
+        }
+    }
+    return outMaskVector;
+}
+
+bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
+{
+    return (left.getFormat() == compareTo.getFormat()) &&
+            (left.getChannels() == compareTo.getChannels()) &&
+            (left.getSampleRates() == compareTo.getSampleRates());
+}
+
 static AudioProfile* createFullDynamicImpl()
 {
     AudioProfile* dynamicProfile = new AudioProfile(gDynamicFormat,
@@ -45,6 +78,48 @@
     return dynamicProfile;
 }
 
+AudioProfile::AudioProfile(audio_format_t format,
+                           audio_channel_mask_t channelMasks,
+                           uint32_t samplingRate) :
+        mName(String8("")),
+        mFormat(format)
+{
+    mChannelMasks.add(channelMasks);
+    mSamplingRates.add(samplingRate);
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+                           const ChannelsVector &channelMasks,
+                           const SampleRateVector &samplingRateCollection) :
+        mName(String8("")),
+        mFormat(format),
+        mChannelMasks(channelMasks),
+        mSamplingRates(samplingRateCollection) {}
+
+void AudioProfile::setChannels(const ChannelsVector &channelMasks)
+{
+    if (mIsDynamicChannels) {
+        mChannelMasks = channelMasks;
+    }
+}
+
+void AudioProfile::setSampleRates(const SampleRateVector &sampleRates)
+{
+    if (mIsDynamicRate) {
+        mSamplingRates = sampleRates;
+    }
+}
+
+void AudioProfile::clear()
+{
+    if (mIsDynamicChannels) {
+        mChannelMasks.clear();
+    }
+    if (mIsDynamicRate) {
+        mSamplingRates.clear();
+    }
+}
+
 status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
                                   audio_format_t format) const
 {
@@ -56,27 +131,6 @@
     return BAD_VALUE;
 }
 
-template <typename T>
-bool operator == (const SortedVector<T> &left, const SortedVector<T> &right)
-{
-    if (left.size() != right.size()) {
-        return false;
-    }
-    for(size_t index = 0; index < right.size(); index++) {
-        if (left[index] != right[index]) {
-            return false;
-        }
-    }
-    return true;
-}
-
-bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
-{
-    return (left.getFormat() == compareTo.getFormat()) &&
-            (left.getChannels() == compareTo.getChannels()) &&
-            (left.getSampleRates() == compareTo.getSampleRates());
-}
-
 status_t AudioProfile::checkCompatibleSamplingRate(uint32_t samplingRate,
                                                    uint32_t &updatedSamplingRate) const
 {
@@ -242,6 +296,50 @@
     write(fd, result.string(), result.size());
 }
 
+ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
+{
+    ssize_t index = Vector::add(profile);
+    // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
+    // TODO: compareFormats could be a lambda to convert from pointer-to-format to format:
+    // [](const audio_format_t *format1, const audio_format_t *format2) {
+    //     return compareFormats(*format1, *format2);
+    // }
+    sort(compareFormats);
+    return index;
+}
+
+ssize_t AudioProfileVector::addProfileFromHal(const sp<AudioProfile> &profileToAdd)
+{
+    // Check valid profile to add:
+    if (!profileToAdd->hasValidFormat()) {
+        return -1;
+    }
+    if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+        FormatVector formats;
+        formats.add(profileToAdd->getFormat());
+        setFormats(FormatVector(formats));
+        return 0;
+    }
+    if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
+        setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
+        return 0;
+    }
+    if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+        setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
+        return 0;
+    }
+    // Go through the list of profiles to avoid duplicates
+    for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
+        const sp<AudioProfile> &profile = itemAt(profileIndex);
+        if (profile->isValid() && profile == profileToAdd) {
+            // Nothing to do
+            return profileIndex;
+        }
+    }
+    profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
+    return add(profileToAdd);
+}
+
 status_t AudioProfileVector::checkExactProfile(uint32_t samplingRate,
                                                audio_channel_mask_t channelMask,
                                                audio_format_t format) const
@@ -298,6 +396,233 @@
     return BAD_VALUE;
 }
 
+void AudioProfileVector::clearProfiles()
+{
+    for (size_t i = size(); i != 0; ) {
+        sp<AudioProfile> profile = itemAt(--i);
+        if (profile->isDynamicFormat() && profile->hasValidFormat()) {
+            removeAt(i);
+            continue;
+        }
+        profile->clear();
+    }
+}
+
+// Returns an intersection between two possibly unsorted vectors and the contents of 'order'.
+// The result is ordered according to 'order'.
+template<typename T, typename Order>
+std::vector<typename T::value_type> intersectFilterAndOrder(
+        const T& input1, const T& input2, const Order& order)
+{
+    std::set<typename T::value_type> set1{input1.begin(), input1.end()};
+    std::set<typename T::value_type> set2{input2.begin(), input2.end()};
+    std::set<typename T::value_type> common;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+            std::inserter(common, common.begin()));
+    std::vector<typename T::value_type> result;
+    for (const auto& e : order) {
+        if (common.find(e) != common.end()) result.push_back(e);
+    }
+    return result;
+}
+
+// Intersect two possibly unsorted vectors, return common elements according to 'comp' ordering.
+// 'comp' is a comparator function.
+template<typename T, typename Compare>
+std::vector<typename T::value_type> intersectAndOrder(
+        const T& input1, const T& input2, Compare comp)
+{
+    std::set<typename T::value_type, Compare> set1{input1.begin(), input1.end(), comp};
+    std::set<typename T::value_type, Compare> set2{input2.begin(), input2.end(), comp};
+    std::vector<typename T::value_type> result;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+            std::back_inserter(result), comp);
+    return result;
+}
+
+status_t AudioProfileVector::findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
+            const std::vector<audio_format_t>& preferredFormats,
+            const std::vector<audio_channel_mask_t>& preferredOutputChannels,
+            bool preferHigherSamplingRates,
+            audio_config_base *bestOutputConfig) const
+{
+    auto formats = intersectFilterAndOrder(getSupportedFormats(),
+            outputProfiles.getSupportedFormats(), preferredFormats);
+    // Pick the best compatible profile.
+    for (const auto& f : formats) {
+        sp<AudioProfile> inputProfile = getFirstValidProfileFor(f);
+        sp<AudioProfile> outputProfile = outputProfiles.getFirstValidProfileFor(f);
+        if (inputProfile == nullptr || outputProfile == nullptr) {
+            continue;
+        }
+        auto channels = intersectFilterAndOrder(inputProfile->getChannels().asOutMask(),
+                outputProfile->getChannels(), preferredOutputChannels);
+        if (channels.empty()) {
+            continue;
+        }
+        auto sampleRates = preferHigherSamplingRates ?
+                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+                        std::greater<typename SampleRateVector::value_type>()) :
+                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+                        std::less<typename SampleRateVector::value_type>());
+        if (sampleRates.empty()) {
+            continue;
+        }
+        ALOGD("%s() found channel mask %#x and sample rate %d for format %#x.",
+                __func__, *channels.begin(), *sampleRates.begin(), f);
+        bestOutputConfig->format = f;
+        bestOutputConfig->sample_rate = *sampleRates.begin();
+        bestOutputConfig->channel_mask = *channels.begin();
+        return NO_ERROR;
+    }
+    return BAD_VALUE;
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfile() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->isValid()) {
+            return itemAt(i);
+        }
+    }
+    return 0;
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfileFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->isValid() && itemAt(i)->getFormat() == format) {
+            return itemAt(i);
+        }
+    }
+    return 0;
+}
+
+FormatVector AudioProfileVector::getSupportedFormats() const
+{
+    FormatVector supportedFormats;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->hasValidFormat()) {
+            supportedFormats.add(itemAt(i)->getFormat());
+        }
+    }
+    return supportedFormats;
+}
+
+bool AudioProfileVector::hasDynamicChannelsFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicChannels()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioProfileVector::hasDynamicProfile() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->isDynamic()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioProfileVector::hasDynamicRateFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicRate()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void AudioProfileVector::setFormats(const FormatVector &formats)
+{
+    // Only allow to change the format of dynamic profile
+    sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
+    if (dynamicFormatProfile == 0) {
+        return;
+    }
+    for (size_t i = 0; i < formats.size(); i++) {
+        sp<AudioProfile> profile = new AudioProfile(formats[i],
+                dynamicFormatProfile->getChannels(),
+                dynamicFormatProfile->getSampleRates());
+        profile->setDynamicFormat(true);
+        profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
+        profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
+        add(profile);
+    }
+}
+
+void AudioProfileVector::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "%*s- Profiles:\n", spaces, "");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "%*sProfile %zu:", spaces + 4, "", i);
+        write(fd, buffer, strlen(buffer));
+        itemAt(i)->dump(fd, spaces + 8);
+    }
+}
+
+sp<AudioProfile> AudioProfileVector::getProfileFor(audio_format_t format) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->getFormat() == format) {
+            return itemAt(i);
+        }
+    }
+    return 0;
+}
+
+void AudioProfileVector::setSampleRatesFor(
+        const SampleRateVector &sampleRates, audio_format_t format)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicRate()) {
+            if (profile->hasValidRates()) {
+                // Need to create a new profile with same format
+                sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
+                        sampleRates);
+                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+                add(profileToAdd);
+            } else {
+                profile->setSampleRates(sampleRates);
+            }
+            return;
+        }
+    }
+}
+
+void AudioProfileVector::setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioProfile> profile = itemAt(i);
+        if (profile->getFormat() == format && profile->isDynamicChannels()) {
+            if (profile->hasValidChannels()) {
+                // Need to create a new profile with same format
+                sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
+                        profile->getSampleRates());
+                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+                add(profileToAdd);
+            } else {
+                profile->setChannels(channelMasks);
+            }
+            return;
+        }
+    }
+}
+
+// static
 int AudioProfileVector::compareFormats(const sp<AudioProfile> *profile1,
                                        const sp<AudioProfile> *profile2)
 {
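
findBestMatchingOutputConfig() walks the formats common to both profile vectors in preference order, then intersects channel masks (input masks converted to output masks) and sample rates. A hedged usage sketch; `inputProfiles` and `outputProfiles` are assumed to be populated AudioProfileVector instances and are not part of this change:

    std::vector<audio_format_t> preferredFormats = { AUDIO_FORMAT_PCM_16_BIT };
    std::vector<audio_channel_mask_t> preferredOutputChannels = { AUDIO_CHANNEL_OUT_STEREO };
    audio_config_base_t best = AUDIO_CONFIG_BASE_INITIALIZER;
    if (inputProfiles.findBestMatchingOutputConfig(outputProfiles,
            preferredFormats, preferredOutputChannels,
            true /*preferHigherSamplingRates*/, &best) == NO_ERROR) {
        // 'best' now holds a format/channel/rate combination supported on both sides,
        // preferring 16-bit PCM, stereo output, and the highest common sample rate.
    }
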
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index 91dee35..5ea4c92 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -35,14 +35,11 @@
                            audio_channel_mask_t channelMask,
                            audio_input_flags_t flags,
                            uid_t uid,
-                           bool isSoundTrigger,
-                           AudioMix* policyMix,
-                           AudioPolicyClientInterface *clientInterface) :
+                           bool isSoundTrigger) :
     mRecordClientInfo({ .uid = uid, .session = session, .source = inputSource}),
     mConfig({ .format = format, .sample_rate = sampleRate, .channel_mask = channelMask}),
     mFlags(flags), mIsSoundTrigger(isSoundTrigger),
-    mOpenCount(1), mActiveCount(0), mPolicyMix(policyMix), mClientInterface(clientInterface),
-    mInfoProvider(NULL)
+    mOpenCount(1), mActiveCount(0)
 {
 }
 
@@ -60,7 +57,6 @@
 
 uint32_t AudioSession::changeActiveCount(int delta)
 {
-    const uint32_t oldActiveCount = mActiveCount;
     if ((delta + (int)mActiveCount) < 0) {
         ALOGW("%s invalid delta %d, active count %d",
               __FUNCTION__, delta, mActiveCount);
@@ -68,34 +64,6 @@
     }
     mActiveCount += delta;
     ALOGV("%s active count %d", __FUNCTION__, mActiveCount);
-    int event = RECORD_CONFIG_EVENT_NONE;
-
-    if ((oldActiveCount == 0) && (mActiveCount > 0)) {
-        event = RECORD_CONFIG_EVENT_START;
-    } else if ((oldActiveCount > 0) && (mActiveCount == 0)) {
-        event = RECORD_CONFIG_EVENT_STOP;
-    }
-
-    if (event != RECORD_CONFIG_EVENT_NONE) {
-        // Dynamic policy callback:
-        // if input maps to a dynamic policy with an activity listener, notify of state change
-        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
-        {
-            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
-                    (event == RECORD_CONFIG_EVENT_START) ? MIX_STATE_MIXING : MIX_STATE_IDLE);
-        }
-
-        // Recording configuration callback:
-        const AudioIODescriptorInterface* provider = mInfoProvider;
-        const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
-                AUDIO_CONFIG_BASE_INITIALIZER;
-        const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
-                AUDIO_PATCH_HANDLE_NONE;
-        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-            mClientInterface->onRecordingConfigurationUpdate(event, &mRecordClientInfo,
-                    &mConfig, &deviceConfig, patchHandle);
-        }
-    }
 
     return mActiveCount;
 }
@@ -114,27 +82,6 @@
     return false;
 }
 
-void AudioSession::setInfoProvider(AudioIODescriptorInterface *provider)
-{
-    mInfoProvider = provider;
-}
-
-void AudioSession::onIODescriptorUpdate() const
-{
-    if (mActiveCount > 0) {
-        // resend the callback after requerying the informations from the info provider
-        const AudioIODescriptorInterface* provider = mInfoProvider;
-        const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
-                AUDIO_CONFIG_BASE_INITIALIZER;
-        const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
-                AUDIO_PATCH_HANDLE_NONE;
-        if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-            mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
-                    &mRecordClientInfo, &mConfig, &deviceConfig, patchHandle);
-        }
-    }
-}
-
 status_t AudioSession::dump(int fd, int spaces, int index) const
 {
     const size_t SIZE = 256;
@@ -169,8 +116,7 @@
 }
 
 status_t AudioSessionCollection::addSession(audio_session_t session,
-                                         const sp<AudioSession>& audioSession,
-                                         AudioIODescriptorInterface *provider)
+                                         const sp<AudioSession>& audioSession)
 {
     ssize_t index = indexOfKey(session);
 
@@ -178,7 +124,6 @@
         ALOGW("addSession() session %d already in", session);
         return ALREADY_EXISTS;
     }
-    audioSession->setInfoProvider(provider);
     add(session, audioSession);
     ALOGV("addSession() session %d  client %d source %d",
             session, audioSession->uid(), audioSession->inputSource());
@@ -194,7 +139,6 @@
         return ALREADY_EXISTS;
     }
     ALOGV("removeSession() session %d", session);
-    valueAt(index)->setInfoProvider(NULL);
     removeItemsAt(index);
     return NO_ERROR;
 }
@@ -271,13 +215,6 @@
     return source;
 }
 
-void AudioSessionCollection::onIODescriptorUpdate() const
-{
-    for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->onIODescriptorUpdate();
-    }
-}
-
 status_t AudioSessionCollection::dump(int fd, int spaces) const
 {
     const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
deleted file mode 100644
index ba33e57..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioSourceDescriptor"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/String8.h>
-#include <media/AudioPolicyHelper.h>
-#include <HwModule.h>
-#include <AudioGain.h>
-#include <AudioSourceDescriptor.h>
-#include <DeviceDescriptor.h>
-#include <IOProfile.h>
-#include <AudioOutputDescriptor.h>
-
-namespace android {
-
-status_t AudioSourceDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "mStream: %d\n", audio_attributes_to_stream_type(&mAttributes));
-    result.append(buffer);
-    snprintf(buffer, SIZE, "mDevice:\n");
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-    mDevice->dump(fd, 2 , 0);
-    return NO_ERROR;
-}
-
-
-status_t AudioSourceCollection::dump(int fd) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-
-    snprintf(buffer, SIZE, "\nAudio sources dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < size(); i++) {
-        snprintf(buffer, SIZE, "- Source %d dump:\n", keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        valueAt(i)->dump(fd);
-    }
-
-    return NO_ERROR;
-}
-
-}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
new file mode 100644
index 0000000..5aca3cc
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM_ClientDescriptor"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include "AudioGain.h"
+#include "AudioOutputDescriptor.h"
+#include "AudioPatch.h"
+#include "ClientDescriptor.h"
+#include "DeviceDescriptor.h"
+#include "HwModule.h"
+#include "IOProfile.h"
+
+namespace android {
+
+status_t ClientDescriptor::dump(int fd, int spaces, int index)
+{
+    String8 out;
+
+    // FIXME: use this until the other descriptor classes have a dump-to-String8 method
+    mDumpFd = fd;
+
+    status_t status = dump(out, spaces, index);
+    if (status == NO_ERROR) {
+        write(fd, out.string(), out.size());
+    }
+
+    return status;
+}
+
+status_t ClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    out.appendFormat("%*sClient %d:\n", spaces, "", index+1);
+    out.appendFormat("%*s- Port ID: %d Session Id: %d UID: %d\n", spaces, "",
+             mPortId, mSessionId, mUid);
+    out.appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
+             mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
+    out.appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
+    out.appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+    return NO_ERROR;
+}
+
+status_t TrackClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    ClientDescriptor::dump(out, spaces, index);
+
+    out.appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
+
+    return NO_ERROR;
+}
+
+status_t RecordClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    ClientDescriptor::dump(out, spaces, index);
+
+    out.appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+
+    return NO_ERROR;
+}
+
+SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
+         audio_attributes_t attributes, const sp<AudioPatch>& patchDesc,
+         const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream) :
+    TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
+        AUDIO_CONFIG_BASE_INITIALIZER, AUDIO_PORT_HANDLE_NONE, stream, AUDIO_OUTPUT_FLAG_NONE),
+        mPatchDesc(patchDesc), mSrcDevice(srcDevice)
+{
+}
+
+void SourceClientDescriptor::setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput)
+{
+    mSwOutput = swOutput;
+}
+
+void SourceClientDescriptor::setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput)
+{
+    mHwOutput = hwOutput;
+}
+
+status_t SourceClientDescriptor::dump(String8& out, int spaces, int index)
+{
+    TrackClientDescriptor::dump(out, spaces, index);
+
+    if (mDumpFd >= 0) {
+        out.appendFormat("%*s- Device:\n", spaces, "");
+        write(mDumpFd, out.string(), out.size());
+
+        mSrcDevice->dump(mDumpFd, 2, 0);
+        mDumpFd = -1;
+    }
+
+    return NO_ERROR;
+}
+
+status_t SourceClientCollection::dump(int fd) const
+{
+    String8 out;
+    out.append("\nAudio sources:\n");
+    write(fd, out.string(), out.size());
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->dump(fd, 2, i);
+    }
+
+    return NO_ERROR;
+}
+
+}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index d3cc8b9..1638645 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -121,17 +121,28 @@
     return ret;
 }
 
-audio_devices_t DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
+DeviceVector DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
 {
-    audio_devices_t devices = AUDIO_DEVICE_NONE;
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->getModuleHandle() == moduleHandle) {
-            devices |= itemAt(i)->type();
+    DeviceVector devices;
+    for (const auto& device : *this) {
+        if (device->getModuleHandle() == moduleHandle) {
+            devices.add(device);
         }
     }
     return devices;
 }
 
+audio_devices_t DeviceVector::getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const
+{
+    audio_devices_t deviceTypes = AUDIO_DEVICE_NONE;
+    for (const auto& device : *this) {
+        if (device->getModuleHandle() == moduleHandle) {
+            deviceTypes |= device->type();
+        }
+    }
+    return deviceTypes;
+}
+
 sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address) const
 {
     sp<DeviceDescriptor> device;
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
index 38ab560..440a4e7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "APM::SessionRoute"
+#define LOG_TAG "APM_SessionRoute"
 //#define LOG_NDEBUG 0
 
 #include "SessionRoute.h"
@@ -122,19 +122,17 @@
 audio_devices_t SessionRouteMap::getActiveDeviceForStream(audio_stream_type_t streamType,
                                                           const DeviceVector& availableDevices)
 {
-    audio_devices_t device = AUDIO_DEVICE_NONE;
-
     for (size_t index = 0; index < size(); index++) {
         sp<SessionRoute> route = valueAt(index);
         if (streamType == route->mStreamType && route->isActiveOrChanged()
                 && route->mDeviceDescriptor != 0) {
-            device = route->mDeviceDescriptor->type();
+            audio_devices_t device = route->mDeviceDescriptor->type();
             if (!availableDevices.getDevicesFromTypeMask(device).isEmpty()) {
-                break;
+                return device;
             }
         }
     }
-    return device;
+    return AUDIO_DEVICE_NONE;
 }
 
 } // namespace android
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 007eea0..30f275f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -313,7 +313,7 @@
             audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
             sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
             audio_devices_t availPrimaryInputDevices =
-                 availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
+                 availableInputDevices.getDeviceTypesFromHwModule(primaryOutput->getModuleHandle());
 
             // TODO: getPrimaryOutput return only devices from first module in
             // audio_policy_configuration.xml, hearing aid is not there, but it's
@@ -408,8 +408,7 @@
 
     case STRATEGY_SONIFICATION:
 
-        // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
-        // handleIncallSonification().
+        // If incall, just select the STRATEGY_PHONE device
         if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
             device = getDeviceForStrategyInt(
                     STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
@@ -669,9 +668,8 @@
         if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
                 (availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
             sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
-            availableDeviceTypes =
-                    availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle())
-                    & ~AUDIO_DEVICE_BIT_IN;
+            availableDeviceTypes = availableInputDevices.getDeviceTypesFromHwModule(
+                    primaryOutput->getModuleHandle()) & ~AUDIO_DEVICE_BIT_IN;
         }
 
         switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
@@ -726,6 +724,9 @@
             device = AUDIO_DEVICE_IN_BACK_MIC;
         } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
             device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+            // This is specifically for a device without built-in mic
+            device = AUDIO_DEVICE_IN_USB_DEVICE;
         }
         break;
     case AUDIO_SOURCE_VOICE_DOWNLINK:
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 7154cb2..d1515e2 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -31,6 +31,7 @@
 
 #include <inttypes.h>
 #include <math.h>
+#include <vector>
 
 #include <AudioPolicyManagerInterface.h>
 #include <AudioPolicyEngineInstance.h>
@@ -38,7 +39,6 @@
 #include <utils/Log.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicyHelper.h>
-#include <media/PatchBuilder.h>
 #include <private/android_filesystem_config.h>
 #include <soundtrigger/SoundTrigger.h>
 #include <system/audio.h>
@@ -82,6 +82,16 @@
     AUDIO_FORMAT_AAC_XHE,
 };
 
+// Compressed formats for MSD module, ordered from most preferred to least preferred.
+static const std::vector<audio_format_t> compressedFormatsOrder = {{
+        AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
+        AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
+// Channel masks for MSD module, 3D > 2D > 1D ordering (most preferred to least preferred).
+static const std::vector<audio_channel_mask_t> surroundChannelMasksOrder = {{
+        AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
+        AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
+        AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
+
 // ----------------------------------------------------------------------------
 // AudioPolicyInterface implementation
 // ----------------------------------------------------------------------------
@@ -202,31 +212,30 @@
             return BAD_VALUE;
         }
 
-        // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
-        // output is suspended before any tracks are moved to it
-        checkA2dpSuspend();
-        checkOutputForAllStrategies();
-        // outputs must be closed after checkOutputForAllStrategies() is executed
-        if (!outputs.isEmpty()) {
-            for (audio_io_handle_t output : outputs) {
-                sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
-                // close unused outputs after device disconnection or direct outputs that have been
-                // opened by checkOutputsForDevice() to query dynamic parameters
-                if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
-                        (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
-                         (desc->mDirectOpenCount == 0))) {
-                    closeOutput(output);
+        checkForDeviceAndOutputChanges([&]() {
+            // outputs must be closed after checkOutputForAllStrategies() is executed
+            if (!outputs.isEmpty()) {
+                for (audio_io_handle_t output : outputs) {
+                    sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+                    // close unused outputs after device disconnection or direct outputs that have been
+                    // opened by checkOutputsForDevice() to query dynamic parameters
+                    if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
+                            (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+                             (desc->mDirectOpenCount == 0))) {
+                        closeOutput(output);
+                    }
                 }
+                // check A2DP again after closing A2DP output to reset mA2dpSuspended if needed
+                return true;
             }
-            // check again after closing A2DP output to reset mA2dpSuspended if needed
-            checkA2dpSuspend();
-        }
+            return false;
+        });
 
-        updateDevicesAndOutputs();
         if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
             audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
             updateCallRouting(newDevice);
         }
+        const audio_devices_t msdOutDevice = getMsdAudioOutDeviceTypes();
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
@@ -234,7 +243,8 @@
                 // do not force device change on duplicated output because if device is 0, it will
                 // also force a device 0 for the two outputs it is duplicated to which may override
                 // a valid device selection on those outputs.
-                bool force = !desc->isDuplicated()
+                bool force = (msdOutDevice == AUDIO_DEVICE_NONE || msdOutDevice != desc->device())
+                        && !desc->isDuplicated()
                         && (!device_distinguishes_on_address(device)
                                 // always force when disconnecting (a non-duplicated device)
                                 || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
@@ -507,13 +517,7 @@
         // symmetric to the one in startInput()
         for (const auto& activeDesc : mInputs.getActiveInputs()) {
             if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
-                AudioSessionCollection activeSessions =
-                        activeDesc->getAudioSessions(true /*activeOnly*/);
-                for (size_t j = 0; j < activeSessions.size(); j++) {
-                    audio_session_t activeSession = activeSessions.keyAt(j);
-                    stopInput(activeDesc->mIoHandle, activeSession);
-                    releaseInput(activeDesc->mIoHandle, activeSession);
-                }
+                closeSessions(activeDesc, true  /*activeOnly*/);
             }
         }
     }
@@ -533,7 +537,7 @@
 }
 
 sp<DeviceDescriptor> AudioPolicyManager::findDevice(
-        const DeviceVector& devices, audio_devices_t device) {
+        const DeviceVector& devices, audio_devices_t device) const {
     DeviceVector deviceList = devices.getDevicesFromTypeMask(device);
     ALOG_ASSERT(!deviceList.isEmpty(),
             "%s() selected device type %#x is not in devices list", __func__, device);
@@ -551,14 +555,8 @@
         return;
     }
     /// Opens: can these line be executed after the switch of volume curves???
-    // if leaving call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(oldState)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-            handleIncallSonification((audio_stream_type_t)stream, false, true);
-        }
-
         // force reevaluating accessibility routing when call stops
         mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
     }
@@ -571,9 +569,7 @@
                   || (is_state_in_call(state) && (state != oldState)));
 
     // check for device and output changes triggered by new phone state
-    checkA2dpSuspend();
-    checkOutputForAllStrategies();
-    updateDevicesAndOutputs();
+    checkForDeviceAndOutputChanges();
 
     int delayMs = 0;
     if (isStateInCall(state)) {
@@ -637,14 +633,8 @@
         }
     }
 
-    // if entering in call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(state)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-            handleIncallSonification((audio_stream_type_t)stream, true, true);
-        }
-
         // force reevaluating accessibility routing when call starts
         mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
     }
@@ -679,9 +669,7 @@
             (usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
 
     // check for device and output changes triggered by new force usage
-    checkA2dpSuspend();
-    checkOutputForAllStrategies();
-    updateDevicesAndOutputs();
+    checkForDeviceAndOutputChanges();
 
     //FIXME: workaround for truncated touch sounds
     // to be removed when the problem is handled by system UI
@@ -797,6 +785,12 @@
                                               audio_port_handle_t *portId)
 {
     audio_attributes_t attributes;
+    DeviceVector outputDevices;
+    routing_strategy strategy;
+    audio_devices_t device;
+    audio_port_handle_t requestedDeviceId = *selectedDeviceId;
+    audio_devices_t msdDevice = getMsdAudioOutDeviceTypes();
+
     if (attr != NULL) {
         if (!isValidAttributes(attr)) {
             ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
@@ -813,63 +807,116 @@
         stream_type_to_audio_attributes(*stream, &attributes);
     }
 
-    // TODO: check for existing client for this port ID
-    if (*portId == AUDIO_PORT_HANDLE_NONE) {
-        *portId = AudioPort::getNextUniqueId();
-    }
-
-    sp<SwAudioOutputDescriptor> desc;
-    if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
-        ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
-        if (!audio_has_proportional_frames(config->format)) {
-            return BAD_VALUE;
-        }
-        *stream = streamTypefromAttributesInt(&attributes);
-        *output = desc->mIoHandle;
-        ALOGV("getOutputForAttr() returns output %d", *output);
-        return NO_ERROR;
-    }
-    if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
-        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
-        return BAD_VALUE;
-    }
-
     ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
             " session %d selectedDeviceId %d",
             attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
             session, *selectedDeviceId);
 
-    *stream = streamTypefromAttributesInt(&attributes);
+    // TODO: check for existing client for this port ID
+    if (*portId == AUDIO_PORT_HANDLE_NONE) {
+        *portId = AudioPort::getNextUniqueId();
+    }
 
-    // Explicit routing?
+    // First check for explicit routing (e.g. setPreferredDevice)
     sp<DeviceDescriptor> deviceDesc;
     if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
         deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
+    } else {
+        // If no explicit route, is there a matching dynamic policy that applies?
+        sp<SwAudioOutputDescriptor> desc;
+        if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
+            ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+            if (!audio_has_proportional_frames(config->format)) {
+                return BAD_VALUE;
+            }
+            *stream = streamTypefromAttributesInt(&attributes);
+            *output = desc->mIoHandle;
+            ALOGV("getOutputForAttr() returns output %d", *output);
+            goto exit;
+        }
+
+        // Virtual sources must always be dynamically or explicitly routed
+        if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+            ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+            return BAD_VALUE;
+        }
     }
+
+    // Virtual sources must always be dynamically or explicitly routed
+    if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+        return BAD_VALUE;
+    }
+
+    *stream = streamTypefromAttributesInt(&attributes);
+
+    // TODO: Should this happen only if an explicit route is active?
+    // The previous code structure meant that this would always happen, which
+    // appears to result in adding a null deviceDesc when not using an
+    // explicit route. Is that the intended and necessary behavior?
     mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
 
-    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    strategy = (routing_strategy) getStrategyForAttr(&attributes);
+    device = getDeviceForStrategy(strategy, false /*fromCache*/);
 
     if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
         *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
     }
 
+    // Set in-call music only if the device was explicitly set, and fall back to the device
+    // chosen by the engine otherwise.
+    // FIXME: provide a more generic approach which is not device specific and move this back
+    // to getOutputForDevice.
+    if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
+        *stream == AUDIO_STREAM_MUSIC &&
+        audio_is_linear_pcm(config->format) &&
+        isInCall()) {
+        if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
+            *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
+        } else {
+            device = mEngine->getDeviceForStrategy(strategy);
+        }
+    }
+
     ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
           "flags %#x",
           device, config->sample_rate, config->format, config->channel_mask, *flags);
 
-    *output = getOutputForDevice(device, session, *stream, config, flags);
+    *output = AUDIO_IO_HANDLE_NONE;
+    if (msdDevice != AUDIO_DEVICE_NONE) {
+        *output = getOutputForDevice(msdDevice, session, *stream, config, flags);
+        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
+            ALOGV("%s() Using MSD device 0x%x instead of device 0x%x",
+                    __func__, msdDevice, device);
+            device = msdDevice;
+        } else {
+            *output = AUDIO_IO_HANDLE_NONE;
+        }
+    }
+    if (*output == AUDIO_IO_HANDLE_NONE) {
+        *output = getOutputForDevice(device, session, *stream, config, flags);
+    }
     if (*output == AUDIO_IO_HANDLE_NONE) {
         mOutputRoutes.removeRoute(session);
         return INVALID_OPERATION;
     }
 
-    DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
+    outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
     *selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
             : AUDIO_PORT_HANDLE_NONE;
 
-    ALOGV("  getOutputForAttr() returns output %d selectedDeviceId %d", *output, *selectedDeviceId);
+exit:
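+    // Register the client with the chosen output so it can later be found by port ID.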
+    audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
+        .format = config->format,
+        .channel_mask = config->channel_mask };
+    sp<TrackClientDescriptor> clientDesc =
+        new TrackClientDescriptor(*portId, uid, session,
+                                  attributes, clientConfig, requestedDeviceId, *stream, *flags);
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
+    outputDesc->clients().emplace(*portId, clientDesc);
+
+    ALOGV("  getOutputForAttr() returns output %d selectedDeviceId %d for port ID %d",
+          *output, *selectedDeviceId, *portId);
 
     return NO_ERROR;
 }
@@ -910,11 +957,6 @@
         *flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
                                        AUDIO_OUTPUT_FLAG_DIRECT);
         ALOGV("Set VoIP and Direct output flags for PCM format");
-    } else if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
-        stream == AUDIO_STREAM_MUSIC &&
-        audio_is_linear_pcm(config->format) &&
-        isInCall()) {
-        *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
     }
 
 
@@ -1036,6 +1078,164 @@
     return output;
 }
 
+sp<DeviceDescriptor> AudioPolicyManager::getMsdAudioInDevice() const {
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    if (msdModule != 0) {
+        DeviceVector msdInputDevices = mAvailableInputDevices.getDevicesFromHwModule(
+                msdModule->getHandle());
+        if (!msdInputDevices.isEmpty()) return msdInputDevices.itemAt(0);
+    }
+    return 0;
+}
+
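+// Returns the types of all available output devices attached to the MSD HW module,
+// or AUDIO_DEVICE_NONE if no MSD module is configured.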
+audio_devices_t AudioPolicyManager::getMsdAudioOutDeviceTypes() const {
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    if (msdModule != 0) {
+        return mAvailableOutputDevices.getDeviceTypesFromHwModule(msdModule->getHandle());
+    }
+    return AUDIO_DEVICE_NONE;
+}
+
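+// Collects all audio patches whose source device belongs to the MSD HW module.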
+const AudioPatchCollection AudioPolicyManager::getMsdPatches() const {
+    AudioPatchCollection msdPatches;
+    audio_module_handle_t msdModuleHandle = mHwModules.getModuleFromName(
+            AUDIO_HARDWARE_MODULE_ID_MSD)->getHandle();
+    if (msdModuleHandle == AUDIO_MODULE_HANDLE_NONE) return msdPatches;
+    for (size_t i = 0; i < mAudioPatches.size(); ++i) {
+        sp<AudioPatch> patch = mAudioPatches.valueAt(i);
+        for (size_t j = 0; j < patch->mPatch.num_sources; ++j) {
+            const struct audio_port_config *source = &patch->mPatch.sources[j];
+            if (source->type == AUDIO_PORT_TYPE_DEVICE &&
+                    source->ext.device.hw_module == msdModuleHandle) {
+                msdPatches.addAudioPatch(patch->mHandle, patch);
+            }
+        }
+    }
+    return msdPatches;
+}
+
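+// Finds the best matching source/sink port configurations for an MSD patch to the given
+// output device, using the format and channel mask preference orders defined above.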
+status_t AudioPolicyManager::getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+        bool hwAvSync, audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
+{
+    sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+    if (msdModule == nullptr) {
+        ALOGE("%s() unable to get MSD module", __func__);
+        return NO_INIT;
+    }
+    sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice);
+    if (deviceModule == nullptr) {
+        ALOGE("%s() unable to get module for %#x", __func__, outputDevice);
+        return NO_INIT;
+    }
+    const InputProfileCollection &inputProfiles = msdModule->getInputProfiles();
+    if (inputProfiles.isEmpty()) {
+        ALOGE("%s() no input profiles for MSD module", __func__);
+        return NO_INIT;
+    }
+    const OutputProfileCollection &outputProfiles = deviceModule->getOutputProfiles();
+    if (outputProfiles.isEmpty()) {
+        ALOGE("%s() no output profiles for device %#x", __func__, outputDevice);
+        return NO_INIT;
+    }
+    AudioProfileVector msdProfiles;
+    // Each IOProfile represents a MixPort from audio_policy_configuration.xml
+    for (const auto &inProfile : inputProfiles) {
+        if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0)) {
+            msdProfiles.appendVector(inProfile->getAudioProfiles());
+        }
+    }
+    AudioProfileVector deviceProfiles;
+    for (const auto &outProfile : outputProfiles) {
+        if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) {
+            deviceProfiles.appendVector(outProfile->getAudioProfiles());
+        }
+    }
+    struct audio_config_base bestSinkConfig;
+    status_t result = msdProfiles.findBestMatchingOutputConfig(deviceProfiles,
+            compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/,
+            &bestSinkConfig);
+    if (result != NO_ERROR) {
+        ALOGD("%s() no matching profiles found for device: %#x, hwAvSync: %d",
+                __func__, outputDevice, hwAvSync);
+        return result;
+    }
+    sinkConfig->sample_rate = bestSinkConfig.sample_rate;
+    sinkConfig->channel_mask = bestSinkConfig.channel_mask;
+    sinkConfig->format = bestSinkConfig.format;
+    // For encoded streams force direct flag to prevent downstream mixing.
+    sinkConfig->flags.output = static_cast<audio_output_flags_t>(
+            sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_DIRECT);
+    sourceConfig->sample_rate = bestSinkConfig.sample_rate;
+    // Specify exact channel mask to prevent guessing by bit count in PatchPanel.
+    sourceConfig->channel_mask = audio_channel_mask_out_to_in(bestSinkConfig.channel_mask);
+    sourceConfig->format = bestSinkConfig.format;
+    // Copy input stream directly without any processing (e.g. resampling).
+    sourceConfig->flags.input = static_cast<audio_input_flags_t>(
+            sourceConfig->flags.input | AUDIO_INPUT_FLAG_DIRECT);
+    if (hwAvSync) {
+        sinkConfig->flags.output = static_cast<audio_output_flags_t>(
+                sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+        sourceConfig->flags.input = static_cast<audio_input_flags_t>(
+                sourceConfig->flags.input | AUDIO_INPUT_FLAG_HW_AV_SYNC);
+    }
+    const unsigned int config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE |
+            AUDIO_PORT_CONFIG_CHANNEL_MASK | AUDIO_PORT_CONFIG_FORMAT | AUDIO_PORT_CONFIG_FLAGS;
+    sinkConfig->config_mask |= config_mask;
+    sourceConfig->config_mask |= config_mask;
+    return NO_ERROR;
+}
+
+PatchBuilder AudioPolicyManager::buildMsdPatch(audio_devices_t outputDevice) const
+{
+    PatchBuilder patchBuilder;
+    patchBuilder.addSource(getMsdAudioInDevice()).
+            addSink(findDevice(mAvailableOutputDevices, outputDevice));
+    audio_port_config sourceConfig = patchBuilder.patch()->sources[0];
+    audio_port_config sinkConfig = patchBuilder.patch()->sinks[0];
+    // TODO: Figure out whether MSD module has HW_AV_SYNC flag set in the AP config file.
+    // For now, we just forcefully try with HwAvSync first.
+    status_t res = getBestMsdAudioProfileFor(outputDevice, true /*hwAvSync*/,
+            &sourceConfig, &sinkConfig) == NO_ERROR ? NO_ERROR :
+            getBestMsdAudioProfileFor(
+                    outputDevice, false /*hwAvSync*/, &sourceConfig, &sinkConfig);
+    if (res == NO_ERROR) {
+        // Found a matching profile for encoded audio. Re-create PatchBuilder with this config.
+        return (PatchBuilder()).addSource(sourceConfig).addSink(sinkConfig);
+    }
+    ALOGV("%s() no matching profile found. Fall through to default PCM patch"
+            " supporting PCM format conversion.", __func__);
+    return patchBuilder;
+}
+
+status_t AudioPolicyManager::setMsdPatch(audio_devices_t outputDevice) {
+    ALOGV("%s() for outputDevice %#x", __func__, outputDevice);
+    if (outputDevice == AUDIO_DEVICE_NONE) {
+        // Use media strategy for unspecified output device. This should only
+        // occur on checkForDeviceAndOutputChanges(). Device connection events may
+        // therefore invalidate explicit routing requests.
+        outputDevice = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+    }
+    PatchBuilder patchBuilder = buildMsdPatch(outputDevice);
+    const struct audio_patch* patch = patchBuilder.patch();
+    const AudioPatchCollection msdPatches = getMsdPatches();
+    if (!msdPatches.isEmpty()) {
+        LOG_ALWAYS_FATAL_IF(msdPatches.size() > 1,
+                "The current MSD prototype only supports one output patch");
+        sp<AudioPatch> currentPatch = msdPatches.valueAt(0);
+        if (audio_patches_are_equal(&currentPatch->mPatch, patch)) {
+            return NO_ERROR;
+        }
+        releaseAudioPatch(currentPatch->mHandle, mUidCached);
+    }
+    status_t status = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/,
+            patch, 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
+    ALOGE_IF(status != NO_ERROR, "%s() error %d creating MSD audio patch", __func__, status);
+    ALOGI_IF(status == NO_ERROR, "%s() Patch created from MSD_IN to "
+           "device:%#x (format:%#x channels:%#x samplerate:%d)", __func__, outputDevice,
+           patch->sources[0].format, patch->sources[0].channel_mask, patch->sources[0].sample_rate);
+    return status;
+}
+
 audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                                        audio_output_flags_t flags,
                                                        audio_format_t format)
@@ -1116,19 +1316,21 @@
     return outputs[0];
 }
 
-status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
-                                             audio_stream_type_t stream,
-                                             audio_session_t session)
+status_t AudioPolicyManager::startOutput(audio_port_handle_t portId)
 {
-    ALOGV("startOutput() output %d, stream %d, session %d",
-          output, stream, session);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("startOutput() unknown output %d", output);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+    if (outputDesc == 0) {
+        ALOGW("startOutput() no output for client %d", portId);
         return BAD_VALUE;
     }
+    sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+    audio_stream_type_t stream = client->stream();
+    audio_session_t session = client->session();
 
-    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+    ALOGV("startOutput() output %d, stream %d, session %d",
+          outputDesc->mIoHandle, stream, session);
 
     status_t status = outputDesc->start();
     if (status != NO_ERROR) {
@@ -1152,7 +1354,7 @@
     } else if (mOutputRoutes.getAndClearRouteChanged(session)) {
         newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
         if (newDevice != outputDesc->device()) {
-            checkStrategyRoute(getStrategy(stream), output);
+            checkStrategyRoute(getStrategy(stream), outputDesc->mIoHandle);
         }
     } else {
         newDevice = AUDIO_DEVICE_NONE;
@@ -1279,11 +1481,6 @@
         const uint32_t muteWaitMs =
                 setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
 
-        // handle special case for sonification while in call
-        if (isInCall()) {
-            handleIncallSonification(stream, true, false);
-        }
-
         // apply volume rules for current stream and device if necessary
         checkAndSetVolume(stream,
                           mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
@@ -1325,19 +1522,20 @@
     return NO_ERROR;
 }
 
-
-status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
-                                            audio_stream_type_t stream,
-                                            audio_session_t session)
+status_t AudioPolicyManager::stopOutput(audio_port_handle_t portId)
 {
-    ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("stopOutput() unknown output %d", output);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+    if (outputDesc == 0) {
+        ALOGW("stopOutput() no output for client %d", portId);
         return BAD_VALUE;
     }
+    sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+    audio_stream_type_t stream = client->stream();
+    audio_session_t session = client->session();
 
-    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+    ALOGV("stopOutput() output %d, stream %d, session %d", outputDesc->mIoHandle, stream, session);
 
     if (outputDesc->mRefCount[stream] == 1) {
         // Automatically disable the remote submix input when output is stopped on a
@@ -1378,11 +1576,6 @@
     // always handle stream stop, check which stream type is stopping
     handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
 
-    // handle special case for sonification while in call
-    if (isInCall()) {
-        handleIncallSonification(stream, false, false);
-    }
-
     if (outputDesc->mRefCount[stream] > 0) {
         // decrement usage count of this stream on the output
         outputDesc->changeRefCount(stream, -1);
@@ -1439,32 +1632,35 @@
     }
 }
 
-void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
-                                       audio_stream_type_t stream __unused,
-                                       audio_session_t session __unused)
+void AudioPolicyManager::releaseOutput(audio_port_handle_t portId)
 {
-    ALOGV("releaseOutput() %d", output);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("releaseOutput() releasing unknown output %d", output);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+    if (outputDesc == 0) {
+        ALOGW("releaseOutput() no output for client %d", portId);
         return;
     }
+    sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+    audio_session_t session = client->session();
+
+    ALOGV("releaseOutput() %d", outputDesc->mIoHandle);
 
     // Routing
     mOutputRoutes.removeRoute(session);
 
-    sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(index);
-    if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-        if (desc->mDirectOpenCount <= 0) {
+    if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+        if (outputDesc->mDirectOpenCount <= 0) {
             ALOGW("releaseOutput() invalid open count %d for output %d",
-                                                              desc->mDirectOpenCount, output);
+                  outputDesc->mDirectOpenCount, outputDesc->mIoHandle);
             return;
         }
-        if (--desc->mDirectOpenCount == 0) {
-            closeOutput(output);
+        if (--outputDesc->mDirectOpenCount == 0) {
+            closeOutput(outputDesc->mIoHandle);
             mpClientInterface->onAudioPortListUpdate();
         }
     }
+    outputDesc->clients().erase(portId);
 }
 
 
@@ -1489,6 +1685,9 @@
     audio_source_t inputSource = attr->source;
     AudioMix *policyMix = NULL;
     DeviceVector inputDevices;
+    sp<AudioInputDescriptor> inputDesc;
+    sp<RecordClientDescriptor> clientDesc;
+    audio_port_handle_t requestedDeviceId = *selectedDeviceId;
 
     if (inputSource == AUDIO_SOURCE_DEFAULT) {
         inputSource = AUDIO_SOURCE_MIC;
@@ -1544,7 +1743,7 @@
                 : AUDIO_PORT_HANDLE_NONE;
         ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session);
 
-        return NO_ERROR;
+        goto exit;
     }
 
     *input = AUDIO_IO_HANDLE_NONE;
@@ -1610,8 +1809,14 @@
     *selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
             : AUDIO_PORT_HANDLE_NONE;
 
-    ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d",
-            *input, *inputType, *selectedDeviceId);
+exit:
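+    // Register the record client with the chosen input so it can later be found by port ID.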
+    clientDesc = new RecordClientDescriptor(*portId, uid, session,
+                                  *attr, *config, requestedDeviceId, inputSource, flags);
+    inputDesc = mInputs.valueFor(*input);
+    inputDesc->clients().emplace(*portId, clientDesc);
+
+    ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d for port ID %d",
+            *input, *inputType, *selectedDeviceId, *portId);
 
     return NO_ERROR;
 
@@ -1693,74 +1898,7 @@
                                                      config->channel_mask,
                                                      flags,
                                                      uid,
-                                                     isSoundTrigger,
-                                                     policyMix, mpClientInterface);
-
-// FIXME: disable concurrent capture until UI is ready
-#if 0
-    // reuse an open input if possible
-    sp<AudioInputDescriptor> reusedInputDesc;
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
-        // reuse input if:
-        // - it shares the same profile
-        //      AND
-        // - it is not a reroute submix input
-        //      AND
-        // - it is: not used for sound trigger
-        //                OR
-        //          used for sound trigger and all clients use the same session ID
-        //
-        if ((profile == desc->mProfile) &&
-            (isSoundTrigger == desc->isSoundTrigger()) &&
-            !is_virtual_input_device(device)) {
-
-            sp<AudioSession> as = desc->getAudioSession(session);
-            if (as != 0) {
-                // do not allow unmatching properties on same session
-                if (as->matches(audioSession)) {
-                    as->changeOpenCount(1);
-                } else {
-                    ALOGW("getInputForDevice() record with different attributes"
-                          " exists for session %d", session);
-                    continue;
-                }
-            } else if (isSoundTrigger) {
-                continue;
-            }
-
-            // Reuse the already opened input stream on this profile if:
-            // - the new capture source is background OR
-            // - the path requested configurations match OR
-            // - the new source priority is less than the highest source priority on this input
-            // If the input stream cannot be reused, close it before opening a new stream
-            // on the same profile for the new client so that the requested path configuration
-            // can be selected.
-            if (!isConcurrentSource(inputSource) &&
-                    ((desc->mSamplingRate != samplingRate ||
-                    desc->mChannelMask != config->channel_mask ||
-                    !audio_formats_match(desc->mFormat, config->format)) &&
-                    (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
-                     source_priority(inputSource)))) {
-                reusedInputDesc = desc;
-                continue;
-            } else {
-                desc->addAudioSession(session, audioSession);
-                ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
-                return mInputs.keyAt(i);
-            }
-        }
-    }
-
-    if (reusedInputDesc != 0) {
-        AudioSessionCollection sessions = reusedInputDesc->getAudioSessions(false /*activeOnly*/);
-        for (size_t j = 0; j < sessions.size(); j++) {
-            audio_session_t currentSession = sessions.keyAt(j);
-            stopInput(reusedInputDesc->mIoHandle, currentSession);
-            releaseInput(reusedInputDesc->mIoHandle, currentSession);
-        }
-    }
-#endif
+                                                     isSoundTrigger);
 
     if (!profile->canOpenNewIo()) {
         return AUDIO_IO_HANDLE_NONE;
@@ -1881,44 +2019,32 @@
 }
 
 
-status_t AudioPolicyManager::startInput(audio_io_handle_t input,
-                                        audio_session_t session,
+status_t AudioPolicyManager::startInput(audio_port_handle_t portId,
                                         bool silenced,
                                         concurrency_type__mask_t *concurrency)
 {
+    *concurrency = API_INPUT_CONCURRENCY_NONE;
+
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+    if (inputDesc == 0) {
+        ALOGW("startInput() no input for client %d", portId);
+        return BAD_VALUE;
+    }
+    sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+    audio_session_t session = client->session();
+    audio_io_handle_t input = inputDesc->mIoHandle;
 
     ALOGV("AudioPolicyManager::startInput(input:%d, session:%d, silenced:%d, concurrency:%d)",
             input, session, silenced, *concurrency);
 
-    *concurrency = API_INPUT_CONCURRENCY_NONE;
-
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("startInput() unknown input %d", input);
-        return BAD_VALUE;
-    }
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-
     sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
     if (audioSession == 0) {
         ALOGW("startInput() unknown session %d on input %d", session, input);
         return BAD_VALUE;
     }
 
-// FIXME: disable concurrent capture until UI is ready
-#if 0
-    if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
-        ALOGW("startInput(%d) failed: other input already started", input);
-        return INVALID_OPERATION;
-    }
-
-    if (isInCall()) {
-        *concurrency |= API_INPUT_CONCURRENCY_CALL;
-    }
-    if (mInputs.activeInputsCountOnDevices() != 0) {
-        *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
-    }
-#else
     if (!is_virtual_input_device(inputDesc->mDevice)) {
         if (mCallTxPatch != 0 &&
             inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
@@ -1943,11 +2069,8 @@
                         true /*activeOnly*/);
                 sp<AudioSession> activeSession = activeSessions.valueAt(0);
                 if (activeSession->isSilenced()) {
-                    audio_io_handle_t activeInput = activeDesc->mIoHandle;
-                    audio_session_t activeSessionId = activeSession->session();
-                    stopInput(activeInput, activeSessionId);
-                    releaseInput(activeInput, activeSessionId);
-                    ALOGV("startInput(%d) stopping silenced input %d", input, activeInput);
+                    closeSession(activeDesc, activeSession);
+                    ALOGV("startInput() session %d stopping silenced session %d",
+                          session, activeSession->session());
                     activeInputs = mInputs.getActiveInputs();
                 }
             }
@@ -2001,27 +2124,24 @@
             if (activeSource == AUDIO_SOURCE_HOTWORD) {
                 AudioSessionCollection activeSessions =
                         activeDesc->getAudioSessions(true /*activeOnly*/);
-                audio_session_t activeSession = activeSessions.keyAt(0);
-                audio_io_handle_t activeHandle = activeDesc->mIoHandle;
+                sp<AudioSession> activeSession = activeSessions[0];
                 SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
                 *concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
-                sessions.add(activeSession);
+                sessions.add(activeSession->session());
                 inputDesc->setPreemptedSessions(sessions);
-                stopInput(activeHandle, activeSession);
-                releaseInput(activeHandle, activeSession);
+                closeSession(inputDesc, activeSession);
                 ALOGV("startInput(%d) for HOTWORD preempting HOTWORD input %d",
                       input, activeDesc->mIoHandle);
             }
         }
     }
-#endif
 
     // Make sure we start with the correct silence state
     audioSession->setSilenced(silenced);
 
     // increment activity count before calling getNewInputDevice() below as only active sessions
     // are considered for device selection
-    audioSession->changeActiveCount(1);
+    inputDesc->changeRefCount(session, 1);
 
     // Routing?
     mInputRoutes.incRouteActivity(session);
@@ -2035,7 +2155,7 @@
         status_t status = inputDesc->start();
         if (status != NO_ERROR) {
             mInputRoutes.decRouteActivity(session);
-            audioSession->changeActiveCount(-1);
+            inputDesc->changeRefCount(session, -1);
             return status;
         }
 
@@ -2077,29 +2197,29 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
-                                       audio_session_t session)
+status_t AudioPolicyManager::stopInput(audio_port_handle_t portId)
 {
-    ALOGV("stopInput() input %d", input);
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("stopInput() unknown input %d", input);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+    if (inputDesc == 0) {
+        ALOGW("stopInput() no input for client %d", portId);
         return BAD_VALUE;
     }
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+    sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+    audio_session_t session = client->session();
+    audio_io_handle_t input = inputDesc->mIoHandle;
+
+    ALOGV("stopInput() input %d", input);
 
     sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
-    if (index < 0) {
-        ALOGW("stopInput() unknown session %d on input %d", session, input);
-        return BAD_VALUE;
-    }
 
     if (audioSession->activeCount() == 0) {
         ALOGW("stopInput() input %d already stopped", input);
         return INVALID_OPERATION;
     }
 
-    audioSession->changeActiveCount(-1);
+    inputDesc->changeRefCount(session, -1);
 
     // Routing?
     mInputRoutes.decRouteActivity(session);
@@ -2148,22 +2268,24 @@
     return NO_ERROR;
 }
 
-void AudioPolicyManager::releaseInput(audio_io_handle_t input,
-                                      audio_session_t session)
+void AudioPolicyManager::releaseInput(audio_port_handle_t portId)
 {
-    ALOGV("releaseInput() %d", input);
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("releaseInput() releasing unknown input %d", input);
+    ALOGV("%s portId %d", __FUNCTION__, portId);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+    if (inputDesc == 0) {
+        ALOGW("releaseInput() no input for client %d", portId);
         return;
     }
+    sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+    audio_session_t session = client->session();
+    audio_io_handle_t input = inputDesc->mIoHandle;
+
+    ALOGV("releaseInput() %d", input);
 
     // Routing
     mInputRoutes.removeRoute(session);
 
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-    ALOG_ASSERT(inputDesc != 0);
-
     sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
     if (audioSession == 0) {
         ALOGW("releaseInput() unknown session %d on input %d", session, input);
@@ -2186,10 +2308,31 @@
     }
 
     closeInput(input);
+    inputDesc->clients().erase(portId);
     mpClientInterface->onAudioPortListUpdate();
     ALOGV("releaseInput() exit");
 }
 
+void AudioPolicyManager::closeSessions(const sp<AudioInputDescriptor>& input, bool activeOnly)
+{
+    AudioSessionCollection sessions = input->getAudioSessions(activeOnly /*activeOnly*/);
+    for (size_t i = 0; i < sessions.size(); i++) {
+        closeSession(input, sessions[i]);
+    }
+}
+
+void AudioPolicyManager::closeSession(const sp<AudioInputDescriptor>& input,
+                                      const sp<AudioSession>& session)
+{
+    RecordClientVector clients = input->getClientsForSession(session->session());
+
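+    // A session may be shared by several record clients; stop and release each
+    // one so the whole session is torn down.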
+    for (const auto& client : clients) {
+        stopInput(client->portId());
+        releaseInput(client->portId());
+    }
+}
+
 void AudioPolicyManager::closeAllInputs() {
     bool patchRemoved = false;
 
@@ -2273,11 +2416,10 @@
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
         audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
         for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
-            if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+            if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
                 continue;
             }
-            if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
-                    (isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
+            if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
                 continue;
             }
             routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
@@ -2295,13 +2437,15 @@
                 applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
                         stream, curStreamDevice);
             }
-
+            // rescale index before applying to curStream as ranges may be different for
+            // stream and curStream
+            int idx = rescaleVolumeIndex(index, stream, (audio_stream_type_t)curStream);
             if (applyVolume) {
                 //FIXME: workaround for truncated touch sounds
                 // delayed volume change for system stream to be removed when the problem is
                 // handled by system UI
                 status_t volStatus =
-                        checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
+                        checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
                             (stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
                 if (volStatus != NO_ERROR) {
                     status = volStatus;
@@ -2650,6 +2794,7 @@
     mEffects.dump(fd);
     mAudioPatches.dump(fd);
     mPolicyMixes.dump(fd);
+    mAudioSources.dump(fd);
 
     return NO_ERROR;
 }
@@ -2834,8 +2979,7 @@
     }
     ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
 
-    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+    if (!audio_patch_is_valid(patch)) {
         return BAD_VALUE;
     }
     // only one source per audio patch supported for now
@@ -3293,8 +3437,8 @@
 void AudioPolicyManager::clearAudioSources(uid_t uid)
 {
     for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--)  {
-        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
-        if (sourceDesc->mUid == uid) {
+        sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        if (sourceDesc->uid() == uid) {
             stopAudioSource(mAudioSources.keyAt(i));
         }
     }
@@ -3312,20 +3456,23 @@
 }
 
 status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
-                                  const audio_attributes_t *attributes,
-                                  audio_patch_handle_t *handle,
-                                  uid_t uid)
+                                              const audio_attributes_t *attributes,
+                                              audio_port_handle_t *portId,
+                                              uid_t uid)
 {
-    ALOGV("%s source %p attributes %p handle %p", __FUNCTION__, source, attributes, handle);
-    if (source == NULL || attributes == NULL || handle == NULL) {
+    ALOGV("%s", __FUNCTION__);
+
+    if (source == NULL || attributes == NULL || portId == NULL) {
+        ALOGW("%s invalid argument: source %p attributes %p portId %p",
+              __FUNCTION__, source, attributes, portId);
         return BAD_VALUE;
     }
 
-    *handle = AUDIO_PATCH_HANDLE_NONE;
-
+    *portId = AUDIO_PORT_HANDLE_NONE;
+
     if (source->role != AUDIO_PORT_ROLE_SOURCE ||
             source->type != AUDIO_PORT_TYPE_DEVICE) {
-        ALOGV("%s INVALID_OPERATION source->role %d source->type %d", __FUNCTION__, source->role, source->type);
+        ALOGW("%s INVALID_OPERATION source->role %d source->type %d",
+              __FUNCTION__, source->role, source->type);
         return INVALID_OPERATION;
     }
 
@@ -3333,34 +3480,37 @@
             mAvailableInputDevices.getDevice(source->ext.device.type,
                                               String8(source->ext.device.address));
     if (srcDeviceDesc == 0) {
-        ALOGV("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
+        ALOGW("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
         return BAD_VALUE;
     }
-    sp<AudioSourceDescriptor> sourceDesc =
-            new AudioSourceDescriptor(srcDeviceDesc, attributes, uid);
+
+    *portId = AudioPort::getNextUniqueId();
 
     struct audio_patch dummyPatch = {};
     sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
-    sourceDesc->mPatchDesc = patchDesc;
+
+    sp<SourceClientDescriptor> sourceDesc =
+        new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDeviceDesc,
+                                   streamTypefromAttributesInt(attributes));
 
     status_t status = connectAudioSource(sourceDesc);
     if (status == NO_ERROR) {
-        mAudioSources.add(sourceDesc->getHandle(), sourceDesc);
-        *handle = sourceDesc->getHandle();
+        mAudioSources.add(*portId, sourceDesc);
     }
     return status;
 }
 
-status_t AudioPolicyManager::connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+status_t AudioPolicyManager::connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
-    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->portId());
 
     // make sure we only have one patch per source.
     disconnectAudioSource(sourceDesc);
 
-    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
-    audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
-    sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->mDevice;
+    audio_attributes_t attributes = sourceDesc->attributes();
+    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
+    audio_stream_type_t stream = sourceDesc->stream();
+    sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->srcDevice();
 
     audio_devices_t sinkDevice = getDeviceForStrategy(strategy, true);
     sp<DeviceDescriptor> sinkDeviceDesc =
@@ -3405,7 +3555,7 @@
                                                               0);
         ALOGV("%s patch panel returned %d patchHandle %d", __FUNCTION__,
                                                               status, afPatchHandle);
-        sourceDesc->mPatchDesc->mPatch = *patchBuilder.patch();
+        sourceDesc->patchDesc()->mPatch = *patchBuilder.patch();
         if (status != NO_ERROR) {
             ALOGW("%s patch panel could not connect device patch, error %d",
                   __FUNCTION__, status);
@@ -3415,32 +3565,32 @@
         status = startSource(outputDesc, stream, sinkDevice, NULL, &delayMs);
 
         if (status != NO_ERROR) {
-            mpClientInterface->releaseAudioPatch(sourceDesc->mPatchDesc->mAfPatchHandle, 0);
+            mpClientInterface->releaseAudioPatch(sourceDesc->patchDesc()->mAfPatchHandle, 0);
             return status;
         }
-        sourceDesc->mSwOutput = outputDesc;
+        sourceDesc->setSwOutput(outputDesc);
         if (delayMs != 0) {
             usleep(delayMs * 1000);
         }
     }
 
-    sourceDesc->mPatchDesc->mAfPatchHandle = afPatchHandle;
-    addAudioPatch(sourceDesc->mPatchDesc->mHandle, sourceDesc->mPatchDesc);
+    sourceDesc->patchDesc()->mAfPatchHandle = afPatchHandle;
+    addAudioPatch(sourceDesc->patchDesc()->mHandle, sourceDesc->patchDesc());
 
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::stopAudioSource(audio_patch_handle_t handle __unused)
+status_t AudioPolicyManager::stopAudioSource(audio_port_handle_t portId)
 {
-    sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueFor(handle);
-    ALOGV("%s handle %d", __FUNCTION__, handle);
+    sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueFor(portId);
+    ALOGV("%s port ID %d", __FUNCTION__, portId);
     if (sourceDesc == 0) {
-        ALOGW("%s unknown source for handle %d", __FUNCTION__, handle);
+        ALOGW("%s unknown source for port ID %d", __FUNCTION__, portId);
         return BAD_VALUE;
     }
     status_t status = disconnectAudioSource(sourceDesc);
 
-    mAudioSources.removeItem(handle);
+    mAudioSources.removeItem(portId);
     return status;
 }
 
@@ -3774,20 +3924,20 @@
     }
 }
 
-status_t AudioPolicyManager::disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+status_t AudioPolicyManager::disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
-    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+    ALOGV("%s port Id %d", __FUNCTION__, sourceDesc->portId());
 
-    sp<AudioPatch> patchDesc = mAudioPatches.valueFor(sourceDesc->mPatchDesc->mHandle);
+    sp<AudioPatch> patchDesc = mAudioPatches.valueFor(sourceDesc->patchDesc()->mHandle);
     if (patchDesc == 0) {
         ALOGW("%s source has no patch with handle %d", __FUNCTION__,
-              sourceDesc->mPatchDesc->mHandle);
+              sourceDesc->patchDesc()->mHandle);
         return BAD_VALUE;
     }
-    removeAudioPatch(sourceDesc->mPatchDesc->mHandle);
+    removeAudioPatch(sourceDesc->patchDesc()->mHandle);
 
-    audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
-    sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->mSwOutput.promote();
+    audio_stream_type_t stream = sourceDesc->stream();
+    sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->swOutput().promote();
     if (swOutputDesc != 0) {
         status_t status = stopSource(swOutputDesc, stream, false);
         if (status == NO_ERROR) {
@@ -3795,7 +3945,7 @@
         }
         mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
     } else {
-        sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->mHwOutput.promote();
+        sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->hwOutput().promote();
         if (hwOutputDesc != 0) {
           //   release patch between src device and output device
           //   close Hwoutput and remove from mHwOutputs
@@ -3806,15 +3956,16 @@
     return NO_ERROR;
 }
 
-sp<AudioSourceDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
+sp<SourceClientDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
         audio_io_handle_t output, routing_strategy strategy)
 {
-    sp<AudioSourceDescriptor> source;
+    sp<SourceClientDescriptor> source;
     for (size_t i = 0; i < mAudioSources.size(); i++)  {
-        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        audio_attributes_t attributes = sourceDesc->attributes();
         routing_strategy sourceStrategy =
-                (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
-        sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->mSwOutput.promote();
+                (routing_strategy) getStrategyForAttr(&attributes);
+        sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->swOutput().promote();
         if (sourceStrategy == strategy && outputDesc != 0 && outputDesc->mIoHandle == output) {
             source = sourceDesc;
             break;
@@ -4641,18 +4792,17 @@
     return outputs;
 }
 
-bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
-                                      SortedVector<audio_io_handle_t>& outputs2)
+void AudioPolicyManager::checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked)
 {
-    if (outputs1.size() != outputs2.size()) {
-        return false;
+    // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
+    // output is suspended before any tracks are moved to it
+    checkA2dpSuspend();
+    checkOutputForAllStrategies();
+    if (onOutputsChecked != nullptr && onOutputsChecked()) checkA2dpSuspend();
+    updateDevicesAndOutputs();
+    if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
+        setMsdPatch();
     }
-    for (size_t i = 0; i < outputs1.size(); i++) {
-        if (outputs1[i] != outputs2[i]) {
-            return false;
-        }
-    }
-    return true;
 }
 
 void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
@@ -4680,7 +4830,7 @@
         }
     }
 
-    if (!vectorsEqual(srcOutputs,dstOutputs)) {
+    if (srcOutputs != dstOutputs) {
         // get maximum latency of all source outputs to determine the minimum mute time guaranteeing
         // audio from invalidated tracks will be rendered when unmuting
         uint32_t maxLatency = 0;
@@ -4699,7 +4849,7 @@
                 setStrategyMute(strategy, true, desc);
                 setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR, newDevice);
             }
-            sp<AudioSourceDescriptor> source =
+            sp<SourceClientDescriptor> source =
                     getSourceForStrategyOnOutput(srcOut, strategy);
             if (source != 0){
                 connectAudioSource(source);
@@ -5423,8 +5573,8 @@
         return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
     }
 
-    // in-call: always cap earpiece volume by voice volume + some low headroom
-    if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+    // in-call: always cap volume by voice volume + some low headroom
+    if ((stream != AUDIO_STREAM_VOICE_CALL) &&
             (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
         switch (stream) {
         case AUDIO_STREAM_SYSTEM:
@@ -5436,9 +5586,9 @@
         case AUDIO_STREAM_DTMF:
         case AUDIO_STREAM_ACCESSIBILITY: {
             int voiceVolumeIndex =
-                mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+                mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
             const float maxVoiceVolDb =
-                computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+                computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
                 + IN_CALL_EARPIECE_HEADROOM_DB;
             if (volumeDB > maxVoiceVolDb) {
                 ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
@@ -5509,6 +5659,21 @@
     return volumeDB;
 }
 
+int AudioPolicyManager::rescaleVolumeIndex(int srcIndex,
+                                           audio_stream_type_t srcStream,
+                                           audio_stream_type_t dstStream)
+{
+    if (srcStream == dstStream) {
+        return srcIndex;
+    }
+    float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
+    float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
+    float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
+    float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+
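+    // Linear mapping between the two index ranges. For example (hypothetical
+    // ranges), a source index of 5 in [0..7] maps into [0..15] as
+    // (int)(0 + (5 - 0) * (15 - 0) / (7 - 0)) = 10.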
+    return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
+}
+
 status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
                                                    int index,
                                                    const sp<AudioOutputDescriptor>& outputDesc,
@@ -5635,55 +5800,6 @@
     }
 }
 
-void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
-                                                      bool starting, bool stateChange)
-{
-    if(!hasPrimaryOutput()) {
-        return;
-    }
-
-    // if the stream pertains to sonification strategy and we are in call we must
-    // mute the stream if it is low visibility. If it is high visibility, we must play a tone
-    // in the device used for phone strategy and play the tone if the selected device does not
-    // interfere with the device used for phone strategy
-    // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
-    // many times as there are active tracks on the output
-    const routing_strategy stream_strategy = getStrategy(stream);
-    if ((stream_strategy == STRATEGY_SONIFICATION) ||
-            ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
-        sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput;
-        ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
-                stream, starting, outputDesc->mDevice, stateChange);
-        if (outputDesc->mRefCount[stream]) {
-            int muteCount = 1;
-            if (stateChange) {
-                muteCount = outputDesc->mRefCount[stream];
-            }
-            if (audio_is_low_visibility(stream)) {
-                ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
-                for (int i = 0; i < muteCount; i++) {
-                    setStreamMute(stream, starting, mPrimaryOutput);
-                }
-            } else {
-                ALOGV("handleIncallSonification() high visibility");
-                if (outputDesc->device() &
-                        getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
-                    ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
-                    for (int i = 0; i < muteCount; i++) {
-                        setStreamMute(stream, starting, mPrimaryOutput);
-                    }
-                }
-                if (starting) {
-                    mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
-                                                 AUDIO_STREAM_VOICE_CALL);
-                } else {
-                    mpClientInterface->stopTone();
-                }
-            }
-        }
-    }
-}
-
 audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
 {
     // flags to stream type mapping
@@ -5697,39 +5813,7 @@
         return AUDIO_STREAM_TTS;
     }
 
-    // usage to stream type mapping
-    switch (attr->usage) {
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANT:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-        return AUDIO_STREAM_MUSIC;
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        return AUDIO_STREAM_ACCESSIBILITY;
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return AUDIO_STREAM_SYSTEM;
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return AUDIO_STREAM_VOICE_CALL;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return AUDIO_STREAM_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-        return AUDIO_STREAM_ALARM;
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return AUDIO_STREAM_RING;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return AUDIO_STREAM_NOTIFICATION;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return AUDIO_STREAM_MUSIC;
-    }
+    return audio_usage_to_stream_type(attr->usage);
 }
 
 bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa)
@@ -5814,10 +5898,10 @@
 void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
 {
     for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--)  {
-        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
-        if (sourceDesc->mDevice->equals(deviceDesc)) {
-            ALOGV("%s releasing audio source %d", __FUNCTION__, sourceDesc->getHandle());
-            stopAudioSource(sourceDesc->getHandle());
+        sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        if (sourceDesc->srcDevice()->equals(deviceDesc)) {
+            ALOGV("%s releasing audio source %d", __FUNCTION__, sourceDesc->portId());
+            stopAudioSource(sourceDesc->portId());
         }
     }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 893b963..9436767 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -17,6 +17,7 @@
 #pragma once
 
 #include <atomic>
+#include <functional>
 #include <memory>
 #include <unordered_set>
 
@@ -30,6 +31,7 @@
 #include <utils/SortedVector.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicy.h>
+#include <media/PatchBuilder.h>
 #include "AudioPolicyInterface.h"
 
 #include <AudioPolicyManagerInterface.h>
@@ -38,6 +40,7 @@
 #include <AudioPolicyConfig.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
+#include <AudioProfile.h>
 #include <DeviceDescriptor.h>
 #include <IOProfile.h>
 #include <HwModule.h>
@@ -118,15 +121,9 @@
                                           audio_output_flags_t *flags,
                                           audio_port_handle_t *selectedDeviceId,
                                           audio_port_handle_t *portId);
-        virtual status_t startOutput(audio_io_handle_t output,
-                                     audio_stream_type_t stream,
-                                     audio_session_t session);
-        virtual status_t stopOutput(audio_io_handle_t output,
-                                    audio_stream_type_t stream,
-                                    audio_session_t session);
-        virtual void releaseOutput(audio_io_handle_t output,
-                                   audio_stream_type_t stream,
-                                   audio_session_t session);
+        virtual status_t startOutput(audio_port_handle_t portId);
+        virtual status_t stopOutput(audio_port_handle_t portId);
+        virtual void releaseOutput(audio_port_handle_t portId);
         virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                          audio_io_handle_t *input,
                                          audio_session_t session,
@@ -138,16 +135,13 @@
                                          audio_port_handle_t *portId);
 
         // indicates to the audio policy manager that the input starts being used.
-        virtual status_t startInput(audio_io_handle_t input,
-                                    audio_session_t session,
+        virtual status_t startInput(audio_port_handle_t portId,
                                     bool silenced,
                                     concurrency_type__mask_t *concurrency);
 
         // indicates to the audio policy manager that the input stops being used.
-        virtual status_t stopInput(audio_io_handle_t input,
-                                   audio_session_t session);
-        virtual void releaseInput(audio_io_handle_t input,
-                                  audio_session_t session);
+        virtual status_t stopInput(audio_port_handle_t portId);
+        virtual void releaseInput(audio_port_handle_t portId);
         virtual void closeAllInputs();
         virtual void initStreamVolume(audio_stream_type_t stream,
                                                     int indexMin,
@@ -229,9 +223,9 @@
 
         virtual status_t startAudioSource(const struct audio_port_config *source,
                                           const audio_attributes_t *attributes,
-                                          audio_patch_handle_t *handle,
+                                          audio_port_handle_t *portId,
                                           uid_t uid);
-        virtual status_t stopAudioSource(audio_patch_handle_t handle);
+        virtual status_t stopAudioSource(audio_port_handle_t portId);
 
         virtual status_t setMasterMono(bool mono);
         virtual status_t getMasterMono(bool *mono);
@@ -353,6 +347,10 @@
                                     int index,
                                     audio_devices_t device);
 
+        // rescale volume index from srcStream into the range of dstStream
+        int rescaleVolumeIndex(int srcIndex,
+                               audio_stream_type_t srcStream,
+                               audio_stream_type_t dstStream);
         // check that volume change is permitted, compute and send new volume to audio hardware
         virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
                                            const sp<AudioOutputDescriptor>& outputDesc,
@@ -377,10 +375,6 @@
                            int delayMs = 0,
                            audio_devices_t device = (audio_devices_t)0);
 
-        // handle special cases for sonification strategy while in call: mute streams or replace by
-        // a special tone in the device used for communication
-        void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
-
         audio_mode_t getPhoneState();
 
         // true if device is in a telephony or VoIP call
@@ -410,6 +404,12 @@
         // close an input.
         void closeInput(audio_io_handle_t input);
 
+        // runs all the checks required for accommodating changes in devices and outputs
+        // if 'onOutputsChecked' callback is provided, it is executed after the outputs
+        // check via 'checkOutputForAllStrategies'. If the callback returns 'true',
+        // A2DP suspend status is rechecked.
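+        // A hypothetical caller reacting to a device connection change could pass:
+        //     checkForDeviceAndOutputChanges([&]() {
+        //         // re-evaluate outputs for the new device; return true to force
+        //         // a second A2DP suspend check
+        //         return outputsChanged;
+        //     });
+        // 'outputsChanged' is illustrative only; actual callers are defined elsewhere.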
+        void checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked = nullptr);
+
         // checks and if necessary changes outputs used for all strategies.
         // must be called every time a condition that affects the output choice for a given strategy
         // changes: connected device, phone state, force use...
@@ -451,8 +451,6 @@
 
         SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
                                                             const SwAudioOutputCollection& openOutputs);
-        bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
-                                           SortedVector<audio_io_handle_t>& outputs2);
 
         // mute/unmute strategies using an incompatible device combination
         // if muting, wait for the audio in pcm buffer to be drained before proceeding
@@ -501,13 +499,14 @@
             if (!hasPrimaryOutput()) {
                 return AUDIO_DEVICE_NONE;
             }
-            return mAvailableInputDevices.getDevicesFromHwModule(mPrimaryOutput->getModuleHandle());
+            return mAvailableInputDevices.getDeviceTypesFromHwModule(
+                    mPrimaryOutput->getModuleHandle());
         }
 
         uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
         sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
         sp<DeviceDescriptor> findDevice(
-                const DeviceVector& devices, audio_devices_t device);
+                const DeviceVector& devices, audio_devices_t device) const;
 
         // if argument "device" is different from AUDIO_DEVICE_NONE,  startSource() will force
         // the re-evaluation of the output device.
@@ -526,10 +525,10 @@
 
         status_t hasPrimaryOutput() const { return mPrimaryOutput != 0; }
 
-        status_t connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
-        status_t disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
+        status_t connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
+        status_t disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
 
-        sp<AudioSourceDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
+        sp<SourceClientDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
                                                                routing_strategy strategy);
 
         void cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc);
@@ -544,6 +543,10 @@
         static bool streamsMatchForvolume(audio_stream_type_t stream1,
                                           audio_stream_type_t stream2);
 
+        void closeSessions(const sp<AudioInputDescriptor>& input, bool activeOnly);
+        void closeSession(const sp<AudioInputDescriptor>& input,
+                          const sp<AudioSession>& session);
+
         const uid_t mUidCached;                         // AID_AUDIOSERVER
         AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
         sp<SwAudioOutputDescriptor> mPrimaryOutput;     // primary output descriptor
@@ -584,7 +587,7 @@
         sp<AudioPatch> mCallRxPatch;
 
         HwAudioOutputCollection mHwOutputs;
-        AudioSourceCollection mAudioSources;
+        SourceClientCollection mAudioSources;
 
         // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
         // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
@@ -617,6 +620,17 @@
 
         status_t getSupportedFormats(audio_io_handle_t ioHandle, FormatVector& formats);
 
+        // Support for Multi-Stream Decoder (MSD) module
+        sp<DeviceDescriptor> getMsdAudioInDevice() const;
+        audio_devices_t getMsdAudioOutDeviceTypes() const;
+        const AudioPatchCollection getMsdPatches() const;
+        status_t getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+                                           bool hwAvSync,
+                                           audio_port_config *sourceConfig,
+                                           audio_port_config *sinkConfig) const;
+        PatchBuilder buildMsdPatch(audio_devices_t outputDevice) const;
+        status_t setMsdPatch(audio_devices_t outputDevice = AUDIO_DEVICE_NONE);
+
         // If any, resolve any "dynamic" fields of an Audio Profiles collection
         void updateAudioProfiles(audio_devices_t device, audio_io_handle_t ioHandle,
                 AudioProfileVector &profiles);
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index b064f8c..21fffec 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -155,17 +155,6 @@
     return result;
 }
 
-status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
-              audio_stream_type_t stream)
-{
-    return mAudioPolicyService->startTone(tone, stream);
-}
-
-status_t AudioPolicyService::AudioPolicyClient::stopTone()
-{
-    return mAudioPolicyService->stopTone();
-}
-
 status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
 {
     return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index fdae23b..2858aad 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -23,6 +23,7 @@
 #include <memory>
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
+#include <media/AudioPolicyHelper.h>
 #include <media/EffectsConfig.h>
 #include <mediautils/ServiceUtilities.h>
 #include <system/audio.h>
@@ -317,6 +318,102 @@
     return status;
 }
 
+status_t AudioPolicyEffects::addStreamDefaultEffect(const effect_uuid_t *type,
+                                                    const String16& opPackageName,
+                                                    const effect_uuid_t *uuid,
+                                                    int32_t priority,
+                                                    audio_usage_t usage,
+                                                    audio_unique_id_t* id)
+{
+    if (uuid == NULL || type == NULL) {
+        ALOGE("addStreamDefaultEffect(): Null uuid or type uuid pointer");
+        return BAD_VALUE;
+    }
+
+    audio_stream_type_t stream = audio_usage_to_stream_type(usage);
+
+    if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
+        ALOGE("addStreamDefaultEffect(): Unsupported stream type %d", stream);
+        return BAD_VALUE;
+    }
+
+    // Check that |uuid| or |type| corresponds to an effect on the system.
+    effect_descriptor_t descriptor = {};
+    status_t res = AudioEffect::getEffectDescriptor(
+            uuid, type, EFFECT_FLAG_TYPE_INSERT, &descriptor);
+    if (res != OK) {
+        ALOGE("addStreamDefaultEffect(): Failed to find effect descriptor matching uuid/type.");
+        return res;
+    }
+
+    // Only insert effects can be added dynamically as stream defaults.
+    if ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_INSERT) {
+        ALOGE("addStreamDefaultEffect(): Desired effect cannot be attached "
+              "as a stream default effect.");
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    // Find the EffectDescVector for the given stream type, or create a new one if necessary.
+    ssize_t index = mOutputStreams.indexOfKey(stream);
+    EffectDescVector *desc = NULL;
+    if (index < 0) {
+        // No effects for this stream type yet.
+        desc = new EffectDescVector();
+        mOutputStreams.add(stream, desc);
+    } else {
+        desc = mOutputStreams.valueAt(index);
+    }
+
+    // Create a new effect and add it to the vector.
+    res = AudioEffect::newEffectUniqueId(id);
+    if (res != OK) {
+        ALOGE("addStreamDefaultEffect(): failed to get new unique id.");
+        return res;
+    }
+    EffectDesc *effect = new EffectDesc(
+            descriptor.name, *type, opPackageName, *uuid, priority, *id);
+    desc->mEffects.add(effect);
+    // TODO(b/71813697): Support setting params as well.
+
+    // TODO(b/71814300): Retroactively attach to any existing streams of the given type.
+    // This requires tracking the stream type of each session id in addition to what is
+    // already being tracked.
+
+    return NO_ERROR;
+}
+
+status_t AudioPolicyEffects::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+    if (id == AUDIO_UNIQUE_ID_ALLOCATE) {
+        // ALLOCATE is not a unique identifier, but rather a reserved value indicating
+        // a real id has not been assigned. For default effects, this value is only used
+        // by system-owned defaults from the loaded config, which cannot be removed.
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    // Check each stream type.
+    size_t numStreams = mOutputStreams.size();
+    for (size_t i = 0; i < numStreams; ++i) {
+        // Check each effect for each stream.
+        EffectDescVector* descVector = mOutputStreams[i];
+        for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+            if ((*desc)->mId == id) {
+                // Found it!
+                // TODO(b/71814300): Remove from any streams the effect was attached to.
+                descVector->mEffects.erase(desc);
+                // Handles are unique; there can only be one match, so return early.
+                return NO_ERROR;
+            }
+        }
+    }
+
+    // Effect wasn't found, so it's been trivially removed successfully.
+    return NO_ERROR;
+}
 
 void AudioPolicyEffects::EffectVector::setProcessorEnabled(bool enabled)
 {
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 623180e..69367b1 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -64,7 +64,6 @@
     status_t releaseInputEffects(audio_io_handle_t input,
                                  audio_session_t audioSession);
 
-
     // Return a list of effect descriptors for default output effects
     // associated with audioSession
     status_t queryDefaultOutputSessionEffects(audio_session_t audioSession,
@@ -82,18 +81,49 @@
                              audio_stream_type_t stream,
                              audio_session_t audioSession);
 
+    // Add the effect to the list of default effects for streams of type |stream|.
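+    // For example, registering an effect with usage AUDIO_USAGE_MEDIA attaches it
+    // to AUDIO_STREAM_MUSIC sessions (per audio_usage_to_stream_type()).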
+    status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                    const String16& opPackageName,
+                                    const effect_uuid_t *uuid,
+                                    int32_t priority,
+                                    audio_usage_t usage,
+                                    audio_unique_id_t* id);
+
+    // Remove the default stream effect from wherever it's attached.
+    status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
 private:
 
     // class to store the description of an effects and its parameters
     // as defined in audio_effects.conf
     class EffectDesc {
     public:
-        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+        EffectDesc(const char *name,
+                   const effect_uuid_t& typeUuid,
+                   const String16& opPackageName,
+                   const effect_uuid_t& uuid,
+                   uint32_t priority,
+                   audio_unique_id_t id) :
                         mName(strdup(name)),
-                        mUuid(uuid) { }
+                        mTypeUuid(typeUuid),
+                        mOpPackageName(opPackageName),
+                        mUuid(uuid),
+                        mPriority(priority),
+                        mId(id) { }
+        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+                        EffectDesc(name,
+                                   *EFFECT_UUID_NULL,
+                                   String16(""),
+                                   uuid,
+                                   0,
+                                   AUDIO_UNIQUE_ID_ALLOCATE) { }
         EffectDesc(const EffectDesc& orig) :
                         mName(strdup(orig.mName)),
-                        mUuid(orig.mUuid) {
+                        mTypeUuid(orig.mTypeUuid),
+                        mOpPackageName(orig.mOpPackageName),
+                        mUuid(orig.mUuid),
+                        mPriority(orig.mPriority),
+                        mId(orig.mId) {
                             // deep copy mParams
                             for (size_t k = 0; k < orig.mParams.size(); k++) {
                                 effect_param_t *origParam = orig.mParams[k];
@@ -116,7 +146,11 @@
             }
         }
         char *mName;
+        effect_uuid_t mTypeUuid;
+        String16 mOpPackageName;
         effect_uuid_t mUuid;
+        int32_t mPriority;
+        audio_unique_id_t mId;
         Vector <effect_param_t *> mParams;
     };
 
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index d2bc40d..3439c9b 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -17,12 +17,12 @@
 #define LOG_TAG "AudioPolicyIntefaceImpl"
 //#define LOG_NDEBUG 0
 
-#include <utils/Log.h>
-#include <media/MediaAnalyticsItem.h>
-
 #include "AudioPolicyService.h"
-#include <mediautils/ServiceUtilities.h>
 #include "TypeConverter.h"
+#include <media/AudioPolicyHelper.h>
+#include <media/MediaAnalyticsItem.h>
+#include <mediautils/ServiceUtilities.h>
+#include <utils/Log.h>
 
 namespace android {
 
@@ -200,7 +200,7 @@
         !modifyPhoneStateAllowed(pid, uid)) {
         // If the app tries to play music through the telephony device and doesn't have permission
         // the fallback to the default output device.
-        mAudioPolicyManager->releaseOutput(*output, *stream, session);
+        mAudioPolicyManager->releaseOutput(*portId);
         flags = originalFlags;
         *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
         *portId = AUDIO_PORT_HANDLE_NONE;
@@ -208,93 +208,116 @@
                                                  config,
                                                  &flags, selectedDeviceId, portId);
     }
+
+    if (result == NO_ERROR) {
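+        // Track the new AudioTrack client by port ID so that startOutput(),
+        // stopOutput() and releaseOutput() can retrieve its io handle, stream
+        // and session later.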
+        sp<AudioPlaybackClient> client =
+            new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+        mAudioPlaybackClients.add(*portId, client);
+    }
     return result;
 }
 
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
-                                         audio_stream_type_t stream,
-                                         audio_session_t session)
+status_t AudioPolicyService::startOutput(audio_port_handle_t portId)
 {
-    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
-        return BAD_VALUE;
-    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     ALOGV("startOutput()");
+    sp<AudioPlaybackClient> client;
     sp<AudioPolicyEffects>audioPolicyEffects;
     {
         Mutex::Autolock _l(mLock);
+        const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+        if (index < 0) {
+            ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+            return INVALID_OPERATION;
+        }
+        client = mAudioPlaybackClients.valueAt(index);
         audioPolicyEffects = mAudioPolicyEffects;
     }
     if (audioPolicyEffects != 0) {
         // create audio processors according to stream
-        status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
+        status_t status = audioPolicyEffects->addOutputSessionEffects(
+            client->io, client->stream, client->session);
         if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to add effects on session %d", session);
+            ALOGW("Failed to add effects on session %d", client->session);
         }
     }
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return mAudioPolicyManager->startOutput(output, stream, session);
+    status_t status = mAudioPolicyManager->startOutput(portId);
+    if (status == NO_ERROR) {
+        client->active = true;
+    }
+    return status;
 }
 
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
-                                        audio_stream_type_t stream,
-                                        audio_session_t session)
+status_t AudioPolicyService::stopOutput(audio_port_handle_t portId)
 {
-    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
-        return BAD_VALUE;
-    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     ALOGV("stopOutput()");
-    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    mOutputCommandThread->stopOutputCommand(portId);
     return NO_ERROR;
 }
 
-status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
-                                      audio_stream_type_t stream,
-                                      audio_session_t session)
+status_t  AudioPolicyService::doStopOutput(audio_port_handle_t portId)
 {
-    ALOGV("doStopOutput from tid %d", gettid());
+    ALOGV("doStopOutput");
+    sp<AudioPlaybackClient> client;
     sp<AudioPolicyEffects>audioPolicyEffects;
     {
         Mutex::Autolock _l(mLock);
+
+        const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+        if (index < 0) {
+            ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+            return INVALID_OPERATION;
+        }
+        client = mAudioPlaybackClients.valueAt(index);
         audioPolicyEffects = mAudioPolicyEffects;
     }
     if (audioPolicyEffects != 0) {
         // release audio processors from the stream
-        status_t status = audioPolicyEffects->releaseOutputSessionEffects(output, stream, session);
+        status_t status = audioPolicyEffects->releaseOutputSessionEffects(
+            client->io, client->stream, client->session);
         if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("Failed to release effects on session %d", session);
+            ALOGW("Failed to release effects on session %d", client->session);
         }
     }
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return mAudioPolicyManager->stopOutput(output, stream, session);
+    status_t status = mAudioPolicyManager->stopOutput(portId);
+    if (status == NO_ERROR) {
+        client->active = false;
+    }
+    return status;
 }
 
-void AudioPolicyService::releaseOutput(audio_io_handle_t output,
-                                       audio_stream_type_t stream,
-                                       audio_session_t session)
+void AudioPolicyService::releaseOutput(audio_port_handle_t portId)
 {
     if (mAudioPolicyManager == NULL) {
         return;
     }
     ALOGV("releaseOutput()");
-    mOutputCommandThread->releaseOutputCommand(output, stream, session);
+    mOutputCommandThread->releaseOutputCommand(portId);
 }
 
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
-                                         audio_stream_type_t stream,
-                                         audio_session_t session)
+void AudioPolicyService::doReleaseOutput(audio_port_handle_t portId)
 {
     ALOGV("doReleaseOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
+    const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+    if (index < 0) {
+        ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+        return;
+    }
+    sp<AudioPlaybackClient> client = mAudioPlaybackClients.valueAt(index);
+    mAudioPlaybackClients.removeItem(portId);
+
     // called from internal thread: no need to clear caller identity
-    mAudioPolicyManager->releaseOutput(output, stream, session);
+    mAudioPolicyManager->releaseOutput(portId);
 }
 
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
@@ -398,17 +421,13 @@
         if (status != NO_ERROR) {
             if (status == PERMISSION_DENIED) {
                 AutoCallerClear acc;
-                mAudioPolicyManager->releaseInput(*input, session);
+                mAudioPolicyManager->releaseInput(*portId);
             }
             return status;
         }
 
-        sp<AudioRecordClient> client =
-                new AudioRecordClient(*attr, *input, uid, pid, opPackageName, session);
-        client->active = false;
-        client->isConcurrent = false;
-        client->isVirtualDevice = false; //TODO : update from APM->getInputForAttr()
-        client->deviceId = *selectedDeviceId;
+        sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
+                                                             *selectedDeviceId, opPackageName);
         mAudioRecordClients.add(*portId, client);
     }
 
@@ -496,8 +515,7 @@
     status_t status;
     {
         AutoCallerClear acc;
-        status = mAudioPolicyManager->startInput(
-                    client->input, client->session, *silenced, &concurrency);
+        status = mAudioPolicyManager->startInput(portId, *silenced, &concurrency);
 
     }
 
@@ -610,7 +628,7 @@
     // finish the recording app op
     finishRecording(client->opPackageName, client->uid);
     AutoCallerClear acc;
-    return mAudioPolicyManager->stopInput(client->input, client->session);
+    return mAudioPolicyManager->stopInput(portId);
 }
 
 void AudioPolicyService::releaseInput(audio_port_handle_t portId)
@@ -635,15 +653,15 @@
     }
     if (audioPolicyEffects != 0) {
         // release audio processors from the input
-        status_t status = audioPolicyEffects->releaseInputEffects(client->input, client->session);
+        status_t status = audioPolicyEffects->releaseInputEffects(client->io, client->session);
         if(status != NO_ERROR) {
-            ALOGW("Failed to release effects on input %d", client->input);
+            ALOGW("Failed to release effects on input %d", client->io);
         }
     }
     {
         Mutex::Autolock _l(mLock);
         AutoCallerClear acc;
-        mAudioPolicyManager->releaseInput(client->input, client->session);
+        mAudioPolicyManager->releaseInput(portId);
     }
 }
 
@@ -832,6 +850,50 @@
             (audio_session_t)audioSession, descriptors, count);
 }
 
+status_t AudioPolicyService::addStreamDefaultEffect(const effect_uuid_t *type,
+                                                    const String16& opPackageName,
+                                                    const effect_uuid_t *uuid,
+                                                    int32_t priority,
+                                                    audio_usage_t usage,
+                                                    audio_unique_id_t* id)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!modifyDefaultAudioEffectsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    sp<AudioPolicyEffects>audioPolicyEffects;
+    {
+        Mutex::Autolock _l(mLock);
+        audioPolicyEffects = mAudioPolicyEffects;
+    }
+    if (audioPolicyEffects == 0) {
+        return NO_INIT;
+    }
+    return audioPolicyEffects->addStreamDefaultEffect(
+            type, opPackageName, uuid, priority, usage, id);
+}
+
+status_t AudioPolicyService::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!modifyDefaultAudioEffectsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    sp<AudioPolicyEffects>audioPolicyEffects;
+    {
+        Mutex::Autolock _l(mLock);
+        audioPolicyEffects = mAudioPolicyEffects;
+    }
+    if (audioPolicyEffects == 0) {
+        return NO_INIT;
+    }
+    return audioPolicyEffects->removeStreamDefaultEffect(id);
+}
+
 bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
 {
     if (mAudioPolicyManager == NULL) {
@@ -963,26 +1025,26 @@
 }
 
 status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
-                                  const audio_attributes_t *attributes,
-                                  audio_patch_handle_t *handle)
+                                              const audio_attributes_t *attributes,
+                                              audio_port_handle_t *portId)
 {
     Mutex::Autolock _l(mLock);
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     AutoCallerClear acc;
-    return mAudioPolicyManager->startAudioSource(source, attributes, handle,
+    return mAudioPolicyManager->startAudioSource(source, attributes, portId,
                                                  IPCThreadState::self()->getCallingUid());
 }
 
-status_t AudioPolicyService::stopAudioSource(audio_patch_handle_t handle)
+status_t AudioPolicyService::stopAudioSource(audio_port_handle_t portId)
 {
     Mutex::Autolock _l(mLock);
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     AutoCallerClear acc;
-    return mAudioPolicyManager->stopAudioSource(handle);
+    return mAudioPolicyManager->stopAudioSource(portId);
 }
 
 status_t AudioPolicyService::setMasterMono(bool mono)
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 1379223..8bca221 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -67,8 +67,6 @@
     {
         Mutex::Autolock _l(mLock);
 
-        // start tone playback thread
-        mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
         // start audio commands thread
         mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
         // start output activity command thread
@@ -90,7 +88,6 @@
 
 AudioPolicyService::~AudioPolicyService()
 {
-    mTonePlaybackThread->exit();
     mAudioCommandThread->exit();
     mOutputCommandThread->exit();
 
@@ -115,13 +112,17 @@
     Mutex::Autolock _l(mNotificationClientsLock);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
-    if (mNotificationClients.indexOfKey(uid) < 0) {
+    pid_t pid = IPCThreadState::self()->getCallingPid();
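+    // Notification clients are now keyed by a 64-bit token packing the uid in the
+    // upper 32 bits and the pid in the lower 32 bits, e.g. uid 10069 / pid 1234
+    // yields 0x00002755000004D2.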
+    int64_t token = ((int64_t)uid<<32) | pid;
+
+    if (mNotificationClients.indexOfKey(token) < 0) {
         sp<NotificationClient> notificationClient = new NotificationClient(this,
                                                                            client,
-                                                                           uid);
-        ALOGV("registerClient() client %p, uid %d", client.get(), uid);
+                                                                           uid,
+                                                                           pid);
+        ALOGV("registerClient() client %p, uid %d pid %d", client.get(), uid, pid);
 
-        mNotificationClients.add(uid, notificationClient);
+        mNotificationClients.add(token, notificationClient);
 
         sp<IBinder> binder = IInterface::asBinder(client);
         binder->linkToDeath(notificationClient);
@@ -133,22 +134,33 @@
     Mutex::Autolock _l(mNotificationClientsLock);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
-    if (mNotificationClients.indexOfKey(uid) < 0) {
+    pid_t pid = IPCThreadState::self()->getCallingPid();
+    int64_t token = ((int64_t)uid<<32) | pid;
+
+    if (mNotificationClients.indexOfKey(token) < 0) {
         return;
     }
-    mNotificationClients.valueFor(uid)->setAudioPortCallbacksEnabled(enabled);
+    mNotificationClients.valueFor(token)->setAudioPortCallbacksEnabled(enabled);
 }
 
 // removeNotificationClient() is called when the client process dies.
-void AudioPolicyService::removeNotificationClient(uid_t uid)
+void AudioPolicyService::removeNotificationClient(uid_t uid, pid_t pid)
 {
     {
         Mutex::Autolock _l(mNotificationClientsLock);
-        mNotificationClients.removeItem(uid);
+        int64_t token = ((int64_t)uid<<32) | pid;
+        mNotificationClients.removeItem(token);
     }
     {
         Mutex::Autolock _l(mLock);
-        if (mAudioPolicyManager) {
+        bool hasSameUid = false;
+        for (size_t i = 0; i < mNotificationClients.size(); i++) {
+            if (mNotificationClients.valueAt(i)->uid() == uid) {
+                hasSameUid = true;
+                break;
+            }
+        }
+        if (mAudioPolicyManager && !hasSameUid) {
             // called from binder death notification: no need to clear caller identity
             mAudioPolicyManager->releaseResourcesForUid(uid);
         }
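The notification-client map is now keyed by a 64-bit token that packs the caller's uid into the high 32 bits and its pid into the low 32 bits, so several processes sharing one uid each keep their own entry, and releaseResourcesForUid() only runs once the last client for that uid has gone away. Purely illustrative helpers mirroring that layout (hypothetical names, not part of the patch):

    // Hypothetical (uid, pid) <-> token helpers; pids are positive, so OR-ing the
    // pid into the low word cannot disturb the uid in the high word.
    static inline int64_t makeNotificationToken(uid_t uid, pid_t pid) {
        return ((int64_t)uid << 32) | pid;
    }
    static inline uid_t notificationTokenUid(int64_t token) { return (uid_t)(token >> 32); }
    static inline pid_t notificationTokenPid(int64_t token) { return (pid_t)(token & 0xffffffff); }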
@@ -236,8 +248,9 @@
 
 AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
                                                      const sp<IAudioPolicyServiceClient>& client,
-                                                     uid_t uid)
-    : mService(service), mUid(uid), mAudioPolicyServiceClient(client),
+                                                     uid_t uid,
+                                                     pid_t pid)
+    : mService(service), mUid(uid), mPid(pid), mAudioPolicyServiceClient(client),
       mAudioPortCallbacksEnabled(false)
 {
 }
@@ -251,7 +264,7 @@
     sp<NotificationClient> keep(this);
     sp<AudioPolicyService> service = mService.promote();
     if (service != 0) {
-        service->removeNotificationClient(mUid);
+        service->removeNotificationClient(mUid, mPid);
     }
 }
 
@@ -322,8 +335,6 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
     result.append(buffer);
-    snprintf(buffer, SIZE, "Tones Thread: %p\n", mTonePlaybackThread.get());
-    result.append(buffer);
 
     write(fd, result.string(), result.size());
     return NO_ERROR;
@@ -359,9 +370,6 @@
         if (mAudioCommandThread != 0) {
             mAudioCommandThread->dump(fd);
         }
-        if (mTonePlaybackThread != 0) {
-            mTonePlaybackThread->dump(fd);
-        }
 
         if (mAudioPolicyManager) {
             mAudioPolicyManager->dump(fd);
@@ -632,7 +640,6 @@
                                                            const wp<AudioPolicyService>& service)
     : Thread(false), mName(name), mService(service)
 {
-    mpToneGenerator = NULL;
 }
 
 
@@ -642,7 +649,6 @@
         release_wake_lock(mName.string());
     }
     mAudioCommands.clear();
-    delete mpToneGenerator;
 }
 
 void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -667,26 +673,6 @@
                 mLastCommand = command;
 
                 switch (command->mCommand) {
-                case START_TONE: {
-                    mLock.unlock();
-                    ToneData *data = (ToneData *)command->mParam.get();
-                    ALOGV("AudioCommandThread() processing start tone %d on stream %d",
-                            data->mType, data->mStream);
-                    delete mpToneGenerator;
-                    mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
-                    mpToneGenerator->startTone(data->mType);
-                    mLock.lock();
-                    }break;
-                case STOP_TONE: {
-                    mLock.unlock();
-                    ALOGV("AudioCommandThread() processing stop tone");
-                    if (mpToneGenerator != NULL) {
-                        mpToneGenerator->stopTone();
-                        delete mpToneGenerator;
-                        mpToneGenerator = NULL;
-                    }
-                    mLock.lock();
-                    }break;
                 case SET_VOLUME: {
                     VolumeData *data = (VolumeData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set volume stream %d, \
@@ -709,26 +695,26 @@
                     }break;
                 case STOP_OUTPUT: {
                     StopOutputData *data = (StopOutputData *)command->mParam.get();
-                    ALOGV("AudioCommandThread() processing stop output %d",
-                            data->mIO);
+                    ALOGV("AudioCommandThread() processing stop output portId %d",
+                            data->mPortId);
                     svc = mService.promote();
                     if (svc == 0) {
                         break;
                     }
                     mLock.unlock();
-                    svc->doStopOutput(data->mIO, data->mStream, data->mSession);
+                    svc->doStopOutput(data->mPortId);
                     mLock.lock();
                     }break;
                 case RELEASE_OUTPUT: {
                     ReleaseOutputData *data = (ReleaseOutputData *)command->mParam.get();
-                    ALOGV("AudioCommandThread() processing release output %d",
-                            data->mIO);
+                    ALOGV("AudioCommandThread() processing release output portId %d",
+                            data->mPortId);
                     svc = mService.promote();
                     if (svc == 0) {
                         break;
                     }
                     mLock.unlock();
-                    svc->doReleaseOutput(data->mIO, data->mStream, data->mSession);
+                    svc->doReleaseOutput(data->mPortId);
                     mLock.lock();
                     }break;
                 case CREATE_AUDIO_PATCH: {
@@ -893,27 +879,6 @@
     return NO_ERROR;
 }
 
-void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type,
-        audio_stream_type_t stream)
-{
-    sp<AudioCommand> command = new AudioCommand();
-    command->mCommand = START_TONE;
-    sp<ToneData> data = new ToneData();
-    data->mType = type;
-    data->mStream = stream;
-    command->mParam = data;
-    ALOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream);
-    sendCommand(command);
-}
-
-void AudioPolicyService::AudioCommandThread::stopToneCommand()
-{
-    sp<AudioCommand> command = new AudioCommand();
-    command->mCommand = STOP_TONE;
-    ALOGV("AudioCommandThread() adding tone stop");
-    sendCommand(command);
-}
-
 status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
                                                                float volume,
                                                                audio_io_handle_t output,
@@ -960,33 +925,25 @@
     return sendCommand(command, delayMs);
 }
 
-void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
-                                                               audio_stream_type_t stream,
-                                                               audio_session_t session)
+void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_port_handle_t portId)
 {
     sp<AudioCommand> command = new AudioCommand();
     command->mCommand = STOP_OUTPUT;
     sp<StopOutputData> data = new StopOutputData();
-    data->mIO = output;
-    data->mStream = stream;
-    data->mSession = session;
+    data->mPortId = portId;
     command->mParam = data;
-    ALOGV("AudioCommandThread() adding stop output %d", output);
+    ALOGV("AudioCommandThread() adding stop output portId %d", portId);
     sendCommand(command);
 }
 
-void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output,
-                                                                  audio_stream_type_t stream,
-                                                                  audio_session_t session)
+void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_port_handle_t portId)
 {
     sp<AudioCommand> command = new AudioCommand();
     command->mCommand = RELEASE_OUTPUT;
     sp<ReleaseOutputData> data = new ReleaseOutputData();
-    data->mIO = output;
-    data->mStream = stream;
-    data->mSession = session;
+    data->mPortId = portId;
     command->mParam = data;
-    ALOGV("AudioCommandThread() adding release output %d", output);
+    ALOGV("AudioCommandThread() adding release output portId %d", portId);
     sendCommand(command);
 }
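stopOutputCommand() and releaseOutputCommand() only enqueue work; the matching doStopOutput()/doReleaseOutput() calls run later in threadLoop(), in the STOP_OUTPUT/RELEASE_OUTPUT cases above. One plausible shape for the binder-side caller, sketched under the assumption that it posts to the output command thread (mOutputCommandThread) declared in AudioPolicyService.h:

    // Sketch only: defer the stop to the command thread so the binder caller
    // is not blocked while the policy manager tears the output down.
    status_t AudioPolicyService::stopOutput(audio_port_handle_t portId)
    {
        ALOGV("stopOutput portId %d", portId);
        mOutputCommandThread->stopOutputCommand(portId);
        return NO_ERROR;
    }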
 
@@ -1250,8 +1207,6 @@
 
         } break;
 
-        case START_TONE:
-        case STOP_TONE:
         default:
             break;
         }
@@ -1324,27 +1279,6 @@
                                                    output, delayMs);
 }
 
-int AudioPolicyService::startTone(audio_policy_tone_t tone,
-                                  audio_stream_type_t stream)
-{
-    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
-        ALOGE("startTone: illegal tone requested (%d)", tone);
-    }
-    if (stream != AUDIO_STREAM_VOICE_CALL) {
-        ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
-            tone);
-    }
-    mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
-                                          AUDIO_STREAM_VOICE_CALL);
-    return 0;
-}
-
-int AudioPolicyService::stopTone()
-{
-    mTonePlaybackThread->stopToneCommand();
-    return 0;
-}
-
 int AudioPolicyService::setVoiceVolume(float volume, int delayMs)
 {
     return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
@@ -1400,9 +1334,6 @@
 int aps_set_stream_volume(void *service, audio_stream_type_t stream,
                                      float volume, audio_io_handle_t output,
                                      int delay_ms);
-int aps_start_tone(void *service, audio_policy_tone_t tone,
-                              audio_stream_type_t stream);
-int aps_stop_tone(void *service);
 int aps_set_voice_volume(void *service, float volume, int delay_ms);
 };
 
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 09375f1..44c0347 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -81,15 +81,9 @@
                                       audio_output_flags_t flags,
                                       audio_port_handle_t *selectedDeviceId,
                                       audio_port_handle_t *portId);
-    virtual status_t startOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session);
-    virtual status_t stopOutput(audio_io_handle_t output,
-                                audio_stream_type_t stream,
-                                audio_session_t session);
-    virtual void releaseOutput(audio_io_handle_t output,
-                               audio_stream_type_t stream,
-                               audio_session_t session);
+    virtual status_t startOutput(audio_port_handle_t portId);
+    virtual status_t stopOutput(audio_port_handle_t portId);
+    virtual void releaseOutput(audio_port_handle_t portId);
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
@@ -132,6 +126,14 @@
     virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count);
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+                                            const String16& opPackageName,
+                                            const effect_uuid_t *uuid,
+                                            int32_t priority,
+                                            audio_usage_t usage,
+                                            audio_unique_id_t* id);
+    virtual status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,
@@ -157,8 +159,6 @@
                                      float volume,
                                      audio_io_handle_t output,
                                      int delayMs = 0);
-    virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
-    virtual status_t stopTone();
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
     virtual bool isOffloadSupported(const audio_offload_info_t &config);
 
@@ -192,8 +192,8 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_patch_handle_t *handle);
-    virtual status_t stopAudioSource(audio_patch_handle_t handle);
+                                      audio_port_handle_t *portId);
+    virtual status_t stopAudioSource(audio_port_handle_t portId);
 
     virtual status_t setMasterMono(bool mono);
     virtual status_t getMasterMono(bool *mono);
@@ -207,12 +207,8 @@
                                         bool reported);
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
 
-            status_t doStopOutput(audio_io_handle_t output,
-                                  audio_stream_type_t stream,
-                                  audio_session_t session);
-            void doReleaseOutput(audio_io_handle_t output,
-                                 audio_stream_type_t stream,
-                                 audio_session_t session);
+            status_t doStopOutput(audio_port_handle_t portId);
+            void doReleaseOutput(audio_port_handle_t portId);
 
             status_t clientCreateAudioPatch(const struct audio_patch *patch,
                                       audio_patch_handle_t *handle,
@@ -222,7 +218,7 @@
             virtual status_t clientSetAudioPortConfig(const struct audio_port_config *config,
                                                       int delayMs);
 
-            void removeNotificationClient(uid_t uid);
+            void removeNotificationClient(uid_t uid, pid_t pid);
             void onAudioPortListUpdate();
             void doOnAudioPortListUpdate();
             void onAudioPatchListUpdate();
@@ -304,10 +300,7 @@
         std::unordered_map<uid_t, bool> mCachedUids;
     };
 
-    // Thread used for tone playback and to send audio config commands to audio flinger
-    // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
-    // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
-    // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+    // Thread used to send audio config commands to audio flinger
     // A dedicated thread is necessary because audio flinger requires that the calling
     // process (user) has permission to modify audio settings.
     class AudioCommandThread : public Thread {
@@ -316,8 +309,6 @@
 
         // commands for AudioCommand
         enum {
-            START_TONE,
-            STOP_TONE,
             SET_VOLUME,
             SET_PARAMETERS,
             SET_VOICE_VOLUME,
@@ -342,20 +333,13 @@
         virtual     bool        threadLoop();
 
                     void        exit();
-                    void        startToneCommand(ToneGenerator::tone_type type,
-                                                 audio_stream_type_t stream);
-                    void        stopToneCommand();
                     status_t    volumeCommand(audio_stream_type_t stream, float volume,
                                             audio_io_handle_t output, int delayMs = 0);
                     status_t    parametersCommand(audio_io_handle_t ioHandle,
                                             const char *keyValuePairs, int delayMs = 0);
                     status_t    voiceVolumeCommand(float volume, int delayMs = 0);
-                    void        stopOutputCommand(audio_io_handle_t output,
-                                                  audio_stream_type_t stream,
-                                                  audio_session_t session);
-                    void        releaseOutputCommand(audio_io_handle_t output,
-                                                     audio_stream_type_t stream,
-                                                     audio_session_t session);
+                    void        stopOutputCommand(audio_port_handle_t portId);
+                    void        releaseOutputCommand(audio_port_handle_t portId);
                     status_t    sendCommand(sp<AudioCommand>& command, int delayMs = 0);
                     void        insertCommand_l(sp<AudioCommand>& command, int delayMs = 0);
                     status_t    createAudioPatchCommand(const struct audio_patch *patch,
@@ -387,7 +371,7 @@
 
             void dump(char* buffer, size_t size);
 
-            int mCommand;   // START_TONE, STOP_TONE ...
+            int mCommand;   // SET_VOLUME, SET_PARAMETERS...
             nsecs_t mTime;  // time stamp
             Mutex mLock;    // mutex associated to mCond
             Condition mCond; // condition for status return
@@ -403,12 +387,6 @@
             AudioCommandData() {}
         };
 
-        class ToneData : public AudioCommandData {
-        public:
-            ToneGenerator::tone_type mType; // tone type (START_TONE only)
-            audio_stream_type_t mStream;    // stream type (START_TONE only)
-        };
-
         class VolumeData : public AudioCommandData {
         public:
             audio_stream_type_t mStream;
@@ -429,16 +407,12 @@
 
         class StopOutputData : public AudioCommandData {
         public:
-            audio_io_handle_t mIO;
-            audio_stream_type_t mStream;
-            audio_session_t mSession;
+            audio_port_handle_t mPortId;
         };
 
         class ReleaseOutputData : public AudioCommandData {
         public:
-            audio_io_handle_t mIO;
-            audio_stream_type_t mStream;
-            audio_session_t mSession;
+            audio_port_handle_t mPortId;
         };
 
         class CreateAudioPatchData : public AudioCommandData {
@@ -475,7 +449,6 @@
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
-        ToneGenerator *mpToneGenerator;     // the tone generator
         sp<AudioCommand> mLastCommand;      // last processed command (used by dump)
         String8 mName;                      // string used by wake lock for delayed commands
         wp<AudioPolicyService> mService;
@@ -550,11 +523,6 @@
         // function enabling the audio policy manager to receive proprietary information directly from the audio hardware interface.
         virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
 
-        // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
-        // over a telephony device during a phone call.
-        virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
-        virtual status_t stopTone();
-
         // set down link audio volume.
         virtual status_t setVoiceVolume(float volume, int delayMs = 0);
 
@@ -594,7 +562,7 @@
     public:
                             NotificationClient(const sp<AudioPolicyService>& service,
                                                 const sp<IAudioPolicyServiceClient>& client,
-                                                uid_t uid);
+                                                uid_t uid, pid_t pid);
         virtual             ~NotificationClient();
 
                             void      onAudioPortListUpdate();
@@ -607,6 +575,10 @@
                                         audio_patch_handle_t patchHandle);
                             void      setAudioPortCallbacksEnabled(bool enabled);
 
+                            uid_t uid() {
+                                return mUid;
+                            }
+
                 // IBinder::DeathRecipient
                 virtual     void        binderDied(const wp<IBinder>& who);
 
@@ -616,34 +588,61 @@
 
         const wp<AudioPolicyService>        mService;
         const uid_t                         mUid;
+        const pid_t                         mPid;
         const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
               bool                          mAudioPortCallbacksEnabled;
     };
 
+    class AudioClient : public virtual RefBase {
+    public:
+                AudioClient(const audio_attributes_t attributes,
+                            const audio_io_handle_t io, uid_t uid, pid_t pid,
+                            const audio_session_t session, const audio_port_handle_t deviceId) :
+                                attributes(attributes), io(io), uid(uid), pid(pid),
+                                session(session), deviceId(deviceId), active(false) {}
+                ~AudioClient() override = default;
+
+
+        const audio_attributes_t attributes; // source, flags ...
+        const audio_io_handle_t io;          // audio HAL stream IO handle
+        const uid_t uid;                     // client UID
+        const pid_t pid;                     // client PID
+        const audio_session_t session;       // audio session ID
+        const audio_port_handle_t deviceId;  // selected input device port ID
+              bool active;                   // Playback/Capture is active or inactive
+    };
+
     // --- AudioRecordClient ---
     // Information about each registered AudioRecord client
     // (between calls to getInputForAttr() and releaseInput())
-    class AudioRecordClient : public RefBase {
+    class AudioRecordClient : public AudioClient {
     public:
                 AudioRecordClient(const audio_attributes_t attributes,
-                                  const audio_io_handle_t input, uid_t uid, pid_t pid,
-                                  const String16& opPackageName, const audio_session_t session) :
-                                      attributes(attributes),
-                                      input(input), uid(uid), pid(pid),
-                                      opPackageName(opPackageName), session(session),
-                                      active(false), isConcurrent(false), isVirtualDevice(false) {}
-        virtual ~AudioRecordClient() {}
+                          const audio_io_handle_t io, uid_t uid, pid_t pid,
+                          const audio_session_t session, const audio_port_handle_t deviceId,
+                          const String16& opPackageName) :
+                    AudioClient(attributes, io, uid, pid, session, deviceId),
+                    opPackageName(opPackageName), isConcurrent(false), isVirtualDevice(false) {}
+                ~AudioRecordClient() override = default;
 
-        const audio_attributes_t attributes; // source, flags ...
-        const audio_io_handle_t input;       // audio HAL input IO handle
-        const uid_t uid;                     // client UID
-        const pid_t pid;                     // client PID
         const String16 opPackageName;        // client package name
-        const audio_session_t session;       // audio session ID
-        bool active;                   // Capture is active or inactive
         bool isConcurrent;             // is allowed to concurrent capture
         bool isVirtualDevice;          // uses virtual device: updated by APM::getInputForAttr()
-        audio_port_handle_t deviceId;  // selected input device port ID
+    };
+
+    // --- AudioPlaybackClient ---
+    // Information about each registered AudioTrack client
+    // (between calls to getOutputForAttr() and releaseOutput())
+    class AudioPlaybackClient : public AudioClient {
+    public:
+                AudioPlaybackClient(const audio_attributes_t attributes,
+                            const audio_io_handle_t io, uid_t uid, pid_t pid,
+                            const audio_session_t session, audio_port_handle_t deviceId,
+                            audio_stream_type_t stream) :
+                    AudioClient(attributes, io, uid, pid, session, deviceId), stream(stream) {}
+                ~AudioPlaybackClient() override = default;
+
+        const audio_stream_type_t stream;
     };
 
     // A class automatically clearing and restoring binder caller identity inside
@@ -673,14 +672,13 @@
     // mLock protects AudioPolicyManager methods that can call into audio flinger
     // and possibly back in to audio policy service and acquire mEffectsLock.
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
-    sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
     AudioPolicyInterface *mAudioPolicyManager;
     AudioPolicyClient *mAudioPolicyClient;
 
-    DefaultKeyedVector< uid_t, sp<NotificationClient> >    mNotificationClients;
+    DefaultKeyedVector< int64_t, sp<NotificationClient> >    mNotificationClients;
     Mutex mNotificationClientsLock;  // protects mNotificationClients
     // Manage all effects configured in audio_effects.conf
     sp<AudioPolicyEffects> mAudioPolicyEffects;
@@ -688,6 +686,7 @@
 
     sp<UidPolicy> mUidPolicy;
     DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> >   mAudioRecordClients;
+    DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> >   mAudioPlaybackClients;
 };
 
 } // namespace android
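With the shared AudioClient base in place, playback clients can be tracked the same way record clients already are, keyed by port ID in mAudioPlaybackClients. A hedged sketch of the registration a getOutputForAttr() implementation might perform; all local names beyond the members declared above are assumptions:

    // Illustrative only: record the playback client once an output has been chosen.
    // attr/output/session/stream/selectedDeviceId/portId stand for the
    // getOutputForAttr() parameters; uid/pid are the validated caller identity.
    sp<AudioPlaybackClient> client = new AudioPlaybackClient(
            *attr, *output, uid, pid, session, *selectedDeviceId, *stream);
    mAudioPlaybackClients.add(*portId, client);
    // startOutput(portId)/stopOutput(portId) can then toggle client->active, and
    // releaseOutput(portId) drops the entry via mAudioPlaybackClients.removeItem(portId).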
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index cfa9ab1..b739b88 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -29,3 +29,26 @@
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 include $(BUILD_NATIVE_TEST)
+
+# system/audio.h utilities test
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+  libbase \
+  liblog \
+  libmedia_helper \
+  libutils
+
+LOCAL_SRC_FILES := \
+  systemaudio_tests.cpp \
+
+LOCAL_MODULE := systemaudio_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index eb8222c..2ff7675 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -60,9 +60,6 @@
                        int /*delayMs*/) override { }
     String8 getParameters(audio_io_handle_t /*ioHandle*/,
                           const String8& /*keys*/) override { return String8(); }
-    status_t startTone(audio_policy_tone_t /*tone*/,
-                       audio_stream_type_t /*stream*/) override { return NO_INIT; }
-    status_t stopTone() override { return NO_INIT; }
     status_t setVoiceVolume(float /*volume*/, int /*delayMs*/) override { return NO_INIT; }
     status_t moveEffects(audio_session_t /*session*/,
                          audio_io_handle_t /*srcOutput*/,
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
new file mode 100644
index 0000000..abaae52
--- /dev/null
+++ b/services/audiopolicy/tests/systemaudio_tests.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define LOG_TAG "SysAudio_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+#include <system/audio.h>
+
+using namespace android;
+
+TEST(SystemAudioTest, PatchInvalid) {
+    audio_patch patch{};
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
+    patch.num_sinks = 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = 1;
+    patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+    patch.num_sources = 0;
+    patch.num_sinks = 1;
+    ASSERT_FALSE(audio_patch_is_valid(&patch));
+}
+
+TEST(SystemAudioTest, PatchValid) {
+    const audio_port_config src = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    // It's OK not to have sinks.
+    ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).patch()));
+    const audio_port_config sink = {
+        .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSink(sink).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patch_is_valid(
+                    (PatchBuilder{}).addSource(src).addSource(src).
+                    addSink(sink).addSink(sink).patch()));
+}
+
+TEST(SystemAudioTest, PatchHwAvSync) {
+    audio_port_config device_src_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+    device_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+    device_src_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+
+    audio_port_config device_sink_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+    device_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+    device_sink_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+
+    audio_port_config mix_sink_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_MIX };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+    mix_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+    mix_sink_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+
+    audio_port_config mix_src_cfg = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_MIX };
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+    mix_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+    mix_src_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+}
+
+TEST(SystemAudioTest, PatchEqual) {
+    const audio_patch patch1{}, patch2{};
+    // Invalid patches are not equal.
+    ASSERT_FALSE(audio_patches_are_equal(&patch1, &patch2));
+    const audio_port_config src = {
+        .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+    const audio_port_config sink = {
+        .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_TRUE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+    audio_port_config sink_hw_av_sync = sink;
+    sink_hw_av_sync.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+    sink_hw_av_sync.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+    ASSERT_FALSE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+    ASSERT_TRUE(audio_patches_are_equal(
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
+                    (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index d59b313..c8b3c2f 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -780,33 +780,35 @@
     int lastJpegStreamId = mJpegProcessor->getStreamId();
     // If jpeg stream will slow down preview, make sure we remove it before starting preview
     if (params.slowJpegMode) {
-        // Pause preview if we are streaming
-        int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
-        if (activeRequestId != 0) {
-            res = mStreamingProcessor->togglePauseStream(/*pause*/true);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
+        if (lastJpegStreamId != NO_STREAM) {
+            // Pause preview if we are streaming
+            int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
+            if (activeRequestId != 0) {
+                res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+                            __FUNCTION__, mCameraId, strerror(-res), res);
+                }
+                res = mDevice->waitUntilDrained();
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                            __FUNCTION__, mCameraId, strerror(-res), res);
+                }
             }
-            res = mDevice->waitUntilDrained();
+
+            res = mJpegProcessor->deleteStream();
+
             if (res != OK) {
-                ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
+                ALOGE("%s: Camera %d: delete Jpeg stream failed: %s (%d)",
+                        __FUNCTION__, mCameraId,  strerror(-res), res);
             }
-        }
 
-        res = mJpegProcessor->deleteStream();
-
-        if (res != OK) {
-            ALOGE("%s: Camera %d: delete Jpeg stream failed: %s (%d)",
-                    __FUNCTION__, mCameraId,  strerror(-res), res);
-        }
-
-        if (activeRequestId != 0) {
-            res = mStreamingProcessor->togglePauseStream(/*pause*/false);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
+            if (activeRequestId != 0) {
+                res = mStreamingProcessor->togglePauseStream(/*pause*/false);
+                if (res != OK) {
+                    ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
+                            __FUNCTION__, mCameraId, strerror(-res), res);
+                }
             }
         }
     } else {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 98d0534..84428c2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -661,7 +661,8 @@
         }
 
         sp<Surface> surface;
-        res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer);
+        res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
+                physicalCameraId);
 
         if (!res.isOk())
             return res;
@@ -889,6 +890,8 @@
 
     const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
             outputConfiguration.getGraphicBufferProducers();
+    String8 physicalCameraId(outputConfiguration.getPhysicalCameraId());
+
     auto producerCount = bufferProducers.size();
     if (producerCount == 0) {
         ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -942,7 +945,7 @@
         OutputStreamInfo outInfo;
         sp<Surface> surface;
         res = createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false, surface,
-                newOutputsMap.valueAt(i));
+                newOutputsMap.valueAt(i), physicalCameraId);
         if (!res.isOk())
             return res;
 
@@ -1021,7 +1024,8 @@
 
 binder::Status CameraDeviceClient::createSurfaceFromGbp(
         OutputStreamInfo& streamInfo, bool isStreamInfoValid,
-        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp) {
+        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+        const String8& physicalId) {
 
     // bufferProducer must be non-null
     if (gbp == nullptr) {
@@ -1098,7 +1102,7 @@
     // Round dimensions to the nearest dimensions available for this format
     if (flexibleConsumer && isPublicFormat(format) &&
             !CameraDeviceClient::roundBufferDimensionNearest(width, height,
-            format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+            format, dataSpace, mDevice->info(physicalId), /*out*/&width, /*out*/&height)) {
         String8 msg = String8::format("Camera %s: No supported stream configurations with "
                 "format %#x defined, failed to create output stream",
                 mCameraIdStr.string(), format);
@@ -1468,6 +1472,7 @@
 
     const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
             outputConfiguration.getGraphicBufferProducers();
+    String8 physicalId(outputConfiguration.getPhysicalCameraId());
 
     if (bufferProducers.size() == 0) {
         ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -1521,7 +1526,7 @@
 
         sp<Surface> surface;
         res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
-                surface, bufferProducer);
+                surface, bufferProducer, physicalId);
 
         if (!res.isOk())
             return res;
@@ -1677,9 +1682,12 @@
 
     // WORKAROUND: HAL refuses to disconnect while there's streams in flight
     {
-        mDevice->clearStreamingRequest();
-
+        int64_t lastFrameNumber;
         status_t code;
+        if ((code = mDevice->flush(&lastFrameNumber)) != OK) {
+            ALOGE("%s: flush failed with code 0x%x", __FUNCTION__, code);
+        }
+
         if ((code = mDevice->waitUntilDrained()) != OK) {
             ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
                   code);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 5aaf5aa..c30561d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -258,7 +258,8 @@
     // Create a Surface from an IGraphicBufferProducer. Returns an error if the
     // IGraphicBufferProducer's properties don't match streamInfo
     binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
-            sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp);
+            sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+            const String8& physicalCameraId);
 
 
     // Utility method to insert the surface into SurfaceMap
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 0ba7403..98c1b5e 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -69,6 +69,10 @@
      * The device's static characteristics metadata buffer
      */
     virtual const CameraMetadata& info() const = 0;
+    /**
+     * The physical camera device's static characteristics metadata buffer
+     */
+    virtual const CameraMetadata& info(const String8& physicalId) const = 0;
 
     struct PhysicalCameraSettings {
         std::string cameraId;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 43f1a91..3be6399 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -336,6 +336,7 @@
         const hardware::hidl_string& /*fqName*/,
         const hardware::hidl_string& name,
         bool /*preexisting*/) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
     {
         std::lock_guard<std::mutex> lock(mInterfaceMutex);
 
@@ -458,6 +459,7 @@
 }
 
 status_t CameraProviderManager::removeProvider(const std::string& provider) {
+    std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
     std::unique_lock<std::mutex> lock(mInterfaceMutex);
     std::vector<String8> removedDeviceIds;
     status_t res = NAME_NOT_FOUND;
@@ -631,7 +633,12 @@
 
     mUniqueCameraIds.insert(id);
     if (isAPI1Compatible) {
-        mUniqueAPI1CompatibleCameraIds.push_back(id);
+        // addDevice can be called more than once for the same camera id if HAL
+        // supports openLegacy.
+        if (std::find(mUniqueAPI1CompatibleCameraIds.begin(), mUniqueAPI1CompatibleCameraIds.end(),
+                id) == mUniqueAPI1CompatibleCameraIds.end()) {
+            mUniqueAPI1CompatibleCameraIds.push_back(id);
+        }
     }
 
     if (parsedId != nullptr) {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index b8b8b8c..c523c2d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -246,6 +246,9 @@
     wp<StatusListener> mListener;
     ServiceInteractionProxy* mServiceProxy;
 
+    // mProviderLifecycleLock is locked during onRegistration and removeProvider
+    mutable std::mutex mProviderLifecycleLock;
+
     static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
 
     struct ProviderInfo :
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 543914e..491ed72 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -121,11 +121,25 @@
 
     res = manager->getCameraCharacteristics(mId.string(), &mDeviceInfo);
     if (res != OK) {
-        SET_ERR_L("Could not retrive camera characteristics: %s (%d)", strerror(-res), res);
+        SET_ERR_L("Could not retrieve camera characteristics: %s (%d)", strerror(-res), res);
         session->close();
         return res;
     }
 
+    std::vector<std::string> physicalCameraIds;
+    bool isLogical = CameraProviderManager::isLogicalCamera(mDeviceInfo, &physicalCameraIds);
+    if (isLogical) {
+        for (auto& physicalId : physicalCameraIds) {
+            res = manager->getCameraCharacteristics(physicalId, &mPhysicalDeviceInfoMap[physicalId]);
+            if (res != OK) {
+                SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)",
+                        physicalId.c_str(), strerror(-res), res);
+                session->close();
+                return res;
+            }
+        }
+    }
+
     std::shared_ptr<RequestMetadataQueue> queue;
     auto requestQueueRet = session->getCaptureRequestMetadataQueue(
         [&queue](const auto& descriptor) {
@@ -263,6 +277,7 @@
 status_t Camera3Device::disconnect() {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock stLock(mTrackerLock);
 
     ALOGI("%s: E", __FUNCTION__);
 
@@ -719,7 +734,7 @@
     return OK;
 }
 
-const CameraMetadata& Camera3Device::info() const {
+const CameraMetadata& Camera3Device::info(const String8& physicalId) const {
     ALOGVV("%s: E", __FUNCTION__);
     if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
                     mStatus == STATUS_ERROR)) {
@@ -727,7 +742,22 @@
                 mStatus == STATUS_ERROR ?
                 "when in error state" : "before init");
     }
-    return mDeviceInfo;
+    if (physicalId.isEmpty()) {
+        return mDeviceInfo;
+    } else {
+        std::string id(physicalId.c_str());
+        if (mPhysicalDeviceInfoMap.find(id) != mPhysicalDeviceInfoMap.end()) {
+            return mPhysicalDeviceInfoMap.at(id);
+        } else {
+            ALOGE("%s: Invalid physical camera id %s", __FUNCTION__, physicalId.c_str());
+            return mDeviceInfo;
+        }
+    }
+}
+
+const CameraMetadata& Camera3Device::info() const {
+    String8 emptyId;
+    return info(emptyId);
 }
 
 status_t Camera3Device::checkStatusOkToCaptureLocked() {
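info() now dispatches on the physical camera id: an empty id returns the logical device's static metadata, a known physical id returns that physical camera's metadata, and an unknown id logs an error and falls back to the logical metadata. A small usage sketch (the id string "0" is an assumed physical camera id, not taken from this patch):

    // Illustrative lookups against the two overloads above.
    const CameraMetadata& logicalInfo  = mDevice->info();              // logical device metadata
    const CameraMetadata& physicalInfo = mDevice->info(String8("0"));  // per-physical-camera
                                                                       // metadata, falling back
                                                                       // to the logical metadata
                                                                       // for unknown ids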
@@ -2143,8 +2173,14 @@
 
         res = stream->finishConfiguration();
         if (res != OK) {
-            SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
-                   stream->getId(), strerror(-res), res);
+            // If finishConfiguration fails due to abandoned surface, do not set
+            // device to error state.
+            bool isSurfaceAbandoned =
+                    (res == NO_INIT || res == DEAD_OBJECT) && stream->isAbandoned();
+            if (!isSurfaceAbandoned) {
+                SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                        stream->getId(), strerror(-res), res);
+            }
             return res;
         }
     }
@@ -2361,9 +2397,16 @@
             //present streams end up with outstanding buffers that will
             //not get drained.
             internalUpdateStatusLocked(STATUS_ACTIVE);
+        } else if (rc == DEAD_OBJECT) {
+            // DEAD_OBJECT can be returned if either the consumer surface is
+            // abandoned or the HAL has died.
+            // - If the HAL has died, the configureStreamsLocked call will set
+            //   the device to the error state.
+            // - If the surface is abandoned, we should not set the device to
+            //   the error state.
+            ALOGE("Failed to re-configure camera due to abandoned surface");
         } else {
-            setErrorStateLocked("%s: Failed to re-configure camera: %d",
-                    __FUNCTION__, rc);
+            SET_ERR_L("Failed to re-configure camera: %d", rc);
         }
     } else {
         ALOGE("%s: Failed to pause streaming: %d", __FUNCTION__, rc);
@@ -2497,6 +2540,9 @@
             CLOGE("Can't finish configuring input stream %d: %s (%d)",
                     mInputStream->getId(), strerror(-res), res);
             cancelStreamsConfigurationLocked();
+            if ((res == NO_INIT || res == DEAD_OBJECT) && mInputStream->isAbandoned()) {
+                return DEAD_OBJECT;
+            }
             return BAD_VALUE;
         }
     }
@@ -2510,6 +2556,9 @@
                 CLOGE("Can't finish configuring output stream %d: %s (%d)",
                         outputStream->getId(), strerror(-res), res);
                 cancelStreamsConfigurationLocked();
+                if ((res == NO_INIT || res == DEAD_OBJECT) && outputStream->isAbandoned()) {
+                    return DEAD_OBJECT;
+                }
                 return BAD_VALUE;
             }
         }
@@ -2699,8 +2748,9 @@
     if (res < 0) return res;
 
     if (mInFlightMap.size() == 1) {
-        // hold mLock to prevent race with disconnect
-        Mutex::Autolock l(mLock);
+        // Hold a separate dedicated tracker lock to prevent race with disconnect and also
+        // avoid a deadlock during reprocess requests.
+        Mutex::Autolock l(mTrackerLock);
         if (mStatusTracker != nullptr) {
             mStatusTracker->markComponentActive(mInFlightStatusId);
         }
@@ -2733,8 +2783,9 @@
 
     // Indicate idle inFlightMap to the status tracker
     if (mInFlightMap.size() == 0) {
-        // hold mLock to prevent race with disconnect
-        Mutex::Autolock l(mLock);
+        // Hold a separate dedicated tracker lock to prevent race with disconnect and also
+        // avoid a deadlock during reprocess requests.
+        Mutex::Autolock l(mTrackerLock);
         if (mStatusTracker != nullptr) {
             mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
         }
@@ -3889,18 +3940,17 @@
     }
 
     hardware::details::return_status err;
+    auto resultCallback =
+        [&status, &numRequestProcessed] (auto s, uint32_t n) {
+                status = s;
+                *numRequestProcessed = n;
+        };
     if (hidlSession_3_4 != nullptr) {
         err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
-            [&status, &numRequestProcessed] (auto s, uint32_t n) {
-                status = s;
-                *numRequestProcessed = n;
-            });
+                                                         resultCallback);
     } else {
         err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
-            [&status, &numRequestProcessed] (auto s, uint32_t n) {
-                status = s;
-                *numRequestProcessed = n;
-            });
+                                                  resultCallback);
     }
     if (!err.isOk()) {
         ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
@@ -4710,6 +4760,7 @@
 status_t Camera3Device::RequestThread::prepareHalRequests() {
     ATRACE_CALL();
 
+    bool batchedRequest = mNextRequests[0].captureRequest->mBatchSize > 1;
     for (size_t i = 0; i < mNextRequests.size(); i++) {
         auto& nextRequest = mNextRequests.editItemAt(i);
         sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
@@ -4733,7 +4784,10 @@
         mPrevTriggers = triggerCount;
 
         // If the request is the same as last, or we had triggers last time
-        bool newRequest = mPrevRequest != captureRequest || triggersMixedIn;
+        bool newRequest = (mPrevRequest != captureRequest || triggersMixedIn) &&
+                // Request settings are all the same within one batch, so only treat the first
+                // request in a batch as new
+                !(batchedRequest && i > 0);
         if (newRequest) {
             /**
              * HAL workaround:
@@ -4882,7 +4936,7 @@
         // preview), and the current request is not the last one in the batch,
         // do not send callback to the app.
         bool hasCallback = true;
-        if (mNextRequests[0].captureRequest->mBatchSize > 1 && i != mNextRequests.size()-1) {
+        if (batchedRequest && i != mNextRequests.size()-1) {
             hasCallback = false;
         }
         res = parent->registerInFlight(halRequest->frame_number,
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d8fe19f..96212ab 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -101,6 +101,7 @@
     status_t disconnect() override;
     status_t dump(int fd, const Vector<String16> &args) override;
     const CameraMetadata& info() const override;
+    const CameraMetadata& info(const String8& physicalId) const override;
 
     // Capture and setStreamingRequest will configure streams if currently in
     // idle state
@@ -379,6 +380,7 @@
     sp<HalInterface> mInterface;
 
     CameraMetadata             mDeviceInfo;
+    std::unordered_map<std::string, CameraMetadata> mPhysicalDeviceInfoMap;
 
     CameraMetadata             mRequestTemplateCache[CAMERA3_TEMPLATE_COUNT];
 
@@ -1208,6 +1210,9 @@
 
     static callbacks_notify_t sNotify;
 
+    // Synchronizes access to status tracker between inflight updates and disconnect.
+    // b/79972865
+    Mutex mTrackerLock;
 }; // class Camera3Device
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index b3c3717..2c020a2 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -564,7 +564,7 @@
 
             // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
             // let prepareNextBuffer handle the error.)
-            if (res == NO_INIT && mState == STATE_CONFIGURED) {
+            if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
                 mState = STATE_ABANDONED;
             }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 1105b75..6030d15 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -331,7 +331,14 @@
 
     status_t res;
     res = configureQueueLocked();
-    if (res != OK) {
+    // configureQueueLocked could return an error if the surface has been abandoned.
+    // Treat that as a non-fatal error.
+    if (res == NO_INIT || res == DEAD_OBJECT) {
+        ALOGE("%s: Unable to configure stream %d queue (non-fatal): %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        mState = STATE_ABANDONED;
+        return res;
+    } else if (res != OK) {
         ALOGE("%s: Unable to configure stream %d queue: %s (%d)",
                 __FUNCTION__, mId, strerror(-res), res);
         mState = STATE_ERROR;
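With NO_INIT/DEAD_OBJECT from configureQueueLocked() now parking the stream in STATE_ABANDONED instead of STATE_ERROR, callers can treat an abandoned consumer as recoverable. A hedged caller-side sketch mirroring the handling this patch adds in Camera3Device; the log messages are illustrative:

    // Sketch only: abandonment is non-fatal, anything else is a device-level error.
    status_t res = stream->finishConfiguration();
    if ((res == NO_INIT || res == DEAD_OBJECT) && stream->isAbandoned()) {
        // The consumer surface went away; drop this stream, keep the device usable.
        ALOGW("Stream %d was abandoned during configuration", stream->getId());
    } else if (res != OK) {
        ALOGE("Can't finish configuring stream %d: %s (%d)",
                stream->getId(), strerror(-res), res);
        // propagate as a fatal error
    }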
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index a60cb56..4ddcf1a 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -482,6 +482,7 @@
     // after the HAL has provided usage and max_buffers values. After this call,
     // the stream must be ready to produce all buffers for registration with
     // HAL.
+    // Returns NO_INIT or DEAD_OBJECT if the queue has been abandoned.
     virtual status_t configureQueueLocked() = 0;
 
     // Get the total number of buffers in the queue
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index eef6658..ae7af8e 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -49,7 +49,7 @@
 };
 
 // Only for capture result
-constexpr std::array<uint32_t, 2> DistortionMapper::kResultPointsToCorrect = {
+constexpr std::array<uint32_t, 2> DistortionMapper::kResultPointsToCorrectNoClamp = {
     ANDROID_STATISTICS_FACE_RECTANGLES, // Says rectangles, is really points
     ANDROID_STATISTICS_FACE_LANDMARKS,
 };
@@ -79,12 +79,21 @@
     array = deviceInfo.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
     if (array.count != 4) return BAD_VALUE;
 
-    mArrayWidth = array.data.i32[2];
-    mArrayHeight = array.data.i32[3];
+    float arrayX = static_cast<float>(array.data.i32[0]);
+    float arrayY = static_cast<float>(array.data.i32[1]);
+    mArrayWidth = static_cast<float>(array.data.i32[2]);
+    mArrayHeight = static_cast<float>(array.data.i32[3]);
 
     array = deviceInfo.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
-    mActiveWidth = array.data.i32[2];
-    mActiveHeight = array.data.i32[3];
+    if (array.count != 4) return BAD_VALUE;
+
+    float activeX = static_cast<float>(array.data.i32[0]);
+    float activeY = static_cast<float>(array.data.i32[1]);
+    mActiveWidth = static_cast<float>(array.data.i32[2]);
+    mActiveHeight = static_cast<float>(array.data.i32[3]);
+
+    mArrayDiffX = activeX - arrayX;
+    mArrayDiffY = activeY - arrayY;
 
     return updateCalibration(deviceInfo);
 }
@@ -111,22 +120,13 @@
                 if (weight == 0) {
                     continue;
                 }
-                res = mapCorrectedToRaw(e.data.i32 + j, 2);
+                res = mapCorrectedToRaw(e.data.i32 + j, 2, /*clamp*/true);
                 if (res != OK) return res;
-                for (size_t k = 0; k < 4; k+=2) {
-                    int32_t& x = e.data.i32[j + k];
-                    int32_t& y = e.data.i32[j + k + 1];
-                    // Clamp to within active array
-                    x = std::max(0, x);
-                    x = std::min(mActiveWidth - 1, x);
-                    y = std::max(0, y);
-                    y = std::min(mActiveHeight - 1, y);
-                }
             }
         }
         for (auto rect : kRequestRectsToCorrect) {
             e = request->find(rect);
-            res = mapCorrectedRectToRaw(e.data.i32, e.count / 4);
+            res = mapCorrectedRectToRaw(e.data.i32, e.count / 4, /*clamp*/true);
             if (res != OK) return res;
         }
     }
@@ -156,27 +156,18 @@
                 if (weight == 0) {
                     continue;
                 }
-                res = mapRawToCorrected(e.data.i32 + j, 2);
+                res = mapRawToCorrected(e.data.i32 + j, 2, /*clamp*/true);
                 if (res != OK) return res;
-                for (size_t k = 0; k < 4; k+=2) {
-                    int32_t& x = e.data.i32[j + k];
-                    int32_t& y = e.data.i32[j + k + 1];
-                    // Clamp to within active array
-                    x = std::max(0, x);
-                    x = std::min(mActiveWidth - 1, x);
-                    y = std::max(0, y);
-                    y = std::min(mActiveHeight - 1, y);
-                }
             }
         }
         for (auto rect : kResultRectsToCorrect) {
             e = result->find(rect);
-            res = mapRawRectToCorrected(e.data.i32, e.count / 4);
+            res = mapRawRectToCorrected(e.data.i32, e.count / 4, /*clamp*/true);
             if (res != OK) return res;
         }
-        for (auto pts : kResultPointsToCorrect) {
+        for (auto pts : kResultPointsToCorrectNoClamp) {
             e = result->find(pts);
-            res = mapRawToCorrected(e.data.i32, e.count / 2);
+            res = mapRawToCorrected(e.data.i32, e.count / 2, /*clamp*/false);
             if (res != OK) return res;
         }
     }
@@ -232,9 +223,12 @@
     return OK;
 }
 
-status_t DistortionMapper::mapRawToCorrected(int32_t *coordPairs, int coordCount) {
+status_t DistortionMapper::mapRawToCorrected(int32_t *coordPairs, int coordCount,
+        bool clamp, bool simple) {
     if (!mValidMapping) return INVALID_OPERATION;
 
+    if (simple) return mapRawToCorrectedSimple(coordPairs, coordCount, clamp);
+
     if (!mValidGrids) {
         status_t res = buildGrids();
         if (res != OK) return res;
@@ -275,6 +269,12 @@
         // Interpolate along left edge of corrected quad (which are axis-aligned) for y
         float corrY = corrQuad->coords[1] + v * (corrQuad->coords[7] - corrQuad->coords[1]);
 
+        // Clamp to within active array
+        if (clamp) {
+            corrX = std::min(mActiveWidth - 1, std::max(0.f, corrX));
+            corrY = std::min(mActiveHeight - 1, std::max(0.f, corrY));
+        }
+
         coordPairs[i] = static_cast<int32_t>(std::round(corrX));
         coordPairs[i + 1] = static_cast<int32_t>(std::round(corrY));
     }
@@ -282,37 +282,70 @@
     return OK;
 }
 
-status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount) {
+status_t DistortionMapper::mapRawToCorrectedSimple(int32_t *coordPairs, int coordCount,
+        bool clamp) const {
+    if (!mValidMapping) return INVALID_OPERATION;
+
+    float scaleX = mActiveWidth / mArrayWidth;
+    float scaleY = mActiveHeight / mArrayHeight;
+    for (int i = 0; i < coordCount * 2; i += 2) {
+        float x = coordPairs[i];
+        float y = coordPairs[i + 1];
+        float corrX = x * scaleX;
+        float corrY = y * scaleY;
+        if (clamp) {
+            corrX = std::min(mActiveWidth - 1, std::max(0.f, corrX));
+            corrY = std::min(mActiveHeight - 1, std::max(0.f, corrY));
+        }
+        coordPairs[i] = static_cast<int32_t>(std::round(corrX));
+        coordPairs[i + 1] = static_cast<int32_t>(std::round(corrY));
+    }
+
+    return OK;
+}
+
+status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount, bool clamp,
+        bool simple) {
     if (!mValidMapping) return INVALID_OPERATION;
     for (int i = 0; i < rectCount * 4; i += 4) {
         // Map from (l, t, width, height) to (l, t, r, b)
         int32_t coords[4] = {
             rects[i],
             rects[i + 1],
-            rects[i] + rects[i + 2],
-            rects[i + 1] + rects[i + 3]
+            rects[i] + rects[i + 2] - 1,
+            rects[i + 1] + rects[i + 3] - 1
         };
 
-        mapRawToCorrected(coords, 2);
+        mapRawToCorrected(coords, 2, clamp, simple);
 
         // Map back to (l, t, width, height)
         rects[i] = coords[0];
         rects[i + 1] = coords[1];
-        rects[i + 2] = coords[2] - coords[0];
-        rects[i + 3] = coords[3] - coords[1];
+        rects[i + 2] = coords[2] - coords[0] + 1;
+        rects[i + 3] = coords[3] - coords[1] + 1;
     }
 
     return OK;
 }
 
+status_t DistortionMapper::mapCorrectedToRaw(int32_t *coordPairs, int coordCount, bool clamp,
+        bool simple) const {
+    return mapCorrectedToRawImpl(coordPairs, coordCount, clamp, simple);
+}
+
 template<typename T>
-status_t DistortionMapper::mapCorrectedToRaw(T *coordPairs, int coordCount) const {
+status_t DistortionMapper::mapCorrectedToRawImpl(T *coordPairs, int coordCount, bool clamp,
+        bool simple) const {
     if (!mValidMapping) return INVALID_OPERATION;
 
+    if (simple) return mapCorrectedToRawImplSimple(coordPairs, coordCount, clamp);
+
+    float activeCx = mCx - mArrayDiffX;
+    float activeCy = mCy - mArrayDiffY;
     for (int i = 0; i < coordCount * 2; i += 2) {
-        // Move to normalized space
-        float ywi = (coordPairs[i + 1] - mCy) * mInvFy;
-        float xwi = (coordPairs[i] - mCx - mS * ywi) * mInvFx;
+        // Move to normalized space from active array space
+        float ywi = (coordPairs[i + 1] - activeCy) * mInvFy;
+        float xwi = (coordPairs[i] - activeCx - mS * ywi) * mInvFx;
         // Apply distortion model to calculate raw image coordinates
         float rSq = xwi * xwi + ywi * ywi;
         float Fr = 1.f + (mK[0] * rSq) + (mK[1] * rSq * rSq) + (mK[2] * rSq * rSq * rSq);
@@ -321,6 +354,11 @@
         // Move back to image space
         float xr = mFx * xc + mS * yc + mCx;
         float yr = mFy * yc + mCy;
+        // Clamp to within pre-correction active array
+        if (clamp) {
+            xr = std::min(mArrayWidth - 1, std::max(0.f, xr));
+            yr = std::min(mArrayHeight - 1, std::max(0.f, yr));
+        }
 
         coordPairs[i] = static_cast<T>(std::round(xr));
         coordPairs[i + 1] = static_cast<T>(std::round(yr));
@@ -329,10 +367,32 @@
     return OK;
 }
 
-template status_t DistortionMapper::mapCorrectedToRaw(int32_t*, int) const;
-template status_t DistortionMapper::mapCorrectedToRaw(float*, int) const;
+template<typename T>
+status_t DistortionMapper::mapCorrectedToRawImplSimple(T *coordPairs, int coordCount,
+        bool clamp) const {
+    if (!mValidMapping) return INVALID_OPERATION;
 
-status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount) const {
+    float scaleX = mArrayWidth / mActiveWidth;
+    float scaleY = mArrayHeight / mActiveHeight;
+    for (int i = 0; i < coordCount * 2; i += 2) {
+        float x = coordPairs[i];
+        float y = coordPairs[i + 1];
+        float rawX = x * scaleX;
+        float rawY = y * scaleY;
+        if (clamp) {
+            rawX = std::min(mArrayWidth - 1, std::max(0.f, rawX));
+            rawY = std::min(mArrayHeight - 1, std::max(0.f, rawY));
+        }
+        coordPairs[i] = static_cast<T>(std::round(rawX));
+        coordPairs[i + 1] = static_cast<T>(std::round(rawY));
+    }
+
+    return OK;
+}
+
+
+status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount, bool clamp,
+        bool simple) const {
     if (!mValidMapping) return INVALID_OPERATION;
 
     for (int i = 0; i < rectCount * 4; i += 4) {
@@ -340,17 +400,17 @@
         int32_t coords[4] = {
             rects[i],
             rects[i + 1],
-            rects[i] + rects[i + 2],
-            rects[i + 1] + rects[i + 3]
+            rects[i] + rects[i + 2] - 1,
+            rects[i + 1] + rects[i + 3] - 1
         };
 
-        mapCorrectedToRaw(coords, 2);
+        mapCorrectedToRaw(coords, 2, clamp, simple);
 
         // Map back to (l, t, width, height)
         rects[i] = coords[0];
         rects[i + 1] = coords[1];
-        rects[i + 2] = coords[2] - coords[0];
-        rects[i + 3] = coords[3] - coords[1];
+        rects[i + 2] = coords[2] - coords[0] + 1;
+        rects[i + 3] = coords[3] - coords[1] + 1;
     }
 
     return OK;
@@ -380,7 +440,8 @@
             };
             mDistortedGrid[index].src = &mCorrectedGrid[index];
             mDistortedGrid[index].coords = mCorrectedGrid[index].coords;
-            status_t res = mapCorrectedToRaw(mDistortedGrid[index].coords.data(), 4);
+            status_t res = mapCorrectedToRawImpl(mDistortedGrid[index].coords.data(), 4,
+                    /*clamp*/false, /*simple*/false);
             if (res != OK) return res;
         }
     }
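
Two threads run through the DistortionMapper changes above: the mapping functions gain an explicit clamp flag plus a "simple" path that only rescales between the active array and the pre-correction array, and the rectangle helpers now convert (l, t, w, h) to inclusive right/bottom coordinates (the -1/+1 pairs) so a rect spanning the full array survives a round trip unchanged. A standalone restatement of the simple corrected-to-raw math, written as a hypothetical free function rather than the class member:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical standalone version of the simple linear mapping: scale from
    // active-array coordinates to pre-correction-array coordinates, then clamp.
    void simpleCorrectedToRaw(int32_t* xy, int pointCount,
            float arrayW, float arrayH, float activeW, float activeH, bool clamp) {
        const float scaleX = arrayW / activeW;
        const float scaleY = arrayH / activeH;
        for (int i = 0; i < pointCount * 2; i += 2) {
            float rawX = xy[i] * scaleX;
            float rawY = xy[i + 1] * scaleY;
            if (clamp) {
                rawX = std::min(arrayW - 1, std::max(0.f, rawX));
                rawY = std::min(arrayH - 1, std::max(0.f, rawY));
            }
            xy[i] = static_cast<int32_t>(std::round(rawX));
            xy[i + 1] = static_cast<int32_t>(std::round(rawY));
        }
    }
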
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.h b/services/camera/libcameraservice/device3/DistortionMapper.h
index 00cbd32..4c0a1a6 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.h
+++ b/services/camera/libcameraservice/device3/DistortionMapper.h
@@ -73,8 +73,11 @@
      *
      *   coordPairs: A pointer to an array of consecutive (x,y) points
      *   coordCount: Number of (x,y) pairs to transform
+     *   clamp: Whether to clamp the result to the bounds of the active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    status_t mapRawToCorrected(int32_t *coordPairs, int coordCount);
+    status_t mapRawToCorrected(int32_t *coordPairs, int coordCount, bool clamp,
+            bool simple = true);
 
     /**
      * Transform from distorted (original) to corrected (warped) coordinates.
@@ -82,8 +85,11 @@
      *
      *   rects: A pointer to an array of consecutive (x,y, w, h) rectangles
      *   rectCount: Number of rectangles to transform
+     *   clamp: Whether to clamp the result to the bounds of the active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    status_t mapRawRectToCorrected(int32_t *rects, int rectCount);
+    status_t mapRawRectToCorrected(int32_t *rects, int rectCount, bool clamp,
+            bool simple = true);
 
     /**
      * Transform from corrected (warped) to distorted (original) coordinates.
@@ -91,9 +97,11 @@
      *
      *   coordPairs: A pointer to an array of consecutive (x,y) points
      *   coordCount: Number of (x,y) pairs to transform
+     *   clamp: Whether to clamp the result to the bounds of the pre-correction active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    template<typename T>
-    status_t mapCorrectedToRaw(T* coordPairs, int coordCount) const;
+    status_t mapCorrectedToRaw(int32_t* coordPairs, int coordCount, bool clamp,
+            bool simple = true) const;
 
     /**
      * Transform from corrected (warped) to distorted (original) coordinates.
@@ -101,8 +109,11 @@
      *
      *   rects: A pointer to an array of consecutive (x,y, w, h) rectangles
      *   rectCount: Number of rectangles to transform
+     *   clamp: Whether to clamp the result to the bounds of the pre-correction active array
+     *   simple: Whether to use a simple linear map instead of the full distortion correction
      */
-    status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount) const;
+    status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount, bool clamp,
+            bool simple = true) const;
 
     struct GridQuad {
         // Source grid quad, or null
@@ -150,8 +161,18 @@
     // Only capture result
     static const std::array<uint32_t, 1> kResultRectsToCorrect;
 
-    // Only for capture results
-    static const std::array<uint32_t, 2> kResultPointsToCorrect;
+    // Only for capture results; don't clamp
+    static const std::array<uint32_t, 2> kResultPointsToCorrectNoClamp;
+
+    // Single implementation for various mapCorrectedToRaw methods
+    template<typename T>
+    status_t mapCorrectedToRawImpl(T* coordPairs, int coordCount, bool clamp, bool simple) const;
+
+    // Simple linear interpolation option
+    template<typename T>
+    status_t mapCorrectedToRawImplSimple(T* coordPairs, int coordCount, bool clamp) const;
+
+    status_t mapRawToCorrectedSimple(int32_t *coordPairs, int coordCount, bool clamp) const;
 
     // Utility to create reverse mapping grids
     status_t buildGrids();
@@ -168,9 +189,11 @@
     float mK[5];
 
     // pre-correction active array dimensions
-    int mArrayWidth, mArrayHeight;
+    float mArrayWidth, mArrayHeight;
     // active array dimensions
-    int mActiveWidth, mActiveHeight;
+    float mActiveWidth, mActiveHeight;
+    // corner offsets between pre-correction and active arrays
+    float mArrayDiffX, mArrayDiffY;
 
     std::vector<GridQuad> mCorrectedGrid;
     std::vector<GridQuad> mDistortedGrid;
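
With the declarations above, callers now choose clamping explicitly and get the simple linear path by default. A minimal usage sketch (assuming `mapper` has already been initialized from the device's static metadata; the rectangle values are hypothetical):

    int32_t faceRect[4] = {100, 120, 200, 150};  // (l, t, w, h) in active-array coordinates
    mapper.mapCorrectedRectToRaw(faceRect, 1, /*clamp*/true);                    // simple path (default)
    mapper.mapCorrectedRectToRaw(faceRect, 1, /*clamp*/true, /*simple*/false);   // full distortion model
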
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
index b489931..54935c9 100644
--- a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
@@ -30,6 +30,7 @@
 
 
 int32_t testActiveArray[] = {100, 100, 1000, 750};
+int32_t testPreCorrActiveArray[] = {90, 90, 1020, 770};
 
 float testICal[] = { 1000.f, 1000.f, 500.f, 500.f, 0.f };
 
@@ -45,14 +46,19 @@
 };
 
 
-void setupTestMapper(DistortionMapper *m, float distortion[5]) {
+void setupTestMapper(DistortionMapper *m,
+        float distortion[5], float intrinsics[5],
+        int32_t activeArray[4], int32_t preCorrectionActiveArray[4]) {
     CameraMetadata deviceInfo;
 
     deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
-            testActiveArray, 4);
+            preCorrectionActiveArray, 4);
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+            activeArray, 4);
 
     deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
-            testICal, 5);
+            intrinsics, 5);
 
     deviceInfo.update(ANDROID_LENS_DISTORTION,
             distortion, 5);
@@ -89,6 +95,9 @@
     ASSERT_FALSE(m.calibrationValid());
 
     deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+            testPreCorrActiveArray, 4);
+
+    deviceInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
             testActiveArray, 4);
 
     deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
@@ -118,17 +127,19 @@
     status_t res;
 
     DistortionMapper m;
-    setupTestMapper(&m, identityDistortion);
+    setupTestMapper(&m, identityDistortion, testICal,
+            /*activeArray*/ testActiveArray,
+            /*preCorrectionActiveArray*/ testActiveArray);
 
     auto coords = basicCoords;
-    res = m.mapCorrectedToRaw(coords.data(), 5);
+    res = m.mapCorrectedToRaw(coords.data(), 5,  /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < coords.size(); i++) {
         EXPECT_EQ(coords[i], basicCoords[i]);
     }
 
-    res = m.mapRawToCorrected(coords.data(), 5);
+    res = m.mapRawToCorrected(coords.data(), 5, /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < coords.size(); i++) {
@@ -137,18 +148,18 @@
 
     std::array<int32_t, 8> rects = {
         0, 0, 100, 100,
-        testActiveArray[2] - 100, testActiveArray[3]-100, 100, 100
+        testActiveArray[2] - 101, testActiveArray[3] - 101, 100, 100
     };
 
     auto rectsOrig = rects;
-    res = m.mapCorrectedRectToRaw(rects.data(), 2);
+    res = m.mapCorrectedRectToRaw(rects.data(), 2, /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < rects.size(); i++) {
         EXPECT_EQ(rects[i], rectsOrig[i]);
     }
 
-    res = m.mapRawRectToCorrected(rects.data(), 2);
+    res = m.mapRawRectToCorrected(rects.data(), 2, /*clamp*/true);
     ASSERT_EQ(res, OK);
 
     for (size_t i = 0; i < rects.size(); i++) {
@@ -156,23 +167,63 @@
     }
 }
 
-TEST(DistortionMapperTest, LargeTransform) {
+TEST(DistortionMapperTest, ClampConsistency) {
+    status_t res;
+
+    std::array<int32_t, 4> activeArray = {0, 0, 4032, 3024};
+    DistortionMapper m;
+    setupTestMapper(&m, identityDistortion, testICal, /*activeArray*/ activeArray.data(),
+            /*preCorrectionActiveArray*/ activeArray.data());
+
+    auto rectsOrig = activeArray;
+    res = m.mapCorrectedRectToRaw(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < activeArray.size(); i++) {
+        EXPECT_EQ(activeArray[i], rectsOrig[i]);
+    }
+
+    res = m.mapRawRectToCorrected(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+    ASSERT_EQ(res, OK);
+
+    for (size_t i = 0; i < activeArray.size(); i++) {
+        EXPECT_EQ(activeArray[i], rectsOrig[i]);
+    }
+}
+
+TEST(DistortionMapperTest, SimpleTransform) {
+    status_t res;
+
+    DistortionMapper m;
+    setupTestMapper(&m, identityDistortion, testICal,
+            /*activeArray*/ testActiveArray,
+            /*preCorrectionActiveArray*/ testPreCorrActiveArray);
+
+    auto coords = basicCoords;
+    res = m.mapCorrectedToRaw(coords.data(), 5,  /*clamp*/true, /*simple*/true);
+    ASSERT_EQ(res, OK);
+
+    ASSERT_EQ(coords[0], 0); ASSERT_EQ(coords[1], 0);
+    ASSERT_EQ(coords[2], testPreCorrActiveArray[2] - 1); ASSERT_EQ(coords[3], 0);
+    ASSERT_EQ(coords[4], testPreCorrActiveArray[2] - 1); ASSERT_EQ(coords[5], testPreCorrActiveArray[3] - 1);
+    ASSERT_EQ(coords[6], 0); ASSERT_EQ(coords[7], testPreCorrActiveArray[3] - 1);
+    ASSERT_EQ(coords[8], testPreCorrActiveArray[2] / 2); ASSERT_EQ(coords[9], testPreCorrActiveArray[3] / 2);
+}
+
+
+void RandomTransformTest(::testing::Test *test,
+        int32_t* activeArray, DistortionMapper &m, bool clamp, bool simple) {
     status_t res;
     constexpr int maxAllowedPixelError = 2; // Maximum per-pixel error allowed
     constexpr int bucketsPerPixel = 3; // Histogram granularity
 
     unsigned int seed = 1234; // Ensure repeatability for debugging
-    const size_t coordCount = 1e6; // Number of random test points
-
-    float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
-
-    DistortionMapper m;
-    setupTestMapper(&m, bigDistortion);
+    const size_t coordCount = 1e5; // Number of random test points
 
     std::default_random_engine gen(seed);
 
-    std::uniform_int_distribution<int> x_dist(0, testActiveArray[2] - 1);
-    std::uniform_int_distribution<int> y_dist(0, testActiveArray[3] - 1);
+    std::uniform_int_distribution<int> x_dist(0, activeArray[2] - 1);
+    std::uniform_int_distribution<int> y_dist(0, activeArray[3] - 1);
 
     std::vector<int32_t> randCoords(coordCount * 2);
 
@@ -186,12 +237,12 @@
     auto origCoords = randCoords;
 
     base::Timer correctedToRawTimer;
-    res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2);
+    res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2, clamp, simple);
     auto correctedToRawDurationMs = correctedToRawTimer.duration();
     EXPECT_EQ(res, OK);
 
     base::Timer rawToCorrectedTimer;
-    res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2);
+    res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2, clamp, simple);
     auto rawToCorrectedDurationMs = rawToCorrectedTimer.duration();
     EXPECT_EQ(res, OK);
 
@@ -202,9 +253,9 @@
             (std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(
                 rawToCorrectedDurationMs) / (randCoords.size() / 2) ).count();
 
-    RecordProperty("CorrectedToRawDurationPerCoordUs",
+    test->RecordProperty("CorrectedToRawDurationPerCoordUs",
             base::StringPrintf("%f", correctedToRawDurationPerCoordUs));
-    RecordProperty("RawToCorrectedDurationPerCoordUs",
+    test->RecordProperty("RawToCorrectedDurationPerCoordUs",
             base::StringPrintf("%f", rawToCorrectedDurationPerCoordUs));
 
     // Calculate mapping errors after round trip
@@ -239,17 +290,61 @@
     }
 
     float rmsError = std::sqrt(totalErrorSq / randCoords.size());
-    RecordProperty("RmsError", base::StringPrintf("%f", rmsError));
+    test->RecordProperty("RmsError", base::StringPrintf("%f", rmsError));
     for (size_t i = 0; i < histogram.size(); i++) {
         std::string label = base::StringPrintf("HistogramBin[%f,%f)",
                 (float)i/bucketsPerPixel, (float)(i + 1)/bucketsPerPixel);
-        RecordProperty(label, histogram[i]);
+        test->RecordProperty(label, histogram[i]);
     }
-    RecordProperty("HistogramOutOfRange", outOfHistogram);
+    test->RecordProperty("HistogramOutOfRange", outOfHistogram);
+}
+
+// Test a realistic distortion function with matching calibration values, enforcing
+// clamping.
+TEST(DistortionMapperTest, DISABLED_SmallTransform) {
+    int32_t activeArray[] = {0, 8, 3278, 2450};
+    int32_t preCorrectionActiveArray[] = {0, 0, 3280, 2464};
+
+    float distortion[] = {0.06875723, -0.13922249, 0.02818312, -0.00032781, -0.00025431};
+    float intrinsics[] = {1812.50000000, 1812.50000000, 1645.59533691, 1229.23229980, 0.00000000};
+
+    DistortionMapper m;
+    setupTestMapper(&m, distortion, intrinsics, activeArray, preCorrectionActiveArray);
+
+    RandomTransformTest(this, activeArray, m, /*clamp*/true, /*simple*/false);
+}
+
+// Test a realistic distortion function with matching calibration values, enforcing
+// clamping, but using the simple linear transform
+TEST(DistortionMapperTest, SmallSimpleTransform) {
+    int32_t activeArray[] = {0, 8, 3278, 2450};
+    int32_t preCorrectionActiveArray[] = {0, 0, 3280, 2464};
+
+    float distortion[] = {0.06875723, -0.13922249, 0.02818312, -0.00032781, -0.00025431};
+    float intrinsics[] = {1812.50000000, 1812.50000000, 1645.59533691, 1229.23229980, 0.00000000};
+
+    DistortionMapper m;
+    setupTestMapper(&m, distortion, intrinsics, activeArray, preCorrectionActiveArray);
+
+    RandomTransformTest(this, activeArray, m, /*clamp*/true, /*simple*/true);
+}
+
+// Test a very large distortion function; the array regions aren't valid for such a big transform,
+// so disable clamping. This test only verifies round-trip math accuracy for big transforms.
+TEST(DistortionMapperTest, LargeTransform) {
+    float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
+
+    DistortionMapper m;
+    setupTestMapper(&m, bigDistortion, testICal,
+            /*activeArray*/testActiveArray,
+            /*preCorrectionActiveArray*/testPreCorrActiveArray);
+
+    RandomTransformTest(this, testActiveArray, m, /*clamp*/false, /*simple*/false);
 }
 
 // Compare against values calculated by OpenCV
 // undistortPoints() method, which is the same as mapRawToCorrected
+// Ignore clamping
 // See script DistortionMapperComp.py
 #include "DistortionMapperTest_OpenCvData.h"
 
@@ -262,11 +357,14 @@
     const int32_t maxSqError = 2;
 
     DistortionMapper m;
-    setupTestMapper(&m, bigDistortion);
+    setupTestMapper(&m, bigDistortion, testICal,
+            /*activeArray*/testActiveArray,
+            /*preCorrectionActiveArray*/testActiveArray);
 
     using namespace openCvData;
 
-    res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2);
+    res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2, /*clamp*/false,
+            /*simple*/false);
 
     for (size_t i = 0; i < rawCoords.size(); i+=2) {
         int32_t dist = (rawCoords[i] - expCoords[i]) * (rawCoords[i] - expCoords[i]) +
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index c0a353f..f4c49ec 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -49,7 +49,8 @@
     std::lock_guard<std::mutex> lock(mMonitorMutex);
 
     // Expand shorthands
-    if (ssize_t idx = tagNames.find("3a") != -1) {
+    ssize_t idx = tagNames.find("3a");
+    if (idx != -1) {
         ssize_t end = tagNames.find(",", idx);
         char* start = tagNames.lockBuffer(tagNames.size());
         start[idx] = '\0';
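
The TagMonitor fix above addresses a classic C++ pitfall: in `if (ssize_t idx = tagNames.find("3a") != -1)`, the initializer of `idx` is the entire comparison, so `idx` ends up holding 0 or 1 rather than the match position. A minimal std::string analogue of the bug and the fix (illustrative only; the real code uses String8 and -1):

    #include <cstdio>
    #include <string>

    int main() {
        std::string tagNames = "foo,3a,bar";

        // Buggy form: idx is initialized with the comparison result (1), not the position (4).
        if (size_t idx = tagNames.find("3a") != std::string::npos) {
            printf("buggy idx = %zu\n", idx);   // prints 1
        }

        // Fixed form: compute the position first, then test it.
        size_t idx = tagNames.find("3a");
        if (idx != std::string::npos) {
            printf("fixed idx = %zu\n", idx);   // prints 4
        }
        return 0;
    }
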
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
index 2eeb7fa..5b20e61 100644
--- a/services/mediaanalytics/Android.mk
+++ b/services/mediaanalytics/Android.mk
@@ -34,8 +34,7 @@
     $(TOP)/frameworks/av/include/camera                             \
     $(TOP)/frameworks/native/include/media/openmax                  \
     $(TOP)/frameworks/native/include/media/hardware                 \
-    $(TOP)/external/tremolo/Tremolo                                 \
-    libcore/include
+    $(TOP)/external/tremolo/Tremolo
 
 
 LOCAL_MODULE:= mediametrics
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index 6ec8895..edf4dab 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -55,4 +55,8 @@
 getdents64: 1
 getrandom: 1
 
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
 @include /system/etc/seccomp_policy/crash_dump.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index bbbe552..4031b11 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -24,6 +24,7 @@
 mmap2: 1
 fstat64: 1
 stat64: 1
+statfs64: 1
 madvise: 1
 fstatat64: 1
 futex: 1
@@ -55,4 +56,8 @@
 getpid: 1
 gettid: 1
 
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
 @include /system/etc/seccomp_policy/crash_dump.x86.policy
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index f9e21fb..f30f9bb 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -189,6 +189,7 @@
         minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
     }
     status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+    bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
     if (status != OK) {
         ALOGE("%s() - createMmapBuffer() failed with status %d %s",
               __func__, status, strerror(-status));
@@ -198,18 +199,13 @@
         ALOGD("%s() createMmapBuffer() returned = %d, buffer_size = %d, burst_size %d"
                       ", Sharable FD: %s",
               __func__, status,
-              abs(mMmapBufferinfo.buffer_size_frames),
+              mMmapBufferinfo.buffer_size_frames,
               mMmapBufferinfo.burst_size_frames,
-              mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
+              isBufferShareable ? "Yes" : "No");
     }
 
     setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
-    // The audio HAL indicates if the shared memory fd can be shared outside of audioserver
-    // by returning a negative buffer size
-    if (getBufferCapacity() < 0) {
-        // Exclusive mode can be used by client or service.
-        setBufferCapacity(-getBufferCapacity());
-    } else {
+    if (!isBufferShareable) {
         // Exclusive mode can only be used by the service because the FD cannot be shared.
         uid_t audioServiceUid = getuid();
         if ((mMmapClient.clientUid != audioServiceUid) &&